import collections
try:
    from collections.abc import MutableSequence
except ImportError:  # Python < 3.3 fallback
    from collections import MutableSequence
from copy import deepcopy
import decimal
try:
    from sanic.exceptions import abort
except ImportError:
    # Newer Sanic releases removed ``abort``; provide a small shim that
    # raises SanicException with the given status code instead.
    from sanic.exceptions import SanicException

    def abort(status_code, message=None):
        raise SanicException(message, status_code=status_code)
from sanic.exceptions import InvalidUsage
from sanic.request import Request, RequestParameters
from sanic.compat import Header
class Namespace(collections.UserDict):
def __missing__(self, name):
raise AttributeError(name)
def __getattr__(self, name):
return self.__getitem__(name)
_friendly_location = {
'json': 'the JSON body',
'form': 'the post body',
'args': 'the query string',
'values': 'the post body or the query string',
'headers': 'the HTTP headers',
'cookies': 'the request\'s cookies',
'files': 'an uploaded file',
}
class Argument(object):
"""
:param name: Either a name or a list of option strings, e.g. foo or
-f, --foo.
:param default: The value produced if the argument is absent from the
request.
:param dest: The name of the attribute to be added to the object
returned by :meth:`~reqparse.RequestParser.parse_args()`.
:param bool required: Whether or not the argument may be omitted (optionals
only).
:param action: The basic type of action to be taken when this argument
is encountered in the request. Valid options are "store" and "append".
:param ignore: Whether to ignore cases where the argument fails type
conversion
    :param type: The type to which the request argument should be
        converted. If a type raises an exception, the message in the
        error will be returned in the response. Defaults to ``None``,
        in which case the value is passed through unconverted.
:param location: The attributes of the :class:`sanic.Request` object
to source the arguments from (ex: headers, args, etc.), can be an
iterator. The last item listed takes precedence in the result set.
:param choices: A container of the allowable values for the argument.
:param help: A brief description of the argument, returned in the
response when the argument is invalid. May optionally contain
an "{error_msg}" interpolation token, which will be replaced with
the text of the error raised by the type converter.
    :param bool case_sensitive: Whether argument values in the request are
        case sensitive or not (if False, values and choices are lowercased)
:param bool store_missing: Whether the arguments default value should
be stored if the argument is missing from the request.
:param bool trim: If enabled, trims whitespace around the argument.
:param bool nullable: If enabled, allows null value in argument.
"""
def __init__(self,
name,
default=None,
dest=None,
required=False,
ignore=False,
type=None,
location=('json', 'form', 'args', 'files'),
choices=(),
action='store',
help=None,
operators=('=', ),
case_sensitive=True,
store_missing=True,
trim=False,
nullable=True,
ignore_invalid_usage=True):
self.name = name
self.default = default
self.dest = dest
self.required = required
self.ignore = ignore
self.location = location
self.type = type
self.choices = choices
self.action = action
self.help = help
self.case_sensitive = case_sensitive
self.operators = operators
self.store_missing = store_missing
self.trim = trim
self.nullable = nullable
self.ignore_invalid_usage = ignore_invalid_usage
def source(self, request):
"""Pulls values off the request in the provided location
if location is str:
json -> dict
form, args, file -> RequestParameters
if location is sequence:
return RequestParameters
:param request: The sanic request object to parse arguments from
"""
if isinstance(self.location, str):
try:
value = getattr(request, self.location, RequestParameters())
except InvalidUsage as e:
if self.ignore_invalid_usage:
return RequestParameters()
else:
raise e
if callable(value):
value = value()
if value:
return value
else:
values = RequestParameters()
for l in self.location:
value = getattr(request, l, None)
if callable(value):
value = value()
if value:
values.update(value)
return values
return RequestParameters()
def convert(self, value, op):
        # Uploaded files are passed through without type conversion.
        if self.location == "files":
return value
if not value:
if self.nullable:
return value
else:
raise ValueError("Must not be null")
try:
if not self.type:
return value
return self.type(value, self.name, op)
except TypeError:
try:
if self.type is decimal.Decimal:
return self.type(str(value), self.name)
else:
return self.type(value, self.name)
except TypeError:
return self.type(value)
def handle_validation_error(self, app, error, bundle_errors):
"""Called when an error is raised while parsing. Aborts the request
with a 400 status and an error message
:param error: the error that was raised
:param bundle_errors: do not abort when first error occurs, return a
dict with the name of the argument and the error message to be
bundled
"""
error_msg = self.help.format(error_msg=error) if self.help else error
msg = {self.name: error_msg}
if app.config.get("BUNDLE_ERRORS", False) or bundle_errors:
return error, msg
abort(status_code=400, message=msg)
def parse(self, request, req_temp, bundle_errors=False):
"""Parses argument value(s) from the request, converting according to
the argument's type.
:param request: The sanic request object to parse arguments from
        :param bundle_errors: do not abort when the first error occurs; return
            a dict with the name of the argument and the error message to be
            bundled
"""
source = self.source(request)
results = []
# Sentinels
_not_found = False
_found = True
for operator in self.operators:
name = self.name + operator.replace("=", "", 1)
if name in source:
# Account for MultiDict and regular dict
if hasattr(source, "getlist") and not isinstance(source, Header):
values = source.getlist(name)
else:
values = source.get(name)
if not (isinstance(values, MutableSequence)
and self.action == 'append'):
values = [values]
for value in values:
if hasattr(value, "strip") and self.trim:
value = value.strip()
if hasattr(value, "lower") and not self.case_sensitive:
value = value.lower()
if hasattr(self.choices, "__iter__"):
self.choices = [choice.lower()
for choice in self.choices]
try:
value = self.convert(value, operator)
except Exception as error:
if self.ignore:
continue
return self.handle_validation_error(
request.app, error, bundle_errors)
if self.choices and value not in self.choices:
if request.app.config.get("BUNDLE_ERRORS",
False) or bundle_errors:
return self.handle_validation_error(
request.app,
ValueError("{0} is not a valid choice".format(
value)), bundle_errors)
self.handle_validation_error(
request.app,
ValueError(
"{0} is not a valid choice".format(value)),
bundle_errors)
if name in req_temp.unparsed_arguments:
req_temp.unparsed_arguments.pop(name)
results.append(value)
if not results and self.required:
if isinstance(self.location, str):
error_msg = "Missing required parameter in {0}".format(
_friendly_location.get(self.location, self.location)
)
else:
friendly_locations = [_friendly_location.get(loc, loc)
for loc in self.location]
error_msg = "Missing required parameter in {0}".format(
' or '.join(friendly_locations)
)
if request.app.config.get("BUNDLE_ERRORS", False) or bundle_errors:
return self.handle_validation_error(request.app,
ValueError(error_msg),
bundle_errors)
self.handle_validation_error(request.app, ValueError(error_msg),
bundle_errors)
if not results:
if callable(self.default):
return self.default(), _not_found
else:
return self.default, _not_found
if self.action == 'append':
return results, _found
if self.action == 'store' or len(results) == 1:
return results[0], _found
return results, _found
class RequestParser:
"""Enables adding and parsing of multiple arguments in the context of a
single request. Ex::
from sanic_restful_api import reqparse
parser = reqparse.RequestParser()
parser.add_argument('foo')
parser.add_argument('int_bar', type=int)
args = parser.parse_args()
:param bool trim: If enabled, trims whitespace on all arguments in this
parser
:param bool bundle_errors: If enabled, do not abort when first error
occurs, return a dict with the name of the argument and the error
message to be bundled and return all validation errors
"""
def __init__(self,
argument_cls=Argument,
namespace_cls=Namespace,
trim=False,
bundle_errors=False):
self.args = {}
self.argument_cls = argument_cls
self.namespace_cls = namespace_cls
self.trim = trim
self.bundle_errors = bundle_errors
def add_argument(self, *args, **kwargs) -> None:
"""Adds an argument to be parsed.
Accepts either a single instance of Argument or arguments to be passed
into :class:`Argument`'s constructor.
See :class:`Argument`'s constructor for documentation on the
available options.
"""
if len(args) == 1 and isinstance(args[0], self.argument_cls):
# self.args[args[0].name] = args[0]
argument_obj = args[0]
else:
argument_obj = self.argument_cls(*args, **kwargs)
if self.trim and self.argument_cls is Argument:
argument_obj.trim = kwargs.get('trim', self.trim)
if self.args.get(argument_obj.name):
            raise RuntimeError(
                "Argument '%s' already exists" % argument_obj.name)
else:
self.args[argument_obj.name] = argument_obj
def parse_args(self, request: Request, strict=False):
"""Parse all arguments from the provided request and return the results
as a Namespace
        :param strict: if the request includes arguments not declared in the
            parser, abort with a 400 Bad Request error
"""
namespace = self.namespace_cls()
# A record of arguments not yet parsed; as each is found
# among self.args, it will be popped out
req_temp = collections.namedtuple('RedType', 'unparsed_arguments')
req_temp.unparsed_arguments = dict(
self.argument_cls('').source(request)) if strict else {}
errors = {}
for name, arg in self.args.items():
value, found = arg.parse(request, req_temp, self.bundle_errors)
if isinstance(value, ValueError):
errors.update(found)
found = None
if found or arg.store_missing:
namespace[arg.dest or name] = value
if errors:
abort(status_code=400, message=errors)
if strict and req_temp.unparsed_arguments:
abort(
status_code=400, message='Unknown arguments: %s' % ', '.join(
req_temp.unparsed_arguments.keys()))
return namespace
def copy(self):
"""
Creates a copy of this RequestParser with
the same set of arguments
"""
parser_copy = self.__class__(self.argument_cls, self.namespace_cls)
parser_copy.args = deepcopy(self.args)
parser_copy.trim = self.trim
parser_copy.bundle_errors = self.bundle_errors
return parser_copy
def replace_argument(self, name, *args, **kwargs):
""" Replace the argument matching the given name with a new version."""
new_args = self.argument_cls(name, *args, **kwargs)
if self.args.get(name):
self.args[name] = new_args
else:
            raise AttributeError("Argument '%s' does not exist" % name)
def remove_argument(self, name):
self.args.pop(name)
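# Illustrative usage sketch, not from the original distribution. The app,
# route and argument names are hypothetical; it shows how the RequestParser
# above is typically driven from a Sanic handler, parsing a JSON body.
if __name__ == "__main__":  # pragma: no cover
    from sanic import Sanic
    from sanic.response import json as json_response

    app = Sanic("reqparse_demo")

    parser = RequestParser()
    parser.add_argument("name", type=str, required=True, location="json")
    parser.add_argument("age", type=int, default=0, location="json")

    @app.route("/hello", methods=["POST"])
    async def hello(request):
        # A missing "name" aborts with a 400 and a friendly location message.
        args = parser.parse_args(request)
        return json_response({"name": args["name"], "age": args["age"]})

    app.run(host="127.0.0.1", port=8000)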
# Source: /sanic-restful-api-0.2.0.tar.gz/sanic-restful-api-0.2.0/sanic_restful_api/reqparse.py (pypi)
from calendar import timegm
from decimal import Decimal as MyDecimal, ROUND_HALF_EVEN
from email.utils import formatdate
import six
from sanic_restful_api import marshal
__all__ = ["String", "FormattedString", "DateTime", "Float",
"Integer", "Arbitrary", "Nested", "List", "Raw", "Boolean",
"Fixed", "Price"]
class MarshallingException(Exception):
"""
This is an encapsulating Exception in case of marshalling error.
"""
def __init__(self, underlying_exception):
# just put the contextual representation of the error to hint on what
# went wrong without exposing internals
super(MarshallingException, self).__init__(
six.text_type(underlying_exception))
def is_indexable_but_not_string(obj):
return not hasattr(obj, "strip") and hasattr(obj, "__iter__")
def get_value(key, obj, default=None):
"""Helper for pulling a keyed value off various types of objects"""
if isinstance(key, int):
return _get_value_for_key(key, obj, default)
elif callable(key):
return key(obj)
else:
return _get_value_for_keys(key.split('.'), obj, default)
def _get_value_for_keys(keys, obj, default):
if len(keys) == 1:
return _get_value_for_key(keys[0], obj, default)
else:
return _get_value_for_keys(
keys[1:], _get_value_for_key(keys[0], obj, default), default)
def _get_value_for_key(key, obj, default):
if is_indexable_but_not_string(obj):
try:
return obj[key]
except (IndexError, TypeError, KeyError):
pass
return getattr(obj, key, default)
def to_marshallable_type(obj):
"""Helper for converting an object to a dictionary only if it is not
dictionary already or an indexable object nor a simple type"""
if obj is None:
return None # make it idempotent for None
if hasattr(obj, '__marshallable__'):
return obj.__marshallable__()
if hasattr(obj, '__getitem__'):
return obj # it is indexable it is ok
return dict(obj.__dict__)
class Raw(object):
"""Raw provides a base field class from which others should extend. It
applies no formatting by default, and should only be used in cases where
data does not need to be formatted before being serialized. Fields should
throw a :class:`MarshallingException` in case of parsing problem.
:param default: The default value for the field, if no value is
specified.
:param attribute: If the public facing value differs from the internal
value, use this to retrieve a different attribute from the response
than the publicly named value.
"""
def __init__(self, default=None, attribute=None):
self.attribute = attribute
self.default = default
def format(self, value):
"""Formats a field's value. No-op by default - field classes that
modify how the value of existing object keys should be presented should
override this and apply the appropriate formatting.
:param value: The value to format
:exception MarshallingException: In case of formatting problem
Ex::
class TitleCase(Raw):
def format(self, value):
return unicode(value).title()
"""
return value
def output(self, key, obj):
"""Pulls the value for the given key from the object, applies the
field's formatting and returns the result. If the key is not found
in the object, returns the default value. Field classes that create
values which do not require the existence of the key in the object
should override this and return the desired value.
:exception MarshallingException: In case of formatting problem
"""
value = get_value(
key if self.attribute is None else self.attribute, obj)
if value is None:
return self.default
return self.format(value)
class Nested(Raw):
"""Allows you to nest one set of fields inside another.
See :ref:`nested-field` for more information
:param dict nested: The dictionary to nest
:param bool allow_null: Whether to return None instead of a dictionary
with null keys, if a nested dictionary has all-null keys
:param kwargs: If ``default`` keyword argument is present, a nested
dictionary will be marshaled as its value if nested dictionary is
all-null keys (e.g. lets you return an empty JSON object instead of
null)
"""
def __init__(self, nested, allow_null=False, **kwargs):
self.nested = nested
self.allow_null = allow_null
super(Nested, self).__init__(**kwargs)
def output(self, key, obj):
value = get_value(
key if self.attribute is None else self.attribute, obj)
if value is None:
if self.allow_null:
return None
elif self.default is not None:
return self.default
return marshal(value, self.nested)
class List(Raw):
"""
Field for marshalling lists of other fields.
See :ref:`list-field` for more information.
:param cls_or_instance: The field type the list will contain.
"""
def __init__(self, cls_or_instance, **kwargs):
super(List, self).__init__(**kwargs)
error_msg = ("The type of the list elements must be a subclass of "
"sanic_restful_api.fields.Raw")
if isinstance(cls_or_instance, type):
if not issubclass(cls_or_instance, Raw):
raise MarshallingException(error_msg)
self.container = cls_or_instance()
else:
if not isinstance(cls_or_instance, Raw):
raise MarshallingException(error_msg)
self.container = cls_or_instance
def format(self, value):
# Convert all instances in typed list to container type
if isinstance(value, set):
value = list(value)
return [
self.container.output(idx,
val if (isinstance(val, dict)
or (self.container.attribute
and hasattr(val, self.container.attribute)))
and not isinstance(self.container, Nested)
and not type(self.container) is Raw
else value)
for idx, val in enumerate(value)
]
def output(self, key, data):
value = get_value(
key if self.attribute is None else self.attribute, data)
# we cannot really test for external dict behavior
if is_indexable_but_not_string(value) and not isinstance(value, dict):
return self.format(value)
if value is None:
return self.default
return [marshal(value, self.container.nested)]
class String(Raw):
"""
Marshal a value as a string. Uses ``six.text_type`` so values will
be converted to :class:`unicode` in python2 and :class:`str` in
python3.
"""
def format(self, value):
try:
return six.text_type(value)
except ValueError as ve:
raise MarshallingException(ve)
class Integer(Raw):
""" Field for outputting an integer value.
:param int default: The default value for the field, if no value is
specified.
"""
def __init__(self, default=0, **kwargs):
super(Integer, self).__init__(default=default, **kwargs)
def format(self, value):
try:
if value is None:
return self.default
return int(value)
except ValueError as ve:
raise MarshallingException(ve)
class Boolean(Raw):
"""
Field for outputting a boolean value.
Empty collections such as ``""``, ``{}``, ``[]``, etc. will be converted to
``False``.
"""
def format(self, value):
return bool(value)
class FormattedString(Raw):
"""
FormattedString is used to interpolate other values from
the response into this field. The syntax for the source string is
the same as the string :meth:`~str.format` method from the python
stdlib.
Ex::
fields = {
'name': fields.String,
'greeting': fields.FormattedString("Hello {name}")
}
data = {
'name': 'Doug',
}
marshal(data, fields)
"""
def __init__(self, src_str):
"""
:param string src_str: the string to format with the other
values from the response.
"""
super(FormattedString, self).__init__()
self.src_str = six.text_type(src_str)
def output(self, key, obj):
try:
data = to_marshallable_type(obj)
return self.src_str.format(**data)
except (TypeError, IndexError) as error:
raise MarshallingException(error)
class Float(Raw):
"""
A double as IEEE-754 double precision.
ex : 3.141592653589793 3.1415926535897933e-06 3.141592653589793e+24 nan inf
-inf
"""
def format(self, value):
try:
return float(value)
except ValueError as ve:
raise MarshallingException(ve)
class Arbitrary(Raw):
"""
A floating point number with an arbitrary precision
ex: 634271127864378216478362784632784678324.23432
"""
def format(self, value):
return six.text_type(MyDecimal(value))
class DateTime(Raw):
"""
Return a formatted datetime string in UTC. Supported formats are RFC 822
and ISO 8601.
See :func:`email.utils.formatdate` for more info on the RFC 822 format.
See :meth:`datetime.datetime.isoformat` for more info on the ISO 8601
format.
:param dt_format: ``'rfc822'`` or ``'iso8601'``
:type dt_format: str
"""
def __init__(self, dt_format='rfc822', **kwargs):
super(DateTime, self).__init__(**kwargs)
self.dt_format = dt_format
def format(self, value):
try:
if self.dt_format == 'rfc822':
return _rfc822(value)
elif self.dt_format == 'iso8601':
return _iso8601(value)
else:
raise MarshallingException(
'Unsupported date format %s' % self.dt_format
)
except AttributeError as ae:
raise MarshallingException(ae)
ZERO = MyDecimal()
class Fixed(Raw):
"""
A decimal number with a fixed precision.
"""
def __init__(self, decimals=5, **kwargs):
super(Fixed, self).__init__(**kwargs)
self.precision = MyDecimal('0.' + '0' * (decimals - 1) + '1')
def format(self, value):
dvalue = MyDecimal(value)
if not dvalue.is_normal() and dvalue != ZERO:
raise MarshallingException('Invalid Fixed precision number.')
return six.text_type(dvalue.quantize(self.precision, rounding=ROUND_HALF_EVEN))
"""Alias for :class:`~fields.Fixed`"""
Price = Fixed
def _rfc822(dt):
"""Turn a datetime object into a formatted date.
Example::
fields._rfc822(datetime(2011, 1, 1)) => "Sat, 01 Jan 2011 00:00:00 -0000"
:param dt: The datetime to transform
:type dt: datetime
:return: A RFC 822 formatted date string
"""
return formatdate(timegm(dt.utctimetuple()))
def _iso8601(dt):
"""Turn a datetime object into an ISO8601 formatted date.
Example::
fields._iso8601(datetime(2012, 1, 1, 0, 0)) => "2012-01-01T00:00:00"
:param dt: The datetime to transform
:type dt: datetime
:return: A ISO 8601 formatted date string
"""
return dt.isoformat()
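# Illustrative usage sketch, not from the original distribution. The field
# and record names are hypothetical, and it assumes the package-level
# ``marshal`` helper imported above behaves like its flask-restful namesake.
if __name__ == "__main__":  # pragma: no cover
    from datetime import datetime

    item_fields = {
        "name": String,
        "created": DateTime(dt_format="iso8601"),
        "price": Fixed(decimals=2),
    }
    item = {"name": "widget", "created": datetime(2020, 1, 1), "price": "9.99"}
    # Expected result: OrderedDict([('name', 'widget'),
    #     ('created', '2020-01-01T00:00:00'), ('price', '9.99')])
    print(marshal(item, item_fields))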
# Source: /sanic-restful-api-0.2.0.tar.gz/sanic-restful-api-0.2.0/sanic_restful_api/fields.py (pypi)
from collections import OrderedDict
from functools import wraps
from sanic_restful import Resource
from sanic_restful.util import unpack
def marshal(data, fields, envelope=None):
"""Takes raw data (in the form of a dict, list, object) and a dict of
fields to output and filters the data based on those fields.
:param data: the actual object(s) from which the fields are taken from
:param fields: a dict of whose keys will make up the final serialized
response output
:param envelope: optional key that will be used to envelop the serialized
response
>>> from flask_restful import fields, marshal
>>> data = { 'a': 100, 'b': 'foo' }
>>> mfields = { 'a': fields.Raw }
>>> marshal(data, mfields)
OrderedDict([('a', 100)])
>>> marshal(data, mfields, envelope='data')
OrderedDict([('data', OrderedDict([('a', 100)]))])
"""
def make(cls):
if isinstance(cls, type):
return cls()
return cls
if isinstance(data, (list, tuple)):
return (OrderedDict([(envelope, [marshal(d, fields) for d in data])])
if envelope else [marshal(d, fields) for d in data])
items = ((k, marshal(data, v)
if isinstance(v, dict) else make(v).output(k, data))
for k, v in fields.items())
return OrderedDict(
[(envelope, OrderedDict(items))]) if envelope else OrderedDict(items)
class marshal_with(object):
"""A decorator that apply marshalling to the return values of your methods.
>>> from flask_restful import fields, marshal_with
>>> mfields = { 'a': fields.Raw }
>>> @marshal_with(mfields)
... def get():
... return { 'a': 100, 'b': 'foo' }
...
...
>>> get()
OrderedDict([('a', 100)])
>>> @marshal_with(mfields, envelope='data')
... def get():
... return { 'a': 100, 'b': 'foo' }
...
...
>>> get()
OrderedDict([('data', OrderedDict([('a', 100)]))])
see :meth:`flask_restful.marshal`
"""
def __init__(self, fields, envelope=None):
"""
:param fields: a dict of whose keys will make up the final
serialized response output
:param envelope: optional key that will be used to envelop the
serialized response
"""
self.fields = fields
self.envelope = envelope
def __call__(self, f):
@wraps(f)
async def wrapper(*args, **kwargs):
_cls = args[0] if args else None
if isinstance(_cls, Resource):
pass
resp = await f(*args, **kwargs)
if isinstance(resp, tuple):
data, code, headers = unpack(resp)
return marshal(data, self.fields, self.envelope), code, headers
else:
return marshal(resp, self.fields, self.envelope)
return wrapper
class marshal_with_field:
"""
A decorator that formats the return values of your methods
with a single field.
>>> from flask_restful import marshal_with_field, fields
>>> @marshal_with_field(fields.List(fields.Integer))
... def get():
... return ['1', 2, 3.0]
...
>>> get()
[1, 2, 3]
see :meth:`flask_restful.marshal_with`
"""
def __init__(self, field):
"""
:param field: a single field with which to marshal the output.
"""
if isinstance(field, type):
self.field = field()
else:
self.field = field
def __call__(self, f):
@wraps(f)
async def wrapper(*args, **kwargs):
resp = await f(*args, **kwargs)
if isinstance(resp, tuple):
data, code, headers = unpack(resp)
return self.field.format(data), code, headers
return self.field.format(resp)
return wrapper
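# Illustrative usage sketch, not from the original distribution. The field
# and function names are hypothetical; it applies marshal_with to a plain
# async callable rather than a Resource method.
if __name__ == "__main__":  # pragma: no cover
    import asyncio
    from sanic_restful.fields import Integer, String

    user_fields = {"id": Integer, "name": String}

    @marshal_with(user_fields, envelope="user")
    async def get_user():
        # The extra "password" key is dropped by marshalling.
        return {"id": 1, "name": "Ada", "password": "secret"}

    # Prints OrderedDict([('user', OrderedDict([('id', 1), ('name', 'Ada')]))])
    print(asyncio.run(get_user()))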
# Source: /sanic-restful-0.1.1.tar.gz/sanic-restful-0.1.1/sanic_restful/marshal.py (pypi)
from collections import OrderedDict
from functools import wraps
from sanic import Blueprint, Sanic
from sanic.exceptions import ServerError
from sanic.response import BaseHTTPResponse, text
from sanic_restful.exceptions import NotAcceptable
from sanic_restful.output import output_json
from sanic_restful.util import unpack
from werkzeug.http import parse_accept_header
DEFAULT_REPRESENTATIONS = [
('application/json', output_json)
]
class Api:
"""
The main entry point for the application.
    You need to initialize it with a Sanic application: ::
    >>> app = Sanic(__name__)
    >>> api = restful.Api(app)
    Alternatively, you can use :meth:`init_app` to set the Sanic application
    after it has been constructed.
    :param app: the Sanic application object
    :type app: sanic.Sanic or sanic.Blueprint
:param prefix: Prefix all routes with a value, eg v1 or 2010-04-01
:type prefix: str
:param default_mediatype: The default media type to return
:type default_mediatype: str
:param decorators: Decorators to attach to every resource
:type decorators: list
:param catch_all_404s: Use :meth:`handle_error`
to handle 404 errors throughout your app
:param serve_challenge_on_401: Whether to serve a challenge response to
clients on receiving 401. This usually leads to a username/password
        popup in web browsers.
:param url_part_order: A string that controls the order that the pieces
of the url are concatenated when the full url is constructed. 'b'
is the blueprint (or blueprint registration) prefix, 'a' is the api
prefix, and 'e' is the path component the endpoint is added with
:type catch_all_404s: bool
:param errors: A dictionary to define a custom response for each
exception or error raised during a request
:type errors: dict
"""
def __init__(self,
app=None,
prefix='',
default_mediatype="application/json",
decorators=None,
catch_all_404s=False,
serve_challenge_on_401=False,
url_part_order="bae",
errors=None):
self.representations = OrderedDict(DEFAULT_REPRESENTATIONS)
self.urls = {}
self.prefix = prefix
self.default_mediatype = default_mediatype
self.decorators = decorators if decorators else []
self.catch_all_404s = catch_all_404s
self.serve_challenge_on_401 = serve_challenge_on_401
self.url_part_order = url_part_order
self.errors = errors or {}
self.blueprint_setup = None
self.endpoints = set()
self.resources = []
self.app = None
self.blueprint = None
if app:
self.app = app
self.init_app(app)
def init_app(self, app):
"""Initialize this class with the given :class:`flask.Flask`
application or :class:`flask.Blueprint` object.
:param app: the Flask application or blueprint object
Examples::
api = Api()
api.add_resource(...)
api.init_app(app)
"""
# If app is a blueprint, defer the initialization
if isinstance(app, Blueprint):
# self.blueprint = app
self._bp_register = app.register
# TODO: register api resource for bp that call add resource function
app.register = self._sanic_blueprint_register_hook(app)
elif isinstance(app, Sanic):
self.register_api(app)
else:
            raise TypeError("app must be a Sanic application or Blueprint instance")
def _sanic_blueprint_register_hook(self, bp: Blueprint):
def register(app, options):
bp_obj = self._bp_register(app, options)
self.register_api(bp)
return bp_obj
return register
def register_api(self, app):
if len(self.resources) > 0:
for resource, urls, kwargs in self.resources:
self._register_view(app, resource, *urls, **kwargs)
def _register_view(self, app, resource, *urls, **kwargs):
endpoint = kwargs.pop("endpoint", None) or resource.__name__.lower()
self.endpoints.add(endpoint)
resource_class_args = kwargs.pop("resource_class_args", ())
resource_class_kwargs = kwargs.pop("resource_class_kwargs", {})
# Why?
# resouce.mediatypes = self.mediatypes
resource.endpoint = endpoint
resource_func = self.output(
resource.as_view(self, *resource_class_args,
**resource_class_kwargs))
for decorator in self.decorators:
resource_func = decorator(resource_func)
for url in urls:
rule = self._complete_url(url, '')
# Add the url to the application or blueprint
app.add_route(uri=rule, handler=resource_func, **kwargs)
@property
def mediatypes(self):
return [
"application/json",
"text/plain; charset=utf-8",
"application/octet-stream",
"text/html; charset=utf-8",
]
def output(self, resource):
"""Wraps a resource (as a flask view function), for cases where the
resource does not directly return a response object
:param resource: The resource as a flask view function
"""
@wraps(resource)
async def wrapper(request, *args, **kwargs):
resp = await resource(request, *args, **kwargs)
if isinstance(resp, BaseHTTPResponse):
return resp
else:
data, code, headers = unpack(resp)
return self.make_response(request, data, code, headers=headers)
return wrapper
def make_response(self, request, data, *args, **kwargs):
"""Looks up the representation transformer for the requested media
type, invoking the transformer to create a response object. This
defaults to default_mediatype if no transformer is found for the
requested mediatype. If default_mediatype is None, a 406 Not
Acceptable response will be sent as per RFC 2616 section 14.1
:param data: Python object containing response data to be transformed
"""
default_mediatype = kwargs.pop("fallback_mediatype",
None) or self.default_mediatype
# mediatype = best_match_accept_mimetype(
# request, self.representations, default=default_mediatype)
mediatype = parse_accept_header(request.headers.get(
'accept', None)).best_match(
self.representations, default=default_mediatype)
if not mediatype:
raise NotAcceptable("Not Acceptable")
if mediatype in self.representations:
resp = self.representations[mediatype](request.app, data, *args,
**kwargs)
resp.headers["Content-type"] = mediatype
return resp
elif mediatype == "text/plain":
resp = text(str(data), *args, **kwargs)
return resp
else:
raise ServerError(None)
def _complete_url(self, url_part, registration_prefix):
"""This method is used to defer the construction of the final url in
the case that the Api is created with a Blueprint.
:param url_part: The part of the url the endpoint is registered with
:param registration_prefix: The part of the url contributed by the
blueprint. Generally speaking, BlueprintSetupState.url_prefix
"""
parts = {'b': registration_prefix, 'a': self.prefix, 'e': url_part}
return ''.join(parts[key] for key in self.url_part_order if parts[key])
def add_resource(self, resource, *urls, **kwargs):
"""Adds a resource to the api.
:param resource: the class name of your resource
:type resource: :class:`Resource`
:param urls: one or more url routes to match for the resource, standard
flask routing rules apply. Any url variables will be
passed to the resource method as args.
:type urls: str
:param endpoint: endpoint name
(defaults to :meth:`Resource.__name__.lower`
Can be used to reference this route in :class:`fields.Url` fields
:type endpoint: str
:param resource_class_args: args to be forwarded to the constructor of
the resource.
:type resource_class_args: tuple
:param resource_class_kwargs: kwargs to be forwarded to the constructor
of the resource.
:type resource_class_kwargs: dict
Additional keyword arguments not specified above will be passed as-is
to :meth:`flask.Flask.add_url_rule`.
Examples::
api.add_resource(HelloWorld, '/', '/hello')
api.add_resource(Foo, '/foo', endpoint="foo")
api.add_resource(FooSpecial, '/special/foo', endpoint="foo")
"""
if self.app:
self._register_view(self.app, resource, *urls, **kwargs)
else:
self.resources.append((resource, urls, kwargs))
def resource(self, *urls, **kwargs):
"""Wraps a :class:`~flask_restful.Resource` class, adding it to the
api. Parameters are the same as :meth:`~flask_restful.Api.add_resource`
Example::
app = Flask(__name__)
api = restful.Api(app)
@api.resource('/foo')
class Foo(Resource):
def get(self):
return 'Hello, World!'
"""
def decorator(cls):
self.add_resource(cls, *urls, **kwargs)
return cls
return decorator
def representation(self, mediatype):
"""Allows additional representation transformers to be declared for the
api. Transformers are functions that must be decorated with this
method, passing the mediatype the transformer represents. Three
arguments are passed to the transformer:
* The data to be represented in the response body
* The http status code
* A dictionary of headers
The transformer should convert the data appropriately for the mediatype
and return a Flask response object.
Ex::
@api.representation('application/xml')
def xml(data, code, headers):
resp = make_response(convert_data_to_xml(data), code)
resp.headers.extend(headers)
return resp
"""
def wrapper(func):
self.representations[mediatype] = func
return func
return wrapper
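# Illustrative wiring sketch, not from the original distribution. The class
# and route names are hypothetical, and it assumes sanic_restful.Resource
# accepts the api instance and uses flask-restful style (request, ...)
# handler signatures, as the rest of this module expects.
if __name__ == "__main__":  # pragma: no cover
    from sanic_restful import Resource

    app = Sanic("api_demo")
    api = Api(app)

    class Hello(Resource):
        async def get(self, request):
            # Returning (data, status) lets Api.output() unpack the tuple and
            # build the response through the default JSON representation.
            return {"message": "hello"}, 200

    api.add_resource(Hello, "/hello")
    app.run(host="127.0.0.1", port=8000)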
# Source: /sanic-restful-0.1.1.tar.gz/sanic-restful-0.1.1/sanic_restful/api.py (pypi)
from calendar import timegm
from decimal import Decimal as MyDecimal, ROUND_HALF_EVEN
from email.utils import formatdate
from sanic_restful.marshal import marshal
__all__ = ["String", "FormattedString", "DateTime", "Float",
"Integer", "Arbitrary", "Nested", "List", "Raw", "Boolean",
"Fixed", "Price"]
class MarshallingException(Exception):
"""
This is an encapsulating Exception in case of marshalling error.
"""
def __init__(self, underlying_exception):
# just put the contextual representation of the error to hint on what
# went wrong without exposing internals
super().__init__(str(underlying_exception))
def is_indexable_but_not_string(obj):
return not hasattr(obj, "strip") and hasattr(obj, "__iter__")
def get_value(key, obj, default=None):
"""Helper for pulling a keyed value off various types of objects"""
if isinstance(key, int):
return _get_value_for_key(key, obj, default)
elif callable(key):
return key(obj)
else:
return _get_value_for_keys(key.split('.'), obj, default)
def _get_value_for_keys(keys, obj, default):
if len(keys) == 1:
return _get_value_for_key(keys[0], obj, default)
else:
return _get_value_for_keys(
keys[1:], _get_value_for_key(keys[0], obj, default), default)
def _get_value_for_key(key, obj, default):
if is_indexable_but_not_string(obj):
try:
return obj[key]
except (IndexError, TypeError, KeyError):
pass
return getattr(obj, key, default)
def to_marshallable_type(obj):
"""Helper for converting an object to a dictionary only if it is not
dictionary already or an indexable object nor a simple type"""
if obj is None:
return None # make it idempotent for None
if hasattr(obj, '__marshallable__'):
return obj.__marshallable__()
if hasattr(obj, '__getitem__'):
return obj # it is indexable it is ok
return dict(obj.__dict__)
class Raw(object):
"""Raw provides a base field class from which others should extend. It
applies no formatting by default, and should only be used in cases where
data does not need to be formatted before being serialized. Fields should
throw a :class:`MarshallingException` in case of parsing problem.
:param default: The default value for the field, if no value is
specified.
:param attribute: If the public facing value differs from the internal
value, use this to retrieve a different attribute from the response
than the publicly named value.
"""
def __init__(self, default=None, attribute=None):
self.attribute = attribute
self.default = default
def format(self, value):
"""Formats a field's value. No-op by default - field classes that
modify how the value of existing object keys should be presented should
override this and apply the appropriate formatting.
:param value: The value to format
:exception MarshallingException: In case of formatting problem
Ex::
class TitleCase(Raw):
def format(self, value):
return unicode(value).title()
"""
return value
def output(self, key, obj):
"""Pulls the value for the given key from the object, applies the
field's formatting and returns the result. If the key is not found
in the object, returns the default value. Field classes that create
values which do not require the existence of the key in the object
should override this and return the desired value.
:exception MarshallingException: In case of formatting problem
"""
value = get_value(key
if self.attribute is None else self.attribute, obj)
if value is None:
return self.default
return self.format(value)
class Nested(Raw):
"""Allows you to nest one set of fields inside another.
See :ref:`nested-field` for more information
:param dict nested: The dictionary to nest
:param bool allow_null: Whether to return None instead of a dictionary
with null keys, if a nested dictionary has all-null keys
:param kwargs: If ``default`` keyword argument is present, a nested
dictionary will be marshaled as its value if nested dictionary is
all-null keys (e.g. lets you return an empty JSON object instead of
null)
"""
def __init__(self, nested, allow_null=False, **kwargs):
self.nested = nested
self.allow_null = allow_null
super().__init__(**kwargs)
def output(self, key, obj):
value = get_value(key
if self.attribute is None else self.attribute, obj)
if value is None:
if self.allow_null:
return None
elif self.default is not None:
return self.default
return marshal(value, self.nested)
class List(Raw):
"""
Field for marshalling lists of other fields.
See :ref:`list-field` for more information.
:param cls_or_instance: The field type the list will contain.
"""
def __init__(self, cls_or_instance, **kwargs):
super().__init__(**kwargs)
        error_msg = ("The type of the list elements must be a subclass of "
                     "sanic_restful.fields.Raw")
if isinstance(cls_or_instance, type):
if not issubclass(cls_or_instance, Raw):
raise MarshallingException(error_msg)
self.container = cls_or_instance()
else:
if not isinstance(cls_or_instance, Raw):
raise MarshallingException(error_msg)
self.container = cls_or_instance
def format(self, value):
# Convert all instances in typed list to container type
if isinstance(value, set):
value = list(value)
return [
self.container.output(
idx, val if (isinstance(val, dict) or
(self.container.attribute
and hasattr(val, self.container.attribute)))
and not isinstance(self.container, Nested)
and not type(self.container) is Raw else value)
for idx, val in enumerate(value)
]
def output(self, key, data):
value = get_value(key
if self.attribute is None else self.attribute, data)
# we cannot really test for external dict behavior
if is_indexable_but_not_string(value) and not isinstance(value, dict):
return self.format(value)
if value is None:
return self.default
return [marshal(value, self.container.nested)]
class String(Raw):
"""
    Marshal a value as a string. Values are converted with :class:`str`.
"""
def format(self, value):
try:
return str(value)
except ValueError as ve:
raise MarshallingException(ve)
class Integer(Raw):
""" Field for outputting an integer value.
:param int default: The default value for the field, if no value is
specified.
"""
def __init__(self, default=0, **kwargs):
super().__init__(default=default, **kwargs)
def format(self, value):
try:
if value is None:
return self.default
return int(value)
except ValueError as ve:
raise MarshallingException(ve)
class Boolean(Raw):
"""
Field for outputting a boolean value.
Empty collections such as ``""``, ``{}``, ``[]``, etc. will be converted to
``False``.
"""
def format(self, value):
return bool(value)
class FormattedString(Raw):
"""
FormattedString is used to interpolate other values from
the response into this field. The syntax for the source string is
the same as the string :meth:`~str.format` method from the python
stdlib.
Ex::
fields = {
'name': fields.String,
'greeting': fields.FormattedString("Hello {name}")
}
data = {
'name': 'Doug',
}
marshal(data, fields)
"""
def __init__(self, src_str):
"""
:param string src_str: the string to format with the other
values from the response.
"""
super().__init__()
self.src_str = str(src_str)
def output(self, key, obj):
try:
data = to_marshallable_type(obj)
return self.src_str.format(**data)
except (TypeError, IndexError) as error:
raise MarshallingException(error)
class Float(Raw):
"""
A double as IEEE-754 double precision.
ex : 3.141592653589793 3.1415926535897933e-06 3.141592653589793e+24 nan inf
-inf
"""
def format(self, value):
try:
return float(value)
except ValueError as ve:
raise MarshallingException(ve)
class Arbitrary(Raw):
"""
A floating point number with an arbitrary precision
ex: 634271127864378216478362784632784678324.23432
"""
def format(self, value):
return str(MyDecimal(value))
class DateTime(Raw):
"""
Return a formatted datetime string in UTC. Supported formats are RFC 822
and ISO 8601.
See :func:`email.utils.formatdate` for more info on the RFC 822 format.
See :meth:`datetime.datetime.isoformat` for more info on the ISO 8601
format.
:param dt_format: ``'rfc822'`` or ``'iso8601'``
:type dt_format: str
"""
def __init__(self, dt_format='rfc822', **kwargs):
super().__init__(**kwargs)
self.dt_format = dt_format
def format(self, value):
try:
if self.dt_format == 'rfc822':
return _rfc822(value)
elif self.dt_format == 'iso8601':
return _iso8601(value)
else:
raise MarshallingException(
'Unsupported date format %s' % self.dt_format
)
except AttributeError as ae:
raise MarshallingException(ae)
ZERO = MyDecimal()
class Fixed(Raw):
"""
A decimal number with a fixed precision.
"""
def __init__(self, decimals=5, **kwargs):
super().__init__(**kwargs)
self.precision = MyDecimal('0.' + '0' * (decimals - 1) + '1')
def format(self, value):
dvalue = MyDecimal(value)
if not dvalue.is_normal() and dvalue != ZERO:
raise MarshallingException('Invalid Fixed precision number.')
return str(
dvalue.quantize(self.precision, rounding=ROUND_HALF_EVEN))
"""Alias for :class:`~fields.Fixed`"""
Price = Fixed
def _rfc822(dt):
"""Turn a datetime object into a formatted date.
Example::
fields._rfc822(datetime(2011, 1, 1)) => "Sat, 01 Jan 2011 00:00:00 -0000"
:param dt: The datetime to transform
:type dt: datetime
:return: A RFC 822 formatted date string
"""
return formatdate(timegm(dt.utctimetuple()))
def _iso8601(dt):
"""Turn a datetime object into an ISO8601 formatted date.
Example::
fields._iso8601(datetime(2012, 1, 1, 0, 0)) => "2012-01-01T00:00:00"
:param dt: The datetime to transform
:type dt: datetime
:return: A ISO 8601 formatted date string
"""
return dt.isoformat()
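# Illustrative usage sketch, not from the original distribution. The field
# and record names are hypothetical; it exercises Nested and List through
# the ``marshal`` helper imported at the top of this module.
if __name__ == "__main__":  # pragma: no cover
    address_fields = {"city": String}
    user_fields = {
        "name": String,
        "address": Nested(address_fields),
        "tags": List(String),
    }
    user = {"name": "Ada", "address": {"city": "London"}, "tags": ["py", "web"]}
    # Expected result: OrderedDict([('name', 'Ada'),
    #     ('address', OrderedDict([('city', 'London')])),
    #     ('tags', ['py', 'web'])])
    print(marshal(user, user_fields))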
# Source: /sanic-restful-0.1.1.tar.gz/sanic-restful-0.1.1/sanic_restful/fields.py (pypi)
import inspect
import warnings
from collections import namedtuple
from sanic.constants import HTTP_METHODS
from .errors import abort
from .marshalling import marshal, marshal_with
from .model import Model, OrderedModel, SchemaModel
from .reqparse import RequestParser
from .utils import merge
from ._http import HTTPStatus
# Container for each route applied to a Resource using @ns.route decorator
ResourceRoute = namedtuple("ResourceRoute", "resource urls route_doc kwargs")
class Namespace(object):
'''
Group resources together.
Namespace is to API what :class:`flask:flask.Blueprint` is for :class:`flask:flask.Flask`.
:param str name: The namespace name
:param str description: An optional short description
:param str path: An optional prefix path. If not provided, prefix is ``/+name``
:param list decorators: A list of decorators to apply to each resources
:param bool validate: Whether or not to perform validation on this namespace
:param bool ordered: Whether or not to preserve order on models and marshalling
    :param Api api: an optional API to attach to the namespace
'''
def __init__(self, name, description=None, path=None, decorators=None, validate=None,
authorizations=None, ordered=False, **kwargs):
self.name = name
self.description = description
self._path = path
self._schema = None
self._validate = validate
self.models = {}
self.urls = {}
self.decorators = decorators if decorators else []
self.resources = [] # List[ResourceRoute]
self.error_handlers = {}
self.default_error_handler = None
self.authorizations = authorizations
self.ordered = ordered
self.apis = []
if 'api' in kwargs:
self.apis.append(kwargs['api'])
@property
def path(self):
return (self._path or ('/' + self.name)).rstrip('/')
def add_resource(self, resource, *urls, **kwargs):
'''
Register a Resource for a given API Namespace
        :param Resource resource: the resource to register
:param str urls: one or more url routes to match for the resource,
standard flask routing rules apply.
Any url variables will be passed to the resource method as args.
:param str endpoint: endpoint name (defaults to :meth:`Resource.__name__.lower`
Can be used to reference this route in :class:`fields.Url` fields
:param list|tuple resource_class_args: args to be forwarded to the constructor of the resource.
:param dict resource_class_kwargs: kwargs to be forwarded to the constructor of the resource.
Additional keyword arguments not specified above will be passed as-is
to :meth:`flask.Flask.add_url_rule`.
Examples::
namespace.add_resource(HelloWorld, '/', '/hello')
namespace.add_resource(Foo, '/foo', endpoint="foo")
namespace.add_resource(FooSpecial, '/special/foo', endpoint="foo")
'''
route_doc = kwargs.pop('route_doc', {})
self.resources.append(ResourceRoute(resource, urls, route_doc, kwargs))
for api in self.apis:
ns_urls = api.ns_urls(self, urls)
api.register_resource(self, resource, *ns_urls, **kwargs)
def route(self, *urls, **kwargs):
'''
A decorator to route resources.
'''
def wrapper(cls):
doc = kwargs.pop('doc', None)
if doc is not None:
# build api doc intended only for this route
kwargs['route_doc'] = self._build_doc(cls, doc)
self.add_resource(cls, *urls, **kwargs)
return cls
return wrapper
def _build_doc(self, cls, doc):
if doc is False:
return False
unshortcut_params_description(doc)
handle_deprecations(doc)
for http_method in {str(m).lower() for m in HTTP_METHODS}:
if http_method in doc:
if doc[http_method] is False:
continue
unshortcut_params_description(doc[http_method])
handle_deprecations(doc[http_method])
if 'expect' in doc[http_method] and not isinstance(doc[http_method]['expect'], (list, tuple)):
doc[http_method]['expect'] = [doc[http_method]['expect']]
return merge(getattr(cls, '__apidoc__', {}), doc)
def doc(self, shortcut=None, **kwargs):
'''A decorator to add some api documentation to the decorated object'''
if isinstance(shortcut, str):
kwargs['id'] = shortcut
show = shortcut if isinstance(shortcut, bool) else True
def wrapper(documented):
documented.__apidoc__ = self._build_doc(
documented,
kwargs if show else False
)
return documented
return wrapper
def hide(self, func):
'''A decorator to hide a resource or a method from specifications'''
return self.doc(False)(func)
def abort(self, *args, **kwargs):
'''
Properly abort the current request
See: :func:`~sanic_restplus.errors.abort`
'''
abort(*args, **kwargs)
def add_model(self, name, definition):
self.models[name] = definition
for api in self.apis:
api.models[name] = definition
return definition
def model(self, name=None, model=None, mask=None, **kwargs):
'''
Register a model
.. seealso:: :class:`Model`
'''
cls = OrderedModel if self.ordered else Model
model = cls(name, model, mask=mask)
model.__apidoc__.update(kwargs)
return self.add_model(name, model)
def schema_model(self, name=None, schema=None):
'''
Register a model
.. seealso:: :class:`Model`
'''
model = SchemaModel(name, schema)
return self.add_model(name, model)
def extend(self, name, parent, fields):
'''
Extend a model (Duplicate all fields)
:deprecated: since 0.9. Use :meth:`clone` instead
'''
if isinstance(parent, list):
parents = parent + [fields]
model = Model.extend(name, *parents)
else:
model = Model.extend(name, parent, fields)
return self.add_model(name, model)
def clone(self, name, *specs):
'''
Clone a model (Duplicate all fields)
:param str name: the resulting model name
:param specs: a list of models from which to clone the fields
.. seealso:: :meth:`Model.clone`
'''
model = Model.clone(name, *specs)
return self.add_model(name, model)
def inherit(self, name, *specs):
'''
        Inherit a model (use the Swagger composition pattern aka. allOf)
.. seealso:: :meth:`Model.inherit`
'''
model = Model.inherit(name, *specs)
return self.add_model(name, model)
def expect(self, *inputs, **kwargs):
'''
A decorator to Specify the expected input model
        :param ModelBase|RequestParser inputs: An expect model or request parser
:param bool validate: whether to perform validation or not
'''
expect = []
params = {
'validate': kwargs.get('validate', self._validate),
'expect': expect
}
for param in inputs:
expect.append(param)
return self.doc(**params)
def parser(self):
        '''Instantiate a :class:`~RequestParser`'''
return RequestParser()
def as_list(self, field):
'''Allow to specify nested lists for documentation'''
field.__apidoc__ = merge(getattr(field, '__apidoc__', {}), {'as_list': True})
return field
def marshal_with(self, fields, as_list=False, code=HTTPStatus.OK, description=None, **kwargs):
'''
A decorator specifying the fields to use for serialization.
:param bool as_list: Indicate that the return type is a list (for the documentation)
:param int code: Optionally give the expected HTTP response code if its different from 200
'''
code = int(code)
doc = {
'responses': {
str(code): (description, [fields]) if as_list else (description, fields)
},
'__mask__': kwargs.get('mask', True), # Mask values can't be determined outside app context
}
real_marshal_with = marshal_with(fields, ordered=self.ordered, **kwargs)
def wrapper(func):
nonlocal doc
func.__apidoc__ = merge(getattr(func, '__apidoc__', {}), doc)
return real_marshal_with(func)
return wrapper
def marshal_list_with(self, fields, **kwargs):
'''A shortcut decorator for :meth:`~Api.marshal_with` with ``as_list=True``'''
return self.marshal_with(fields, True, **kwargs)
def marshal(self, *args, **kwargs):
'''A shortcut to the :func:`marshal` helper'''
return marshal(*args, **kwargs)
def errorhandler(self, exception):
'''A decorator to register an error handler for a given exception'''
if inspect.isclass(exception) and issubclass(exception, Exception):
# Register an error handler for a given exception
def wrapper(func):
self.error_handlers[exception] = func
return func
return wrapper
else:
# Register the default error handler
self.default_error_handler = exception
return exception
def param(self, name, description=None, _in='query', **kwargs):
'''
A decorator to specify one of the expected parameters
:param str name: the parameter name
:param str description: a small description
:param str _in: the parameter location `(query|header|formData|body|cookie)`
'''
param = kwargs
param['in'] = _in
param['description'] = description
return self.doc(params={name: param})
def response(self, code, description, model=None, **kwargs):
'''
A decorator to specify one of the expected responses
:param int code: the HTTP status code
:param str description: a small description about the response
:param ModelBase model: an optional response model
'''
return self.doc(responses={code: (description, model, kwargs)})
def header(self, name, description=None, **kwargs):
'''
A decorator to specify one of the expected headers
:param str name: the HTTP header name
:param str description: a description about the header
'''
header = {'description': description}
header.update(kwargs)
return self.doc(headers={name: header})
def produces(self, mimetypes):
'''A decorator to specify the MIME types the API can produce'''
return self.doc(produces=mimetypes)
def deprecated(self, func):
'''A decorator to mark a resource or a method as deprecated'''
return self.doc(deprecated=True)(func)
def vendor(self, *args, **kwargs):
'''
A decorator to expose vendor extensions.
Extensions can be submitted as dict or kwargs.
        The ``x-`` prefix is optional and will be added if missing.
See: http://swagger.io/specification/#specification-extensions-128
'''
for arg in args:
kwargs.update(arg)
return self.doc(vendor=kwargs)
def payload(self, request):
'''Store the input payload in the current request context'''
return request.json
def unshortcut_params_description(data):
if 'params' in data:
for name, description in data['params'].items():
if isinstance(description, str):
data['params'][name] = {'description': description}
def handle_deprecations(doc):
if 'parser' in doc:
warnings.warn('The parser attribute is deprecated, use expect instead', DeprecationWarning, stacklevel=2)
doc['expect'] = doc.get('expect', []) + [doc.pop('parser')]
if 'body' in doc:
warnings.warn('The body attribute is deprecated, use expect instead', DeprecationWarning, stacklevel=2)
doc['expect'] = doc.get('expect', []) + [doc.pop('body')]
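# Illustrative usage sketch, not from the original distribution. The resource
# and route names are hypothetical; it shows the typical @ns.route / @ns.doc
# pattern for the Namespace class defined above.
if __name__ == "__main__":  # pragma: no cover
    from sanic_restplus.resource import Resource

    ns = Namespace("todos", description="TODO operations")

    @ns.route("/<todo_id:int>")
    class Todo(Resource):
        @ns.doc("get_todo")
        async def get(self, request, todo_id):
            return {"id": todo_id}

    # The route is recorded on the namespace until an Api registers it.
    print(ns.resources[0].urls)  # ('/<todo_id:int>',)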
# Source: /sanic-restplus-0.6.4.tar.gz/sanic-restplus-0.6.4/sanic_restplus/namespace.py (pypi)
import inspect
from asyncio import iscoroutinefunction
from sanic.views import HTTPMethodView
from sanic.response import BaseHTTPResponse
from sanic.constants import HTTP_METHODS
from .model import ModelBase
from .utils import unpack, best_match_accept_mimetype
class MethodViewExt(HTTPMethodView):
methods = None
method_has_context = None
@classmethod
def as_view_named(cls, endpoint_name, *class_args, **class_kwargs):
"""Return view function for use with the routing system, that
dispatches request to appropriate handler method.
"""
view = super(MethodViewExt, cls).as_view(*class_args, **class_kwargs)
view.__name__ = endpoint_name
return view
class ResourceMeta(type):
def __new__(mcs, name, bases, d):
p_type = type.__new__(mcs, name, bases, d)
if 'methods' not in d:
methods = set(p_type.methods or [])
method_has_context = p_type.method_has_context or {}
for m in HTTP_METHODS:
ml = m.lower()
func = d.get(ml, None)
if func:
methods.add(m)
s = inspect.signature(func)
if len(s.parameters) < 3:
continue
# We have more than just 'self' and 'request'
p = iter(s.parameters.items())
next(p) # self/cls
next(p) # request
p = list(p)
for (i, (k, v)) in enumerate(p):
if v.name == "context":
if v.default == v.empty:
method_has_context[m] = i+2
else:
method_has_context[m] = 'k'
continue
# If we have no method at all in there we don't want to
# add a method list. (This is for instance the case for
# the base class or another subclass of a base method view
# that does not introduce new methods).
if methods:
p_type.methods = sorted(methods)
p_type.method_has_context = method_has_context
return p_type
class Resource(MethodViewExt, metaclass=ResourceMeta):
"""
Represents an abstract sanic_restplus.Resource.
Concrete resources should extend from this class
and expose methods for each supported HTTP method.
If a resource is invoked with an unsupported HTTP method,
the API will return a response with status 405 Method Not Allowed.
Otherwise the appropriate method is called and passed all arguments
from the url rule used when adding the resource to an Api instance.
See :meth:`~sanic_restplus.Api.add_resource` for details.
"""
representations = None
method_decorators = []
def __init__(self, api=None, *args, **kwargs):
self.api = api
async def dispatch_request(self, request, *args, **kwargs):
context = kwargs.pop('context', None)
has_context = bool(context)
requestmethod = request.method
meth = getattr(self, requestmethod.lower(), None)
if meth is None and requestmethod == 'HEAD':
meth = getattr(self, 'get', None)
requestmethod = 'GET'
elif meth is None and requestmethod == 'OPTIONS':
meth = getattr(self, 'get', None)
requestmethod = 'GET'
assert meth is not None, 'Unimplemented method {0!r}'.format(requestmethod)
method_has_context = self.method_has_context.get(requestmethod, False)
for decorator in self.method_decorators:
meth = decorator(meth)
self.validate_payload(request, meth)
if has_context and method_has_context:
if method_has_context == 'k' or len(kwargs) > 0:
kwargs.setdefault('context', context)
else:
pos = int(method_has_context) - 2 # skip self and request
args = list(args)
args.insert(pos, context)
do_await = iscoroutinefunction(meth)
resp = meth(request, *args, **kwargs)
if do_await:
resp = await resp
if not resp: # Sanic 21.3+ can stream its own response, so it would return None
return resp
resp_type = type(resp)
if issubclass(resp_type, BaseHTTPResponse):
return resp
elif inspect.isawaitable(resp):
# Still have a coroutine or awaitable even after waiting.
# let the output handler handle it
return resp
representations = self.representations or {}
mediatype = best_match_accept_mimetype(request, representations, default=None)
if mediatype in representations:
# resp might be a coroutine. Wait for it
data, code, headers = unpack(resp)
resp = representations[mediatype](data, code, headers)
resp.headers['Content-Type'] = mediatype
return resp
return resp
def __validate_payload(self, request, expect, collection=False):
'''
:param ModelBase expect: the expected model for the input payload
:param bool collection: False if a single object of a resource is
expected, True if a collection of objects of a resource is expected.
'''
# TODO: proper content negotiation
data = request.json
if collection:
data = data if isinstance(data, list) else [data]
for obj in data:
expect.validate(obj, self.api.refresolver, self.api.format_checker)
else:
expect.validate(data, self.api.refresolver, self.api.format_checker)
def validate_payload(self, request, func):
'''Perform a payload validation on expected model if necessary'''
if getattr(func, '__apidoc__', False) is not False:
doc = func.__apidoc__
validate = doc.get('validate', None)
validate = validate if validate is not None else self.api._validate
if validate:
for expect in doc.get('expect', []):
# TODO: handle third party handlers
if isinstance(expect, list) and len(expect) == 1:
if isinstance(expect[0], ModelBase):
self.__validate_payload(request, expect[0], collection=True)
if isinstance(expect, ModelBase):
self.__validate_payload(request, expect, collection=False)
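# --- Illustrative usage sketch (not part of the original module) ---
# A minimal Resource subclass, assumed to be registered on an Api/Namespace
# elsewhere; the class name and parameters are invented for illustration.
# Handlers receive the Sanic request explicitly, and dispatch_request()
# injects an optional `context` argument when the handler declares one.
#
#     class TodoItem(Resource):
#         async def get(self, request, todo_id):
#             return {'id': todo_id}
#
#         async def post(self, request, todo_id, context=None):
#             return {'id': todo_id, 'data': request.json}, 201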
/sanic-restplus-0.6.4.tar.gz/sanic-restplus-0.6.4/sanic_restplus/resource.py
from enum import IntEnum
class HTTPStatus(IntEnum):
"""HTTP status codes and reason phrases
Status codes from the following RFCs are all observed:
* RFC 7231: Hypertext Transfer Protocol (HTTP/1.1), obsoletes 2616
* RFC 6585: Additional HTTP Status Codes
* RFC 3229: Delta encoding in HTTP
* RFC 4918: HTTP Extensions for WebDAV, obsoletes 2518
* RFC 5842: Binding Extensions to WebDAV
* RFC 7238: Permanent Redirect
* RFC 2295: Transparent Content Negotiation in HTTP
* RFC 2774: An HTTP Extension Framework
"""
def __new__(cls, value, phrase, description=''):
"""
:param value:
:type value: int
:param phrase:
:type phrase: str
:param description:
:type description: str
"""
obj = int.__new__(cls, value)
obj._value_ = value
obj.phrase = phrase
obj.description = description
return obj
def __str__(self):
return str(self.value)
def __hash__(self):
return self.value.__hash__()
def __eq__(self, other):
if isinstance(other, HTTPStatus):
return self.value == other.value
elif isinstance(other, int):
return self.value == other
elif isinstance(other, (tuple, list)) and len(other) > 1:
return self.value == other[0]
return False
def __gt__(self, other):
if isinstance(other, HTTPStatus):
return self.value > other.value
elif isinstance(other, int):
return self.value > other
elif isinstance(other, (tuple, list)) and len(other) > 1:
return self.value > other[0]
return False
def __ge__(self, other):
if isinstance(other, HTTPStatus):
return self.value >= other.value
elif isinstance(other, int):
return self.value >= other
elif isinstance(other, (tuple, list)) and len(other) > 1:
return self.value >= other[0]
return False
def __lt__(self, other):
if isinstance(other, HTTPStatus):
return self.value < other.value
elif isinstance(other, int):
return self.value < other
elif isinstance(other, (tuple, list)) and len(other) > 1:
return self.value < other[0]
return False
def __le__(self, other):
if isinstance(other, HTTPStatus):
return self.value <= other.value
elif isinstance(other, int):
return self.value <= other
elif isinstance(other, (tuple, list)) and len(other) > 1:
return self.value <= other[0]
return False
# informational
CONTINUE = 100, 'Continue', 'Request received, please continue'
SWITCHING_PROTOCOLS = (101, 'Switching Protocols',
'Switching to new protocol; obey Upgrade header')
PROCESSING = 102, 'Processing'
# success
OK = 200, 'OK', 'Request fulfilled, document follows'
CREATED = 201, 'Created', 'Document created, URL follows'
ACCEPTED = (202, 'Accepted',
'Request accepted, processing continues off-line')
NON_AUTHORITATIVE_INFORMATION = (203,
'Non-Authoritative Information', 'Request fulfilled from cache')
NO_CONTENT = 204, 'No Content', 'Request fulfilled, nothing follows'
RESET_CONTENT = 205, 'Reset Content', 'Clear input form for further input'
PARTIAL_CONTENT = 206, 'Partial Content', 'Partial content follows'
MULTI_STATUS = 207, 'Multi-Status'
ALREADY_REPORTED = 208, 'Already Reported'
IM_USED = 226, 'IM Used'
# redirection
MULTIPLE_CHOICES = (300, 'Multiple Choices',
'Object has several resources -- see URI list')
MOVED_PERMANENTLY = (301, 'Moved Permanently',
'Object moved permanently -- see URI list')
FOUND = 302, 'Found', 'Object moved temporarily -- see URI list'
SEE_OTHER = 303, 'See Other', 'Object moved -- see Method and URL list'
NOT_MODIFIED = (304, 'Not Modified',
'Document has not changed since given time')
USE_PROXY = (305, 'Use Proxy',
'You must use proxy specified in Location to access this resource')
TEMPORARY_REDIRECT = (307, 'Temporary Redirect',
'Object moved temporarily -- see URI list')
PERMANENT_REDIRECT = (308, 'Permanent Redirect',
'Object moved permanently -- see URI list')
# client error
BAD_REQUEST = (400, 'Bad Request',
'Bad request syntax or unsupported method')
UNAUTHORIZED = (401, 'Unauthorized',
'No permission -- see authorization schemes')
PAYMENT_REQUIRED = (402, 'Payment Required',
'No payment -- see charging schemes')
FORBIDDEN = (403, 'Forbidden',
'Request forbidden -- authorization will not help')
NOT_FOUND = (404, 'Not Found',
'Nothing matches the given URI')
METHOD_NOT_ALLOWED = (405, 'Method Not Allowed',
'Specified method is invalid for this resource')
NOT_ACCEPTABLE = (406, 'Not Acceptable',
'URI not available in preferred format')
PROXY_AUTHENTICATION_REQUIRED = (407,
'Proxy Authentication Required',
'You must authenticate with this proxy before proceeding')
REQUEST_TIMEOUT = (408, 'Request Timeout',
'Request timed out; try again later')
CONFLICT = 409, 'Conflict', 'Request conflict'
GONE = (410, 'Gone',
'URI no longer exists and has been permanently removed')
LENGTH_REQUIRED = (411, 'Length Required',
'Client must specify Content-Length')
PRECONDITION_FAILED = (412, 'Precondition Failed',
'Precondition in headers is false')
REQUEST_ENTITY_TOO_LARGE = (413, 'Request Entity Too Large',
'Entity is too large')
REQUEST_URI_TOO_LONG = (414, 'Request-URI Too Long',
'URI is too long')
UNSUPPORTED_MEDIA_TYPE = (415, 'Unsupported Media Type',
'Entity body in unsupported format')
REQUESTED_RANGE_NOT_SATISFIABLE = (416,
'Requested Range Not Satisfiable',
'Cannot satisfy request range')
EXPECTATION_FAILED = (417, 'Expectation Failed',
'Expect condition could not be satisfied')
UNPROCESSABLE_ENTITY = 422, 'Unprocessable Entity'
LOCKED = 423, 'Locked'
FAILED_DEPENDENCY = 424, 'Failed Dependency'
UPGRADE_REQUIRED = 426, 'Upgrade Required'
PRECONDITION_REQUIRED = (428, 'Precondition Required',
'The origin server requires the request to be conditional')
TOO_MANY_REQUESTS = (429, 'Too Many Requests',
'The user has sent too many requests in '
'a given amount of time ("rate limiting")')
REQUEST_HEADER_FIELDS_TOO_LARGE = (431,
'Request Header Fields Too Large',
'The server is unwilling to process the request because its header '
'fields are too large')
# server errors
INTERNAL_SERVER_ERROR = (500, 'Internal Server Error',
'Server got itself in trouble')
NOT_IMPLEMENTED = (501, 'Not Implemented',
'Server does not support this operation')
BAD_GATEWAY = (502, 'Bad Gateway',
'Invalid responses from another server/proxy')
SERVICE_UNAVAILABLE = (503, 'Service Unavailable',
'The server cannot process the request due to a high load')
GATEWAY_TIMEOUT = (504, 'Gateway Timeout',
'The gateway server did not receive a timely response')
HTTP_VERSION_NOT_SUPPORTED = (505, 'HTTP Version Not Supported',
'Cannot fulfill request')
VARIANT_ALSO_NEGOTIATES = 506, 'Variant Also Negotiates'
INSUFFICIENT_STORAGE = 507, 'Insufficient Storage'
LOOP_DETECTED = 508, 'Loop Detected'
NOT_EXTENDED = 510, 'Not Extended'
NETWORK_AUTHENTICATION_REQUIRED = (511,
'Network Authentication Required',
'The client needs to authenticate to gain network access')
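# --- Illustrative sketch (not part of the original module) ---
# The comparison overrides above allow members to be compared against plain
# integers and against (code, phrase, ...) tuples:
#
#     assert HTTPStatus.OK == 200
#     assert HTTPStatus.NOT_FOUND == (404, 'Not Found')   # tuples compare on the status code
#     assert HTTPStatus.OK < HTTPStatus.MULTIPLE_CHOICES  # 200 < 300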
/sanic-restplus-0.6.4.tar.gz/sanic-restplus-0.6.4/sanic_restplus/_http.py
import logging
import re
from collections import OrderedDict
from inspect import isclass
from .errors import RestError
log = logging.getLogger(__name__)
LEXER = re.compile(r'\{|\}|\,|[\w_:\-\*]+')
class MaskError(RestError):
'''Raised when an error occurs on mask'''
pass
class ParseError(MaskError):
'''Raised when the mask parsing failed'''
pass
class Mask(OrderedDict):
'''
Hold a parsed mask.
:param str|dict|Mask mask: A mask, parsed or not
:param bool skip: If ``True``, missing fields won't appear in result
'''
def __init__(self, mask=None, skip=False, **kwargs):
self.skip = skip
if isinstance(mask, str):
super(Mask, self).__init__()
self.parse(mask)
elif isinstance(mask, (dict, OrderedDict)):
super(Mask, self).__init__(mask, **kwargs)
else:
self.skip = skip
super(Mask, self).__init__(**kwargs)
def parse(self, mask):
'''
Parse a fields mask.
Expect something in the form::
{field,nested{nested_field,another},last}
External brackets are optional, so it can also be written::
field,nested{nested_field,another},last
All extra characters will be ignored.
:param str mask: the mask string to parse
:raises ParseError: when a mask is unparseable/invalid
'''
if not mask:
return
mask = self.clean(mask)
fields = self
previous = None
stack = []
for token in LEXER.findall(mask):
if token == '{':
if previous not in fields:
raise ParseError('Unexpected opening bracket')
fields[previous] = Mask(skip=self.skip)
stack.append(fields)
fields = fields[previous]
elif token == '}':
if not stack:
raise ParseError('Unexpected closing bracket')
fields = stack.pop()
elif token == ',':
if previous in (',', '{', None):
raise ParseError('Unexpected comma')
else:
fields[token] = True
previous = token
if stack:
raise ParseError('Missing closing bracket')
def clean(self, mask):
'''Remove unnecessary characters'''
mask = mask.replace('\n', '').strip()
# External brackets are optional
if mask[0] == '{':
if mask[-1] != '}':
raise ParseError('Missing closing bracket')
mask = mask[1:-1]
return mask
def apply(self, data):
'''
Apply a fields mask to the data.
:param data: The data or model to apply mask on
:raises MaskError: when unable to apply the mask
'''
from . import fields
# Should handle lists
if isinstance(data, (list, tuple, set)):
return [self.apply(d) for d in data]
elif isinstance(data, (fields.Nested, fields.List, fields.Polymorph)):
return data.clone(self)
elif type(data) == fields.Raw:
return fields.Raw(default=data.default, attribute=data.attribute, mask=self)
elif data == fields.Raw:
return fields.Raw(mask=self)
elif isinstance(data, fields.Raw) or isclass(data) and issubclass(data, fields.Raw):
# Not possible to apply a mask on these remaining fields types
raise MaskError('Mask is inconsistent with model')
# Should handle objects
elif (not isinstance(data, (dict, OrderedDict)) and hasattr(data, '__dict__')):
data = data.__dict__
return self.filter_data(data)
def filter_data(self, data):
'''
Handle the data filtering given a parsed mask
:param dict data: the raw data to filter
:param list mask: a parsed mask to filter against
:param bool skip: whether or not to skip missing fields
'''
out = {}
for field, content in self.items():
if field == '*':
continue
elif isinstance(content, Mask):
nested = data.get(field, None)
if self.skip and nested is None:
continue
elif nested is None:
out[field] = None
else:
out[field] = content.apply(nested)
elif self.skip and field not in data:
continue
else:
out[field] = data.get(field, None)
if '*' in self.keys():
for key, value in data.items():
if key not in out:
out[key] = value
return out
def __str__(self):
return '{{{0}}}'.format(','.join([
''.join((k, str(v))) if isinstance(v, Mask) else k
for k, v in self.items()
]))
def apply(data, mask, skip=False):
'''
Apply a fields mask to the data.
:param data: The data or model to apply mask on
:param str|Mask mask: the mask (parsed or not) to apply on data
:param bool skip: If ``True``, missing fields won't appear in the result
:raises MaskError: when unable to apply the mask
'''
return Mask(mask, skip).apply(data)
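# --- Illustrative usage sketch (not part of the original module) ---
# Parsing a mask and applying it to a plain dict; the data below is invented
# purely for illustration.
#
#     mask = Mask('{name,address{city}}')
#     data = {'name': 'Ada', 'age': 36, 'address': {'city': 'London', 'zip': 'N1'}}
#     assert mask.apply(data) == {'name': 'Ada', 'address': {'city': 'London'}}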
/sanic-restplus-0.6.4.tar.gz/sanic-restplus-0.6.4/sanic_restplus/mask.py
import re
import fnmatch
import inspect
from calendar import timegm
from datetime import date, datetime
from decimal import Decimal, ROUND_HALF_EVEN
from email.utils import formatdate
from functools import lru_cache
from urllib.parse import urlparse, urlunparse
from .inputs import date_from_iso8601, datetime_from_iso8601, datetime_from_rfc822, boolean
from .errors import RestError
from .marshalling import marshal
from .utils import camel_to_dash, not_none
__all__ = ('Raw', 'String', 'FormattedString', 'Url', 'DateTime', 'Date',
'Boolean', 'Integer', 'Float', 'Arbitrary', 'Fixed',
'Nested', 'List', 'ClassName', 'Polymorph', 'Wildcard',
'StringMixin', 'MinMaxMixin', 'NumberMixin', 'MarshallingError')
class MarshallingError(RestError):
"""
This is an encapsulating Exception in case of marshalling error.
"""
def __init__(self, underlying_exception):
# just put the contextual representation of the error to hint on what
# went wrong without exposing internals
super(MarshallingError, self).__init__(str(underlying_exception))
def is_indexable_but_not_string(obj):
return not hasattr(obj, "strip") and hasattr(obj, "__iter__")
def get_value(key, obj, default=None):
'''Helper for pulling a keyed value off various types of objects'''
if isinstance(key, int):
return _get_value_for_key(key, obj, default)
elif callable(key):
return key(obj)
else:
return _get_value_for_keys(key.split('.'), obj, default)
def _get_value_for_keys(keys, obj, default):
if len(keys) == 1:
return _get_value_for_key(keys[0], obj, default)
else:
return _get_value_for_keys(
keys[1:], _get_value_for_key(keys[0], obj, default), default)
def _get_value_for_key(key, obj, default):
if is_indexable_but_not_string(obj):
try:
return obj[key]
except (IndexError, TypeError, KeyError):
pass
return getattr(obj, key, default)
def to_marshallable_type(obj):
'''
Helper for converting an object to a dictionary, unless it is already
a dictionary, an indexable object, or a simple type
'''
if obj is None:
return None # make it idempotent for None
if hasattr(obj, '__marshallable__'):
return obj.__marshallable__()
if hasattr(obj, '__getitem__'):
return obj # it is indexable it is ok
return dict(obj.__dict__)
class Raw(object):
'''
Raw provides a base field class from which others should extend. It
applies no formatting by default, and should only be used in cases where
data does not need to be formatted before being serialized. Fields should
throw a :class:`MarshallingError` in case of parsing problem.
:param default: The default value for the field, if no value is
specified.
:param attribute: If the public facing value differs from the internal
value, use this to retrieve a different attribute from the response
than the publicly named value.
:param str title: The field title (for documentation purpose)
:param str description: The field description (for documentation purpose)
:param bool required: Is the field required?
:param bool readonly: Is the field read-only? (for documentation purpose)
:param example: An optional data example (for documentation purpose)
:param callable mask: An optional mask function to be applied to output
'''
#: The JSON/Swagger schema type
__schema_type__ = 'object'
#: The JSON/Swagger schema format
__schema_format__ = None
#: An optional JSON/Swagger schema example
__schema_example__ = None
def __init__(self, default=None, attribute=None, title=None, description=None,
required=None, readonly=None, example=None, mask=None, **kwargs):
self.attribute = attribute
self.default = default
self.title = title
self.description = description
self.required = required
self.readonly = readonly
self.example = example or self.__schema_example__
self.mask = mask
def format(self, value):
'''
Formats a field's value. No-op by default - field classes that
modify how the value of existing object keys should be presented should
override this and apply the appropriate formatting.
:param value: The value to format
:raises MarshallingError: In case of formatting problem
Ex::
class TitleCase(Raw):
def format(self, value):
return unicode(value).title()
'''
return value
def output(self, key, obj, **kwargs):
'''
Pulls the value for the given key from the object, applies the
field's formatting and returns the result. If the key is not found
in the object, returns the default value. Field classes that create
values which do not require the existence of the key in the object
should override this and return the desired value.
:raises MarshallingError: In case of formatting problem
'''
value = get_value(key if self.attribute is None else self.attribute, obj)
if value is None:
default = self._v('default')
return self.format(default) if default else default
try:
data = self.format(value)
except MarshallingError as e:
msg = 'Unable to marshal field "{0}" value "{1}": {2}'.format(key, value, str(e))
raise MarshallingError(msg)
return self.mask.apply(data) if self.mask else data
def _v(self, key):
'''Helper for getting a value from attribute allowing callable'''
value = getattr(self, key)
return value() if callable(value) else value
@property
@lru_cache()
def __schema__(self):
return not_none(self.schema())
def schema(self):
return {
'type': self.__schema_type__,
'format': self.__schema_format__,
'title': self.title,
'description': self.description,
'readOnly': self.readonly,
'default': self._v('default'),
'example': self.example,
}
class Nested(Raw):
'''
Allows you to nest one set of fields inside another.
See :ref:`nested-field` for more information
:param dict model: The model dictionary to nest
:param bool allow_null: Whether to return None instead of a dictionary
with null keys, if a nested dictionary has all-null keys
:param bool skip_none: Optional key used to eliminate inner fields whose
value is None or whose key does not exist in the data
:param kwargs: If ``default`` keyword argument is present, a nested
dictionary will be marshaled as its value if nested dictionary is
all-null keys (e.g. lets you return an empty JSON object instead of
null)
'''
__schema_type__ = None
def __init__(self, model, allow_null=False, skip_none=False, as_list=False, **kwargs):
self.model = model
self.as_list = as_list
self.allow_null = allow_null
self.skip_none = skip_none
super(Nested, self).__init__(**kwargs)
@property
def nested(self):
return getattr(self.model, 'resolved', self.model)
def output(self, key, obj, ordered=False, **kwargs):
value = get_value(key if self.attribute is None else self.attribute, obj)
if value is None:
if self.allow_null:
return None
elif self.default is not None:
return self.default
return marshal(value, self.nested, skip_none=self.skip_none, ordered=ordered)
def schema(self):
schema = super(Nested, self).schema()
ref = '#/definitions/{0}'.format(self.nested.name)
if self.as_list:
schema['type'] = 'array'
schema['items'] = {'$ref': ref}
elif any(schema.values()):
# There is already some properties in the schema
allOf = schema.get('allOf', [])
allOf.append({'$ref': ref})
schema['allOf'] = allOf
else:
schema['$ref'] = ref
return schema
def clone(self, mask=None):
kwargs = self.__dict__.copy()
model = kwargs.pop('model')
if mask:
model = mask.apply(model.resolved if hasattr(model, 'resolved') else model)
return self.__class__(model, **kwargs)
class List(Raw):
'''
Field for marshalling lists of other fields.
See :ref:`list-field` for more information.
:param cls_or_instance: The field type the list will contain.
'''
def __init__(self, cls_or_instance, **kwargs):
self.min_items = kwargs.pop('min_items', None)
self.max_items = kwargs.pop('max_items', None)
self.unique = kwargs.pop('unique', None)
super(List, self).__init__(**kwargs)
error_msg = 'The type of the list elements must be a subclass of fields.Raw'
if isinstance(cls_or_instance, type):
if not issubclass(cls_or_instance, Raw):
raise MarshallingError(error_msg)
self.container = cls_or_instance()
else:
if not isinstance(cls_or_instance, Raw):
raise MarshallingError(error_msg)
self.container = cls_or_instance
def format(self, value):
# Convert all instances in typed list to container type
if isinstance(value, set):
value = list(value)
is_nested = isinstance(self.container, Nested) or type(self.container) is Raw
def is_attr(val):
return self.container.attribute and hasattr(val, self.container.attribute)
return [
self.container.output(idx,
val if (isinstance(val, dict) or is_attr(val)) and not is_nested else value)
for idx, val in enumerate(value)
]
def output(self, key, data, ordered=False, **kwargs):
value = get_value(key if self.attribute is None else self.attribute, data)
# we cannot really test for external dict behavior
if is_indexable_but_not_string(value) and not isinstance(value, dict):
return self.format(value)
if value is None:
return self._v('default')
return [marshal(value, self.container.nested)]
def schema(self):
schema = super(List, self).schema()
schema.update(minItems=self._v('min_items'),
maxItems=self._v('max_items'),
uniqueItems=self._v('unique'))
schema['type'] = 'array'
schema['items'] = self.container.__schema__
return schema
def clone(self, mask=None):
kwargs = self.__dict__.copy()
model = kwargs.pop('container')
if mask:
model = mask.apply(model)
return self.__class__(model, **kwargs)
class StringMixin(object):
__schema_type__ = 'string'
def __init__(self, *args, **kwargs):
self.min_length = kwargs.pop('min_length', None)
self.max_length = kwargs.pop('max_length', None)
self.pattern = kwargs.pop('pattern', None)
super(StringMixin, self).__init__(*args, **kwargs)
def schema(self):
schema = super(StringMixin, self).schema()
schema.update(minLength=self._v('min_length'),
maxLength=self._v('max_length'),
pattern=self._v('pattern'))
return schema
class MinMaxMixin(object):
def __init__(self, *args, **kwargs):
self.minimum = kwargs.pop('min', None)
self.exclusiveMinimum = kwargs.pop('exclusiveMin', None)
self.maximum = kwargs.pop('max', None)
self.exclusiveMaximum = kwargs.pop('exclusiveMax', None)
super(MinMaxMixin, self).__init__(*args, **kwargs)
def schema(self):
schema = super(MinMaxMixin, self).schema()
schema.update(minimum=self._v('minimum'),
exclusiveMinimum=self._v('exclusiveMinimum'),
maximum=self._v('maximum'),
exclusiveMaximum=self._v('exclusiveMaximum'))
return schema
class NumberMixin(MinMaxMixin):
__schema_type__ = 'number'
def __init__(self, *args, **kwargs):
self.multiple = kwargs.pop('multiple', None)
super(NumberMixin, self).__init__(*args, **kwargs)
def schema(self):
schema = super(NumberMixin, self).schema()
schema.update(multipleOf=self._v('multiple'))
return schema
class String(StringMixin, Raw):
'''
Marshal a value as a string.
'''
def __init__(self, *args, **kwargs):
self.enum = kwargs.pop('enum', None)
self.discriminator = kwargs.pop('discriminator', None)
super(String, self).__init__(*args, **kwargs)
self.required = self.discriminator or self.required
def format(self, value):
try:
return str(value)
except ValueError as ve:
raise MarshallingError(ve)
def schema(self):
enum = self._v('enum')
schema = super(String, self).schema()
if enum:
schema.update(enum=enum)
if enum and schema['example'] is None:
schema['example'] = enum[0]
return schema
class Integer(NumberMixin, Raw):
'''
Field for outputting an integer value.
:param int default: The default value for the field, if no value is specified.
'''
__schema_type__ = 'integer'
def format(self, value):
try:
if value is None:
return self.default
return int(value)
except ValueError as ve:
raise MarshallingError(ve)
class Float(NumberMixin, Raw):
'''
A double-precision floating point number as per IEEE 754.
ex: 3.141592653589793 3.1415926535897933e-06 3.141592653589793e+24 nan inf -inf
'''
def format(self, value):
try:
return float(value)
except ValueError as ve:
raise MarshallingError(ve)
class Arbitrary(NumberMixin, Raw):
'''
A floating point number with an arbitrary precision.
ex: 634271127864378216478362784632784678324.23432
'''
def format(self, value):
return str(Decimal(value))
ZERO = Decimal()
class Fixed(NumberMixin, Raw):
'''
A decimal number with a fixed precision.
'''
def __init__(self, decimals=5, **kwargs):
super(Fixed, self).__init__(**kwargs)
self.precision = Decimal('0.' + '0' * (decimals - 1) + '1')
def format(self, value):
dvalue = Decimal(value)
if not dvalue.is_normal() and dvalue != ZERO:
raise MarshallingError('Invalid Fixed precision number.')
return str(dvalue.quantize(self.precision, rounding=ROUND_HALF_EVEN))
class Boolean(Raw):
'''
Field for outputting a boolean value.
Empty collections such as ``""``, ``{}``, ``[]``, etc. will be converted to ``False``.
'''
__schema_type__ = 'boolean'
def format(self, value):
return boolean(value)
class DateTime(MinMaxMixin, Raw):
'''
Return a formatted datetime string in UTC. Supported formats are RFC 822 and ISO 8601.
See :func:`email.utils.formatdate` for more info on the RFC 822 format.
See :meth:`datetime.datetime.isoformat` for more info on the ISO 8601 format.
:param str dt_format: ``rfc822`` or ``iso8601``
'''
__schema_type__ = 'string'
__schema_format__ = 'date-time'
def __init__(self, dt_format='iso8601', **kwargs):
super(DateTime, self).__init__(**kwargs)
self.dt_format = dt_format
def parse(self, value):
if value is None:
return None
elif isinstance(value, str):
parser = datetime_from_iso8601 if self.dt_format == 'iso8601' else datetime_from_rfc822
return parser(value)
elif isinstance(value, datetime):
return value
elif isinstance(value, date):
return datetime(value.year, value.month, value.day)
else:
raise ValueError('Unsupported DateTime format')
def format(self, value):
try:
value = self.parse(value)
if self.dt_format == 'iso8601':
return self.format_iso8601(value)
elif self.dt_format == 'rfc822':
return self.format_rfc822(value)
else:
raise MarshallingError(
'Unsupported date format %s' % self.dt_format
)
except (AttributeError, ValueError) as e:
raise MarshallingError(e)
def format_rfc822(self, dt):
'''
Turn a datetime object into a formatted date.
:param datetime dt: The datetime to transform
:return: A RFC 822 formatted date string
'''
return formatdate(timegm(dt.utctimetuple()))
def format_iso8601(self, dt):
'''
Turn a datetime object into an ISO8601 formatted date.
:param datetime dt: The datetime to transform
:return: A ISO 8601 formatted date string
'''
return dt.isoformat()
def _for_schema(self, name):
value = self.parse(self._v(name))
return self.format(value) if value else None
def schema(self):
schema = super(DateTime, self).schema()
schema['default'] = self._for_schema('default')
schema['minimum'] = self._for_schema('minimum')
schema['maximum'] = self._for_schema('maximum')
return schema
class Date(DateTime):
'''
Return a formatted date string in UTC in ISO 8601.
See :meth:`datetime.date.isoformat` for more info on the ISO 8601 format.
'''
__schema_format__ = 'date'
def __init__(self, **kwargs):
kwargs.pop('dt_format', None)
super(Date, self).__init__(dt_format='iso8601', **kwargs)
def parse(self, value):
if value is None:
return None
elif isinstance(value, str):
return date_from_iso8601(value)
elif isinstance(value, datetime):
return value.date()
elif isinstance(value, date):
return value
else:
raise ValueError('Unsupported Date format')
class Url(StringMixin, Raw):
'''
A string representation of a Url
:param str endpoint: Endpoint name. If endpoint is ``None``, ``request.endpoint`` is used instead
:param bool absolute: If ``True``, ensures that the generated urls will have the hostname included
:param str scheme: URL scheme specifier (e.g. ``http``, ``https``)
'''
def __init__(self, endpoint=None, absolute=False, scheme=None, **kwargs):
super(Url, self).__init__(**kwargs)
self.endpoint = endpoint
self.absolute = absolute
self.scheme = scheme
def output(self, key, obj, **kwargs):
raise NotImplementedError("fields.Url is not implemented on Sanic-Restplus")
try:
data = to_marshallable_type(obj)
endpoint = self.endpoint if self.endpoint is not None else request.endpoint
o = urlparse(url_for(endpoint, _external=self.absolute, **data))
if self.absolute:
scheme = self.scheme if self.scheme is not None else o.scheme
return urlunparse((scheme, o.netloc, o.path, "", "", ""))
return urlunparse(("", "", o.path, "", "", ""))
except TypeError as te:
raise MarshallingError(te)
class FormattedString(StringMixin, Raw):
'''
FormattedString is used to interpolate other values from
the response into this field. The syntax for the source string is
the same as the string :meth:`~str.format` method from the python
stdlib.
Ex::
fields = {
'name': fields.String,
'greeting': fields.FormattedString("Hello {name}")
}
data = {
'name': 'Doug',
}
marshal(data, fields)
:param str src_str: the string to format with the other values from the response.
'''
def __init__(self, src_str, **kwargs):
super(FormattedString, self).__init__(**kwargs)
self.src_str = str(src_str)
def output(self, key, obj, **kwargs):
try:
data = to_marshallable_type(obj)
return self.src_str.format(**data)
except (TypeError, IndexError) as error:
raise MarshallingError(error)
class ClassName(String):
'''
Return the serialized object class name as string.
:param bool dash: If `True`, transform CamelCase to kebab_case.
'''
def __init__(self, dash=False, **kwargs):
super(ClassName, self).__init__(**kwargs)
self.dash = dash
def output(self, key, obj, **kwargs):
classname = obj.__class__.__name__
if classname == 'dict':
return 'object'
return camel_to_dash(classname) if self.dash else classname
class Polymorph(Nested):
'''
A Nested field handling inheritance.
Allows you to specify a mapping between Python classes and fields specifications.
.. code-block:: python
mapping = {
Child1: child1_fields,
Child2: child2_fields,
}
fields = api.model('Thing', {
owner: fields.Polymorph(mapping)
})
:param dict mapping: Maps classes to their model/fields representation
'''
def __init__(self, mapping, required=False, **kwargs):
self.mapping = mapping
parent = self.resolve_ancestor(list(mapping.values()))
super(Polymorph, self).__init__(parent, allow_null=not required, **kwargs)
def output(self, key, obj, ordered=False, **kwargs):
# Copied from upstream NestedField
value = get_value(key if self.attribute is None else self.attribute, obj)
if value is None:
if self.allow_null:
return None
elif self.default is not None:
return self.default
# Handle mappings
if not hasattr(value, '__class__'):
raise ValueError('Polymorph field only accept class instances')
candidates = [fields for cls, fields in self.mapping.items() if isinstance(value, cls)]
if len(candidates) <= 0:
raise ValueError('Unknown class: ' + value.__class__.__name__)
elif len(candidates) > 1:
raise ValueError('Unable to determine a candidate for: ' + value.__class__.__name__)
else:
return marshal(value, candidates[0].resolved, mask=self.mask, ordered=ordered)
def resolve_ancestor(self, models):
'''
Resolve the common ancestor for all models.
Assume there is only one common ancestor.
'''
ancestors = [m.ancestors for m in models]
candidates = set.intersection(*ancestors)
if len(candidates) != 1:
field_names = [f.name for f in models]
raise ValueError('Unable to determine the common ancestor for: ' + ', '.join(field_names))
parent_name = candidates.pop()
return models[0].get_parent(parent_name)
def clone(self, mask=None):
data = self.__dict__.copy()
mapping = data.pop('mapping')
for field in ('allow_null', 'model'):
data.pop(field, None)
data['mask'] = mask
return Polymorph(mapping, **data)
class Wildcard(Raw):
'''
Field for marshalling a list of "unknown" fields.
:param cls_or_instance: The field type the list will contain.
'''
exclude = set()
# cache the flat object
_flat = None
_obj = None
_cache = set()
_last = None
def __init__(self, cls_or_instance, **kwargs):
super(Wildcard, self).__init__(**kwargs)
error_msg = 'The type of the wildcard elements must be a subclass of fields.Raw'
if isinstance(cls_or_instance, type):
if not issubclass(cls_or_instance, Raw):
raise MarshallingError(error_msg)
self.container = cls_or_instance()
else:
if not isinstance(cls_or_instance, Raw):
raise MarshallingError(error_msg)
self.container = cls_or_instance
def _flatten(self, obj):
if obj is None:
return None
if obj == self._obj and self._flat is not None:
return self._flat
if isinstance(obj, dict):
self._flat = [(k,v) for k,v in obj.items()]
else:
def __match_attributes(attribute):
attr_name, attr_obj = attribute
if inspect.isroutine(attr_obj) or \
(attr_name.startswith('__') and attr_name.endswith('__')):
return False
return True
attributes = inspect.getmembers(obj)
self._flat = [x for x in attributes if __match_attributes(x)]
self._cache = set()
self._obj = obj
return self._flat
@property
def key(self):
return self._last
def reset(self):
self.exclude = set()
self._flat = None
self._obj = None
self._cache = set()
self._last = None
def output(self, key, obj, ordered=False):
value = None
reg = fnmatch.translate(key)
if self._flatten(obj):
while True:
try:
# we are using pop() so that we don't
# loop over the whole object every time dropping the
# complexity to O(n)
(objkey, val) = self._flat.pop()
if objkey not in self._cache and \
objkey not in self.exclude and \
re.match(reg, objkey, re.IGNORECASE):
value = val
self._cache.add(objkey)
self._last = objkey
break
except IndexError:
break
if value is None:
if self.default is not None:
return self.container.format(self.default)
return None
return self.container.format(value)
def schema(self):
schema = super(Wildcard, self).schema()
schema['type'] = 'object'
schema['additionalProperties'] = self.container.__schema__
return schema
def clone(self):
kwargs = self.__dict__.copy()
model = kwargs.pop('container')
return self.__class__(model, **kwargs)
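# --- Illustrative usage sketch (not part of the original module) ---
# Marshalling a plain dict with the field types defined above, using the
# `marshal` helper imported at the top of this module; the field names and
# data are invented for illustration.
#
#     user_fields = {'name': String(), 'age': Integer()}
#     marshal({'name': 'Ada', 'age': 36, 'secret': 'x'}, user_fields)
#     # roughly -> {'name': 'Ada', 'age': 36}  ('secret' is not in the field spec)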
/sanic-restplus-0.6.4.tar.gz/sanic-restplus-0.6.4/sanic_restplus/fields.py
# Sanic Routing
## Background
Beginning in v21.3, Sanic makes use of this new AST-style router in two use cases:
1. Routing paths; and
2. Routing signals.
Therefore, this package comes with a `BaseRouter` that needs to be subclassed in order to be used for its specific needs.
Most Sanic users should never need to concern themselves with the details here.
## Basic Example
A simple implementation:
```python
import logging
from sanic_routing import BaseRouter
logging.basicConfig(level=logging.DEBUG)
class Router(BaseRouter):
def get(self, path, *args, **kwargs):
return self.resolve(path, *args, **kwargs)
router = Router()
router.add("/<foo>", lambda: ...)
router.finalize()
router.tree.display()
logging.info(router.find_route_src)
route, handler, params = router.get("/matchme", method="BASE", extra=None)
```
The above snippet uses `router.tree.display()` to show how the router has decided to arrange the routes into a tree. In this simple example:
```
<Node: level=0>
<Node: part=__dynamic__:str, level=1, groups=[<RouteGroup: path=<foo:str> len=1>], dynamic=True>
```
We can see the code that the router has generated for us. It is available as a string at `router.find_route_src`.
```python
def find_route(path, method, router, basket, extra):
parts = tuple(path[1:].split(router.delimiter))
num = len(parts)
# node=1 // part=__dynamic__:str
if num == 1: # CHECK 1
try:
basket['__matches__'][0] = str(parts[0])
except ValueError:
pass
else:
# Return 1
return router.dynamic_routes[('<__dynamic__:str>',)][0], basket
raise NotFound
```
_FYI: If you are on Python 3.9, you can see a representation of the source after compilation at `router.find_route_src_compiled`_
## What's it doing?
In general, an implementation requires you to:
1. Define a router with a `get` method;
2. Add one or more routes;
3. Finalize the router (`router.finalize()`); and
4. Call the router's `get` method.
_NOTE: You can call `router.finalize(False)` if you do not want to compile the source code into executable form. This is useful if you only intend to review the generated output._
Every time you call `router.add` you create one (1) new `Route` instance. Even if that one route is created with multiple methods, it generates a single instance. If you `add()` another `Route` that has a similar path structure (but perhaps different methods), they will be grouped together into a `RouteGroup`. It is also worth noting that a `RouteGroup` is created the first time you call `add()`, but subsequent similar routes will reuse the existing grouping instance.
When you call `finalize()`, it is taking the defined route groups and arranging them into "nodes" in a hierarchical tree. A single node is a path segment. A `Node` instance can have one or more `RouteGroup` on it where the `Node` is the termination point for that path.
Perhaps an example is easier:
```python
router.add("/path/to/<foo>", lambda: ...)
router.add("/path/to/<foo:int>", lambda: ...)
router.add("/path/to/different/<foo>", lambda: ...)
router.add("/path/to/different/<foo>", lambda: ..., methods=["one", "two"])
```
The generated `RouteGroup` instances (3):
```
<RouteGroup: path=path/to/<foo:str> len=1>
<RouteGroup: path=path/to/<foo:int> len=1>
<RouteGroup: path=path/to/different/<foo:str> len=2>
```
The generated `Route` instances (4):
```
<Route: path=path/to/<foo:str>>
<Route: path=path/to/<foo:int>>
<Route: path=path/to/different/<foo:str>>
<Route: path=path/to/different/<foo:str>>
```
The Node Tree:
```
<Node: level=0>
<Node: part=path, level=1>
<Node: part=to, level=2>
<Node: part=different, level=3>
<Node: part=__dynamic__:str, level=4, groups=[<RouteGroup: path=path/to/different/<foo:str> len=2>], dynamic=True>
<Node: part=__dynamic__:int, level=3, groups=[<RouteGroup: path=path/to/<foo:int> len=1>], dynamic=True>
<Node: part=__dynamic__:str, level=3, groups=[<RouteGroup: path=path/to/<foo:str> len=1>], dynamic=True>
```
And, the generated source code:
```python
def find_route(path, method, router, basket, extra):
parts = tuple(path[1:].split(router.delimiter))
num = len(parts)
# node=1 // part=path
if num > 1: # CHECK 1
if parts[0] == "path": # CHECK 4
# node=1.1 // part=to
if num > 2: # CHECK 1
if parts[1] == "to": # CHECK 4
# node=1.1.1 // part=different
if num > 3: # CHECK 1
if parts[2] == "different": # CHECK 4
# node=1.1.1.1 // part=__dynamic__:str
if num == 4: # CHECK 1
try:
basket['__matches__'][3] = str(parts[3])
except ValueError:
pass
else:
if method in frozenset({'one', 'two'}):
route_idx = 0
elif method in frozenset({'BASE'}):
route_idx = 1
else:
raise NoMethod
# Return 1.1.1.1
return router.dynamic_routes[('path', 'to', 'different', '<__dynamic__:str>')][route_idx], basket
# node=1.1.2 // part=__dynamic__:int
if num >= 3: # CHECK 1
try:
basket['__matches__'][2] = int(parts[2])
except ValueError:
pass
else:
if num == 3: # CHECK 5
# Return 1.1.2
return router.dynamic_routes[('path', 'to', '<__dynamic__:int>')][0], basket
# node=1.1.3 // part=__dynamic__:str
if num >= 3: # CHECK 1
try:
basket['__matches__'][2] = str(parts[2])
except ValueError:
pass
else:
if num == 3: # CHECK 5
# Return 1.1.3
return router.dynamic_routes[('path', 'to', '<__dynamic__:str>')][0], basket
raise NotFound
```
## Special cases
The above example only shows routes that have a dynamic path segment in them (example: `<foo>`). But there are other use cases that are covered differently (a short sketch follows this list):
1. *fully static paths* - These are paths with no parameters (example: `/user/login`). These are basically matched against a key/value store.
2. *regex paths* - If a route has a single regular expression match, then the whole route will be matched via regex. In general, this happens inline, not too dissimilar from what we see in the above example.
3. *special regex paths* - The router comes with a special `path` type (example: `<foo:path>`) that can match on an expanded delimiter. This is also true for any regex that uses the path delimiter in it. These cannot be matched in the normal course since they are of unknown length.
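For illustration, a sketch of how each case might be registered against the example router from earlier (the handler stubs and paths here are invented):
```python
router.add("/user/login", lambda: ...)           # fully static: key/value lookup
router.add("/file/<name:[a-z]+>", lambda: ...)   # regex path: matched via regex
router.add("/assets/<rest:path>", lambda: ...)   # special `path` type: spans delimiters
```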
/sanic-routing-23.6.0.tar.gz/sanic-routing-23.6.0/README.md
import re
import typing as t
from types import SimpleNamespace
from warnings import warn
from .exceptions import InvalidUsage, ParameterNameConflicts
from .patterns import ParamInfo
from .utils import Immutable, parts_to_path, path_to_parts
class Requirements(Immutable):
def __hash__(self):
return hash(frozenset(self.items()))
class Route:
__slots__ = (
"_params",
"_raw_path",
"ctx",
"extra",
"handler",
"labels",
"methods",
"name",
"overloaded",
"params",
"parts",
"path",
"pattern",
"regex",
"requirements",
"router",
"static",
"strict",
"unquote",
)
#: A container for route meta-data
ctx: SimpleNamespace
#: A container for route application-data
extra: SimpleNamespace
#: The route handler
handler: t.Callable[..., t.Any]
#: The HTTP methods that the route can handle
methods: t.FrozenSet[str]
#: The route name, either generated or as defined in the route definition
name: str
#: The raw version of the path exploded (see also
#: :py:attr:`~sanic_routing.route.Route.segments`)
parts: t.Tuple[str, ...]
#: The _reconstructed_ path after the Route has been normalized.
#: Does not contain preceding ``/`` (see also
#: :py:attr:`~sanic_routing.route.Route.uri`)
path: str
#: A regex version of the :py:attr:`~sanic_routing.route.Route.path`
pattern: t.Optional[str]
#: Whether the route requires regular expression evaluation
regex: bool
#: A representation of the non-path route requirements
requirements: Requirements
#: When ``True``, the route does not have any dynamic path parameters
static: bool
#: Whether the route should be matched with strict evaluation
strict: bool
#: Whether the route should be unquoted after matching if (for example) it
#: is suspected to contain non-URL friendly characters
unquote: bool
def __init__(
self,
router,
raw_path: str,
name: str,
handler: t.Callable[..., t.Any],
methods: t.Union[t.Sequence[str], t.FrozenSet[str]],
requirements: t.Optional[t.Dict[str, t.Any]] = None,
strict: bool = False,
unquote: bool = False,
static: bool = False,
regex: bool = False,
overloaded: bool = False,
):
self.router = router
self.name = name
self.handler = handler # type: ignore
self.methods = frozenset(methods)
self.requirements = Requirements(requirements or {})
self.ctx = SimpleNamespace()
self.extra = SimpleNamespace()
self._params: t.Dict[int, ParamInfo] = {}
self._raw_path = raw_path
# Main goal is to do some normalization. Any dynamic segments
# that are missing a type are rewritten with str type
ingested_path = self._ingest_path(raw_path)
# By passing the path back and forth to deconstruct and reconstruct
# we can normalize it and make sure we are dealing consistently
parts = path_to_parts(ingested_path, self.router.delimiter)
self.path = parts_to_path(parts, delimiter=self.router.delimiter)
self.parts = parts
self.static = static
self.regex = regex
self.overloaded = overloaded
self.pattern = None
self.strict: bool = strict
self.unquote: bool = unquote
self.labels: t.Optional[t.List[str]] = None
self._setup_params()
def __str__(self):
display = (
f"name={self.name} path={self.path or self.router.delimiter}"
if self.name and self.name != self.path
else f"path={self.path or self.router.delimiter}"
)
return f"<{self.__class__.__name__}: {display}>"
def __repr__(self) -> str:
return str(self)
def __eq__(self, other) -> bool:
if not isinstance(other, self.__class__):
return False
# Equality specifically uses self.segments and not self.parts.
# In general, these properties are nearly identical.
# self.segments is generalized and only displays dynamic param types
# and self.parts has both the param key and the param type.
# In this test, we use the & operator so that we create an intersection and a
# positive equality if there is one or more overlapping methods.
return bool(
(
self.segments,
self.requirements,
)
== (
other.segments,
other.requirements,
)
and (self.methods & other.methods)
)
def _ingest_path(self, path):
segments = []
for part in path.split(self.router.delimiter):
if part.startswith("<") and ":" not in part:
name = part[1:-1]
part = f"<{name}:str>"
segments.append(part)
return self.router.delimiter.join(segments)
def _setup_params(self):
key_path = parts_to_path(
path_to_parts(self.raw_path, self.router.delimiter),
self.router.delimiter,
)
if not self.static:
parts = path_to_parts(key_path, self.router.delimiter)
for idx, part in enumerate(parts):
if part.startswith("<"):
(
name,
label,
_type,
pattern,
param_info_class,
) = self.parse_parameter_string(part[1:-1])
self.add_parameter(
idx,
name,
key_path,
label,
_type,
pattern,
param_info_class,
)
def add_parameter(
self,
idx: int,
name: str,
raw_path: str,
label: str,
cast: t.Type,
pattern=None,
param_info_class=ParamInfo,
):
if pattern and isinstance(pattern, str):
if not pattern.startswith("^"):
pattern = f"^{pattern}"
if not pattern.endswith("$"):
pattern = f"{pattern}$"
pattern = re.compile(pattern)
is_regex = label not in self.router.regex_types
priority = (
0
if is_regex
else list(self.router.regex_types.keys()).index(label)
)
self._params[idx] = param_info_class(
name=name,
raw_path=raw_path,
label=label,
cast=cast,
pattern=pattern,
regex=is_regex,
priority=priority,
)
def _finalize_params(self):
params = dict(self._params)
label_pairs = set([(param.name, idx) for idx, param in params.items()])
labels = [item[0] for item in label_pairs]
if len(labels) != len(set(labels)):
raise ParameterNameConflicts(
f"Duplicate named parameters in: {self._raw_path}"
)
self.labels = labels
self.params = dict(
sorted(params.items(), key=lambda param: self._sorting(param[1]))
)
if not self.regex and any(
":" in param.label for param in self.params.values()
):
raise InvalidUsage(
f"Invalid parameter declaration: {self.raw_path}"
)
def _compile_regex(self):
components = []
for part in self.parts:
if part.startswith("<"):
name, *_, pattern, __ = self.parse_parameter_string(part)
if not isinstance(pattern, str):
pattern = pattern.pattern.strip("^$")
compiled = re.compile(pattern)
if compiled.groups == 1:
if compiled.groupindex:
if list(compiled.groupindex)[0] != name:
raise InvalidUsage(
f"Named group ({list(compiled.groupindex)[0]})"
f" must match your named parameter ({name})"
)
components.append(pattern)
else:
if pattern.count("(") > 1:
raise InvalidUsage(
f"Could not compile pattern {pattern}. "
"Try using a named group instead: "
f"'(?P<{name}>your_matching_group)'"
)
beginning, end = pattern.split("(")
components.append(f"{beginning}(?P<{name}>{end}")
elif compiled.groups > 1:
raise InvalidUsage(f"Invalid matching pattern {pattern}")
else:
components.append(f"(?P<{name}>{pattern})")
else:
components.append(part)
self.pattern = self.router.delimiter + self.router.delimiter.join(
components
)
def finalize(self):
self._finalize_params()
if self.regex:
self._compile_regex()
self.requirements = Immutable(self.requirements)
def reset(self):
self.requirements = dict(self.requirements)
@property
def defined_params(self):
return self._params
@property
def raw_path(self):
"""
The raw path from the route definition
"""
return self._raw_path
@property
def segments(self) -> t.Tuple[str, ...]:
"""
Same as :py:attr:`~sanic_routing.route.Route.parts` except
generalized so that any dynamic parts do not
include param keys since they have no impact on routing.
"""
return tuple(
f"<__dynamic__:{self._params[idx].label}>"
if idx in self._params
else segment
for idx, segment in enumerate(self.parts)
)
@property
def uri(self):
"""
Since :py:attr:`~sanic_routing.route.Route.path` does NOT
include a preceding '/', this adds it back.
"""
return f"{self.router.delimiter}{self.path}"
def _sorting(self, item) -> int:
try:
return list(self.router.regex_types.keys()).index(item.label)
except ValueError:
return len(list(self.router.regex_types.keys()))
def parse_parameter_string(self, parameter_string: str):
"""Parse a parameter string into its constituent name, type, and
pattern
For example::
parse_parameter_string('<param_one:[A-z]>') ->
('param_one', '[A-z]', <class 'str'>, '[A-z]', ParamInfo)
:param parameter_string: String to parse
:return: tuple containing
(parameter_name, parameter_label, parameter_type, parameter_pattern,
param_info_class)
"""
# We could receive NAME or NAME:PATTERN
parameter_string = parameter_string.strip("<>")
name = parameter_string
label = "str"
if ":" in parameter_string:
name, label = parameter_string.split(":", 1)
if "=" in label:
label, _ = label.split("=", 1)
if "=" in name:
name, _ = name.split("=", 1)
if not name:
raise ValueError(
f"Invalid parameter syntax: {parameter_string}"
)
if label == "string":
warn(
"Use of 'string' as a path parameter type is deprected, "
"and will be removed in Sanic v21.12. "
f"Instead, use <{name}:str>.",
DeprecationWarning,
)
elif label == "number":
warn(
"Use of 'number' as a path parameter type is deprected, "
"and will be removed in Sanic v21.12. "
f"Instead, use <{name}:float>.",
DeprecationWarning,
)
default = (str, label, ParamInfo)
# Pull from pre-configured types
found = self.router.regex_types.get(label, default)
_type, pattern, param_info_class = found
return name, label, _type, pattern, param_info_class
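# --- Illustrative sketch (not part of the original module) ---
# Assuming a route whose router uses the built-in regex_types (see
# sanic_routing.patterns), the method above behaves roughly like:
#
#     route.parse_parameter_string('<user_id:int>')
#     # -> ('user_id', 'int', <class 'int'>, re.compile('^-?\\d+$'), ParamInfo)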
/sanic-routing-23.6.0.tar.gz/sanic-routing-23.6.0/sanic_routing/route.py
from __future__ import annotations
from typing import FrozenSet, List, Optional, Sequence, Tuple
from sanic_routing.route import Requirements, Route
from sanic_routing.utils import Immutable
from .exceptions import InvalidUsage, RouteExists
class RouteGroup:
methods_index: Immutable
passthru_properties = (
"labels",
"params",
"parts",
"path",
"pattern",
"raw_path",
"regex",
"router",
"segments",
"strict",
"unquote",
"uri",
)
#: The _reconstructed_ path after the Route has been normalized.
#: Does not contain preceding ``/`` (see also
#: :py:attr:`uri`)
path: str
#: A regex version of the :py:attr:`~sanic_routing.route.Route.path`
pattern: Optional[str]
#: Whether the route requires regular expression evaluation
regex: bool
#: The raw version of the path exploded (see also
#: :py:attr:`segments`)
parts: Tuple[str, ...]
#: Same as :py:attr:`parts` except
#: generalized so that any dynamic parts do not
#: include param keys since they have no impact on routing.
segments: Tuple[str, ...]
#: Whether the route should be matched with strict evaluation
strict: bool
#: Whether the route should be unquoted after matching if (for example) it
#: is suspected to contain non-URL friendly characters
unquote: bool
#: Since :py:attr:`path` does NOT
#: include a preceding '/', this adds it back.
uri: str
def __init__(self, *routes) -> None:
if len(set(route.parts for route in routes)) > 1:
raise InvalidUsage("Cannot group routes with differing paths")
if any(routes[-1].strict != route.strict for route in routes):
raise InvalidUsage("Cannot group routes with differing strictness")
route_list = list(routes)
route_list.pop()
self._routes = routes
self.pattern_idx = 0
def __str__(self):
display = (
f"path={self.path or self.router.delimiter} len={len(self.routes)}"
)
return f"<{self.__class__.__name__}: {display}>"
def __repr__(self) -> str:
return str(self)
def __iter__(self):
return iter(self.routes)
def __getitem__(self, key):
return self.routes[key]
def __getattr__(self, key):
# There are a number of properties that all of the routes in the group
# share in common. We pass them through to make them available
# on the RouteGroup, and then cache them so that they are permanent.
if key in self.passthru_properties:
value = getattr(self[0], key)
setattr(self, key, value)
return value
raise AttributeError(f"RouteGroup has no '{key}' attribute")
def finalize(self):
self.methods_index = Immutable(
{
method: route
for route in self._routes
for method in route.methods
}
)
def reset(self):
self.methods_index = dict(self.methods_index)
def merge(
self, group: RouteGroup, overwrite: bool = False, append: bool = False
) -> None:
"""
The purpose of merge is to group routes with the same path, but
declared individually. In other words, to group these:
.. code-block:: python
@app.get("/path/to")
def handler1(...):
...
@app.post("/path/to")
def handler2(...):
...
The other main purpose is to look for conflicts and
raise ``RouteExists``
A duplicate route is when:
1. They have the same path and any overlapping methods; AND
2. If they have requirements, they are the same
:param group: Incoming route group
:type group: RouteGroup
:param overwrite: whether to allow an otherwise duplicate route group
to overwrite the existing, if ``True`` will not raise exception
on duplicates, defaults to False
:type overwrite: bool, optional
:param append: whether to allow an otherwise duplicate route group to
append its routes to the existing route group, defaults to False
:type append: bool, optional
:raises RouteExists: Raised when there is a duplicate
"""
_routes = list(self._routes)
for other_route in group.routes:
for current_route in self:
if (
current_route == other_route
or (
current_route.requirements
and not other_route.requirements
)
or (
not current_route.requirements
and other_route.requirements
)
) and not append:
if not overwrite:
raise RouteExists(
f"Route already registered: {self.raw_path} "
f"[{','.join(self.methods)}]"
)
else:
_routes.append(other_route)
self._routes = tuple(_routes)
@property
def depth(self) -> int:
"""
The number of parts in :py:attr:`parts`
"""
return len(self[0].parts)
@property
def dynamic_path(self) -> bool:
return any(
(param.label == "path") or ("/" in param.label)
for param in self.params.values()
)
@property
def methods(self) -> FrozenSet[str]:
""""""
return frozenset(
[method for route in self for method in route.methods]
)
@property
def routes(self) -> Sequence[Route]:
return self._routes
@property
def requirements(self) -> List[Requirements]:
return [route.requirements for route in self if route.requirements]
/sanic-routing-23.6.0.tar.gz/sanic-routing-23.6.0/sanic_routing/group.py
import re
import typing as t
import uuid
from datetime import date, datetime
from types import SimpleNamespace
from typing import Any, Callable, Dict, Pattern, Tuple, Type
from sanic_routing.exceptions import InvalidUsage, NotFound
def parse_date(d) -> date:
return datetime.strptime(d, "%Y-%m-%d").date()
def alpha(param: str) -> str:
if not param.isalpha():
raise ValueError(f"Value {param} contains non-alphabetic chracters")
return param
def slug(param: str) -> str:
if not REGEX_TYPES["slug"][1].match(param):
raise ValueError(f"Value {param} does not match the slug format")
return param
def ext(param: str) -> Tuple[str, ...]:
parts = tuple(param.split("."))
if any(not p for p in parts) or len(parts) == 1:
raise ValueError(f"Value {param} does not match filename format")
return parts
def nonemptystr(param: str) -> str:
if not param:
raise ValueError(f"Value {param} is an empty string")
return param
class ParamInfo:
__slots__ = (
"cast",
"ctx",
"label",
"name",
"pattern",
"priority",
"raw_path",
"regex",
)
def __init__(
self,
name: str,
raw_path: str,
label: str,
cast: t.Callable[[str], t.Any],
pattern: re.Pattern,
regex: bool,
priority: int,
) -> None:
self.name = name
self.raw_path = raw_path
self.label = label
self.cast = cast
self.pattern = pattern
self.regex = regex
self.priority = priority
self.ctx = SimpleNamespace()
def process(
self,
params: t.Dict[str, t.Any],
value: t.Union[str, t.Tuple[str, ...]],
) -> None:
params[self.name] = value
class ExtParamInfo(ParamInfo):
def __init__(self, **kwargs):
super().__init__(**kwargs)
match = REGEX_PARAM_EXT_PATH.search(self.raw_path)
if not match:
raise InvalidUsage(
f"Invalid extension parameter definition: {self.raw_path}"
)
if match.group(2) == "path":
raise InvalidUsage(
"Extension parameter matching does not support the "
"`path` type."
)
ext_type = match.group(3)
regex_type = REGEX_TYPES.get(match.group(2))
self.ctx.cast = None
if regex_type:
self.ctx.cast = regex_type[0]
elif match.group(2):
raise InvalidUsage(
"Extension parameter matching only supports filename matching "
"on known parameter types, and not regular expressions."
)
self.ctx.allowed = []
self.ctx.allowed_sub_count = 0
if ext_type:
self.ctx.allowed = ext_type.split("|")
allowed_subs = {allowed.count(".") for allowed in self.ctx.allowed}
if len(allowed_subs) > 1:
raise InvalidUsage(
"All allowed extensions within a single route definition "
"must contain the same number of subparts. For example: "
"<foo:ext=js|css> and <foo:ext=min.js|min.css> are both "
"acceptable, but <foo:ext=js|min.js> is not."
)
self.ctx.allowed_sub_count = next(iter(allowed_subs))
for extension in self.ctx.allowed:
if not REGEX_ALLOWED_EXTENSION.match(extension):
raise InvalidUsage(f"Invalid extension: {extension}")
def process(self, params, value):
stop = -1 * (self.ctx.allowed_sub_count + 1)
filename = ".".join(value[:stop])
ext = ".".join(value[stop:])
if self.ctx.allowed and ext not in self.ctx.allowed:
raise NotFound(f"Invalid extension: {ext}")
if self.ctx.cast:
try:
filename = self.ctx.cast(filename)
except ValueError:
raise NotFound(f"Invalid filename: {filename}")
params[self.name] = filename
params["ext"] = ext
EXTENSION = r"[a-z0-9](?:[a-z0-9\.]*[a-z0-9])?"
PARAM_EXT = (
r"<([a-zA-Z_][a-zA-Z0-9_]*)(?:=([a-z]+))?(?::ext(?:=([a-z0-9|\.]+))?)>"
)
REGEX_PARAM_NAME = re.compile(r"^<([a-zA-Z_][a-zA-Z0-9_]*)(?::(.*))?>$")
REGEX_PARAM_EXT_PATH = re.compile(PARAM_EXT)
REGEX_PARAM_NAME_EXT = re.compile(r"^" + PARAM_EXT + r"$")
REGEX_ALLOWED_EXTENSION = re.compile(r"^" + EXTENSION + r"$")
# Predefined path parameter types. The value is a tuple consisting of a
# callable, a compiled regular expression, and a ParamInfo class.
# The callable should:
# 1. accept a string input
# 2. cast the string to desired type
# 3. raise ValueError if it cannot
# The regular expression is generally NOT used. Unless the path is forced
# to use regex patterns.
REGEX_TYPES_ANNOTATION = Dict[
str, Tuple[Callable[[str], Any], Pattern, Type[ParamInfo]]
]
REGEX_TYPES: REGEX_TYPES_ANNOTATION = {
"strorempty": (str, re.compile(r"^[^/]*$"), ParamInfo),
"str": (nonemptystr, re.compile(r"^[^/]+$"), ParamInfo),
"ext": (ext, re.compile(r"^[^/]+\." + EXTENSION + r"$"), ExtParamInfo),
"slug": (slug, re.compile(r"^[a-z0-9]+(?:-[a-z0-9]+)*$"), ParamInfo),
"alpha": (alpha, re.compile(r"^[A-Za-z]+$"), ParamInfo),
"path": (str, re.compile(r"^[^/]?.*?$"), ParamInfo),
"float": (float, re.compile(r"^-?(?:\d+(?:\.\d*)?|\.\d+)$"), ParamInfo),
"int": (int, re.compile(r"^-?\d+$"), ParamInfo),
"ymd": (
parse_date,
re.compile(r"^([12]\d{3}-(0[1-9]|1[0-2])-(0[1-9]|[12]\d|3[01]))$"),
ParamInfo,
),
"uuid": (
uuid.UUID,
re.compile(
r"^[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-"
r"[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}$"
),
ParamInfo,
),
}
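# Minimal usage sketch: exercises the converters and patterns above with
# illustrative values. The route syntax that ultimately consumes REGEX_TYPES
# (e.g. "<file:ext=js|css>") lives in the router, not in this module.
if __name__ == "__main__":
    assert REGEX_TYPES["int"][1].match("-42")
    assert REGEX_TYPES["slug"][1].match("my-first-post")
    assert slug("my-first-post") == "my-first-post"
    assert alpha("letters") == "letters"
    assert ext("archive.tar.gz") == ("archive", "tar", "gz")
    assert parse_date("2023-06-01").year == 2023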
|
/sanic-routing-23.6.0.tar.gz/sanic-routing-23.6.0/sanic_routing/patterns.py
| 0.64791 | 0.216198 |
patterns.py
|
pypi
|
import asyncio
import inspect
import logging
import traceback
from datetime import datetime, time, timedelta
from typing import Callable, Optional, Union
__all__ = ('task', 'SanicScheduler', 'make_task')
logger = logging.getLogger('scheduler')
_tasks = {}
_wrk = []
def make_task(fn: Callable,
period: Optional[timedelta] = None,
start: Optional[Union[timedelta, time]] = None) -> None:
"""Make task."""
_tasks[fn] = Task(fn, period, start)
def task(period: Optional[timedelta] = None,
start: Optional[Union[timedelta, time]] = None):
"""Decorate the function to run on schedule."""
def wrapper(fn):
make_task(fn, period, start)
return fn
return wrapper
class SanicScheduler:
def __init__(self, app=None, utc=True):
self.app = app
if app:
self.init_app(app, utc)
def init_app(self, app, utc=True):
self.app = app
@app.listener("after_server_start")
async def run_scheduler(_app, loop):
for i in _tasks.values():
_wrk.append(loop.create_task(i.run(_app, utc)))
@app.listener("before_server_stop")
async def stop_scheduler(_app, _):
for i in _wrk:
i.cancel()
return self
@classmethod
def task_info(cls):
return _tasks
class Task:
def __init__(self,
func: Callable,
period: Optional[timedelta],
start: Optional[Union[timedelta, time]]):
self.func = func
self.func_name = func.__name__
self.period = period
self.start = start
self.last_run = None
def _next_run(self, utc):
if utc:
now = datetime.utcnow().replace(microsecond=0)
else:
now = datetime.now().replace(microsecond=0)
if self.last_run is None:
if self.start is not None:
if isinstance(self.start, time):
d1 = datetime.combine(datetime.min, self.start)
d2 = datetime.combine(datetime.min, now.time())
self.start = timedelta(seconds=(d1 - d2).seconds)
self.last_run = now + self.start
else:
self.last_run = now
elif self.period is None:
return
else:
while self.last_run <= now:
self.last_run += self.period
return self.last_run - now
async def run(self, app, utc=True):
while True:
delta = self._next_run(utc)
if delta is None:
logger.info('STOP TASK "%s"' % self.func_name)
break
logger.debug('NEXT TASK "%s" %s' % (self.func_name, delta))
await asyncio.sleep(int(delta.total_seconds()))
logger.info('RUN TASK "%s"' % self.func_name)
try:
ret = self.func(app)
if inspect.isawaitable(ret):
await ret
logger.info('END TASK "%s"' % self.func_name)
except Exception as e:
logger.error(e)
logger.error(traceback.format_exc())
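# Minimal usage sketch; the app name and handler are illustrative assumptions,
# while @task, SanicScheduler and timedelta come from the module above.
if __name__ == "__main__":  # pragma: no cover
    from sanic import Sanic

    demo_app = Sanic("scheduler-demo")
    SanicScheduler(demo_app)

    @task(timedelta(seconds=30))
    async def heartbeat(app):
        # Runs every 30 seconds once "after_server_start" has fired.
        logger.info('heartbeat from "%s"', app.name)

    demo_app.run(port=8000)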
|
/sanic-scheduler-1.0.7.tar.gz/sanic-scheduler-1.0.7/sanic_scheduler/__init__.py
| 0.775392 | 0.161883 |
__init__.py
|
pypi
|
import functools
import logging
from fnmatch import fnmatch
from sanic.request import Request
from tortoise.exceptions import DoesNotExist
from sanic_security.authentication import authenticate
from sanic_security.exceptions import AuthorizationError
from sanic_security.models import Role, Account, AuthenticationSession
from sanic_security.utils import get_ip
"""
An effective, simple, and async security library for the Sanic framework.
Copyright (C) 2020-present Aidan Stewart
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
async def check_permissions(
request: Request, *required_permissions: str
) -> AuthenticationSession:
"""
Authenticates client and determines if the account has sufficient permissions for an action.
Args:
request (Request): Sanic request parameter.
*required_permissions (Tuple[str, ...]): The permissions required to authorize an action.
Returns:
authentication_session
Raises:
NotFoundError
JWTDecodeError
DeletedError
ExpiredError
DeactivatedError
UnverifiedError
DisabledError
AuthorizationError
"""
authentication_session = await authenticate(request)
roles = await authentication_session.bearer.roles.filter(deleted=False).all()
for role in roles:
for required_permission, role_permission in zip(
required_permissions, role.permissions.split(", ")
):
if fnmatch(required_permission, role_permission):
return authentication_session
logging.warning(f"Client ({get_ip(request)}) has insufficient permissions.")
raise AuthorizationError("Insufficient permissions required for this action.")
async def check_roles(request: Request, *required_roles: str) -> AuthenticationSession:
"""
Authenticates client and determines if the account has sufficient roles for an action.
Args:
request (Request): Sanic request parameter.
*required_roles (Tuple[str, ...]): The roles required to authorize an action.
Returns:
authentication_session
Raises:
NotFoundError
JWTDecodeError
DeletedError
ExpiredError
DeactivatedError
UnverifiedError
DisabledError
AuthorizationError
"""
authentication_session = await authenticate(request)
roles = await authentication_session.bearer.roles.filter(deleted=False).all()
for role in roles:
if role.name in required_roles:
return authentication_session
logging.warning(f"Client ({get_ip(request)}) has insufficient roles.")
raise AuthorizationError("Insufficient roles required for this action.")
def require_permissions(*required_permissions: str):
"""
Authenticates client and determines if the account has sufficient permissions for an action.
Args:
*required_permissions (Tuple[str, ...]): The permissions required to authorize an action.
Example:
This method is not called directly and instead used as a decorator:
@app.post("api/auth/perms")
@require_permissions("admin:update", "employee:add")
async def on_require_perms(request):
return text("Account permitted.")
Raises:
NotFoundError
JWTDecodeError
DeletedError
ExpiredError
DeactivatedError
UnverifiedError
DisabledError
AuthorizationError
"""
def decorator(func):
@functools.wraps(func)
async def wrapper(request, *args, **kwargs):
request.ctx.authentication_session = await check_permissions(
request, *required_permissions
)
return await func(request, *args, **kwargs)
return wrapper
return decorator
def require_roles(*required_roles: str):
"""
Authenticates client and determines if the account has sufficient roles for an action.
Args:
*required_roles (Tuple[str, ...]): The roles required to authorize an action.
Example:
This method is not called directly and instead used as a decorator:
@app.post("api/auth/roles")
@require_roles("Admin", "Moderator")
async def on_require_roles(request):
return text("Account permitted")
Raises:
NotFoundError
JWTDecodeError
DeletedError
ExpiredError
DeactivatedError
UnverifiedError
DisabledError
AuthorizationError
"""
def decorator(func):
@functools.wraps(func)
async def wrapper(request, *args, **kwargs):
request.ctx.authentication_session = await check_roles(
request, *required_roles
)
return await func(request, *args, **kwargs)
return wrapper
return decorator
async def assign_role(
name: str, account: Account, permissions: str = None, description: str = None
) -> Role:
"""
Easy account role assignment. Role being assigned to an account will be created if it doesn't exist.
Args:
name (str): The name of the role associated with the account.
account (Account): The account associated with the created role.
permissions (str): The permissions of the role associated with the account. Permissions must be separated via comma and in wildcard format.
description (str): The description of the role associated with the account.
"""
try:
role = await Role.filter(name=name).get()
except DoesNotExist:
role = await Role.create(
description=description, permissions=permissions, name=name
)
await account.roles.add(role)
return role
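# Minimal usage sketch; the role name, permission string and handler below are
# illustrative assumptions rather than sanic-security defaults.
async def _grant_support_role(account: Account) -> Role:
    # Create (or reuse) a "Support" role carrying a wildcard permission and
    # attach it to the given account.
    return await assign_role(
        "Support", account, permissions="tickets:*", description="Support staff"
    )


@require_permissions("tickets:update")
async def _update_ticket(request: Request):
    # Reached only when check_permissions() accepted the session; a real app
    # would register this handler via app.post(...).
    return request.ctx.authentication_session.bearer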
|
/sanic_security-1.11.7-py3-none-any.whl/sanic_security/authorization.py
| 0.839603 | 0.206334 |
authorization.py
|
pypi
|
import functools
from contextlib import suppress
from sanic.request import Request
from sanic_security.exceptions import (
JWTDecodeError,
NotFoundError,
VerifiedError,
)
from sanic_security.models import (
Account,
TwoStepSession,
CaptchaSession,
)
"""
An effective, simple, and async security library for the Sanic framework.
Copyright (C) 2020-present Aidan Stewart
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
async def request_two_step_verification(
request: Request, account: Account = None
) -> TwoStepSession:
"""
Creates a two-step session and deactivates the client's current two-step session if found.
Args:
request (Request): Sanic request parameter. Request body should contain form-data with the following argument(s): email.
account (Account): The account being associated with the new verification session. If None, an account is retrieved via the email in the request form-data or an existing two-step session.
Raises:
NotFoundError
Returns:
two_step_session
"""
with suppress(NotFoundError, JWTDecodeError):
two_step_session = await TwoStepSession.decode(request)
if two_step_session.active:
await two_step_session.deactivate()
if not account:
account = two_step_session.bearer
if request.form.get("email") or not account:
account = await Account.get_via_email(request.form.get("email"))
two_step_session = await TwoStepSession.new(request, account)
return two_step_session
async def two_step_verification(request: Request) -> TwoStepSession:
"""
Validates a two-step verification attempt.
Args:
request (Request): Sanic request parameter. Request body should contain form-data with the following argument(s): code.
Raises:
NotFoundError
JWTDecodeError
DeletedError
ExpiredError
DeactivatedError
UnverifiedError
DisabledError
ChallengeError
MaxedOutChallengeError
Returns:
two_step_session
"""
two_step_session = await TwoStepSession.decode(request)
two_step_session.validate()
two_step_session.bearer.validate()
await two_step_session.check_code(request, request.form.get("code"))
return two_step_session
def requires_two_step_verification(arg=None):
"""
Validates a two-step verification attempt.
Example:
This method is not called directly and instead used as a decorator:
@app.post("api/verification/attempt")
@requires_two_step_verification
async def on_verified(request):
response = json("Two-step verification attempt successful!", two_step_session.json())
return response
Raises:
NotFoundError
JWTDecodeError
DeletedError
ExpiredError
DeactivatedError
UnverifiedError
DisabledError
ChallengeError
MaxedOutChallengeError
"""
def decorator(func):
@functools.wraps(func)
async def wrapper(request, *args, **kwargs):
request.ctx.two_step_session = await two_step_verification(request)
return await func(request, *args, **kwargs)
return wrapper
if callable(arg):
return decorator(arg)
else:
return decorator
async def verify_account(request: Request) -> TwoStepSession:
"""
Verifies the client's account via two-step session code.
Args:
request (Request): Sanic request parameter. Request body should contain form-data with the following argument(s): code.
Raises:
NotFoundError
JWTDecodeError
DeletedError
ExpiredError
DeactivatedError
ChallengeError
MaxedOutChallengeError
VerifiedError
Returns:
two_step_session
"""
two_step_session = await TwoStepSession.decode(request)
if two_step_session.bearer.verified:
raise VerifiedError()
two_step_session.validate()
await two_step_session.check_code(request, request.form.get("code"))
two_step_session.bearer.verified = True
await two_step_session.bearer.save(update_fields=["verified"])
return two_step_session
async def request_captcha(request: Request) -> CaptchaSession:
"""
Creates a captcha session and deactivates the client's current captcha session if found.
Args:
request (Request): Sanic request parameter.
Returns:
captcha_session
"""
with suppress(NotFoundError, JWTDecodeError):
captcha_session = await CaptchaSession.decode(request)
if captcha_session.active:
await captcha_session.deactivate()
return await CaptchaSession.new(request)
async def captcha(request: Request) -> CaptchaSession:
"""
Validates a captcha challenge attempt.
Args:
request (Request): Sanic request parameter. Request body should contain form-data with the following argument(s): captcha.
Raises:
DeletedError
ExpiredError
DeactivatedError
JWTDecodeError
NotFoundError
ChallengeError
MaxedOutChallengeError
Returns:
captcha_session
"""
captcha_session = await CaptchaSession.decode(request)
captcha_session.validate()
await captcha_session.check_code(request, request.form.get("captcha"))
return captcha_session
def requires_captcha(arg=None):
"""
Validates a captcha challenge attempt.
Example:
This method is not called directly and instead used as a decorator:
@app.post("api/captcha/attempt")
@requires_captcha
async def on_captcha_attempt(request):
return json("Captcha attempt successful!", captcha_session.json())
Raises:
DeletedError
ExpiredError
DeactivatedError
JWTDecodeError
NotFoundError
ChallengeError
MaxedOutChallengeError
"""
def decorator(func):
@functools.wraps(func)
async def wrapper(request, *args, **kwargs):
request.ctx.captcha_session = await captcha(request)
return await func(request, *args, **kwargs)
return wrapper
if callable(arg):
return decorator(arg)
else:
return decorator
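# Minimal usage sketch; routes and response wording are illustrative, and the
# session json() method is assumed from the docstring examples above.
from sanic_security.utils import json as _json


async def _on_request_verification(request: Request):
    # Issues a fresh two-step session; a real app would deliver the generated
    # code to the client (e.g. by email) before returning.
    two_step_session = await request_two_step_verification(request)
    return _json("Verification code requested.", two_step_session.json())


@requires_two_step_verification
async def _on_verification_attempt(request: Request):
    # Runs only after two_step_verification() accepted the submitted code.
    return _json(
        "Two-step verification attempt successful!",
        request.ctx.two_step_session.json(),
    )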
|
/sanic_security-1.11.7-py3-none-any.whl/sanic_security/verification.py
| 0.78609 | 0.28198 |
verification.py
|
pypi
|
import datetime
import random
import string
from sanic.request import Request
from sanic.response import json as sanic_json, HTTPResponse
"""
An effective, simple, and async security library for the Sanic framework.
Copyright (C) 2020-present Aidan Stewart
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
def get_ip(request: Request) -> str:
"""
Retrieves ip address from client request.
Args:
request (Request): Sanic request parameter.
Returns:
ip
"""
return request.remote_addr or request.ip
def get_code() -> str:
"""
Generates random code to be used for verification.
Returns:
code
"""
return "".join(random.choices(string.digits + string.ascii_uppercase, k=6))
def json(message: str, data, status_code: int = 200) -> HTTPResponse:
"""
A preformatted Sanic json response.
Args:
        message (str): Message describing data or relaying human-readable information.
data (Any): Raw information to be used by client.
status_code (int): HTTP response code.
Returns:
json
"""
return sanic_json(
{"message": message, "code": status_code, "data": data}, status=status_code
)
def get_expiration_date(seconds: int) -> datetime.datetime:
"""
Retrieves the date after which something (such as a session) is no longer valid.
Args:
seconds: Seconds added to current time.
Returns:
expiration_date
"""
return (
datetime.datetime.utcnow() + datetime.timedelta(seconds=seconds)
if seconds > 0
else None
)
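# Minimal usage sketch for the helpers above; values are illustrative.
if __name__ == "__main__":
    print(get_code())  # e.g. "7F3K9Q"
    print(get_expiration_date(3600))  # one hour from now (UTC), or None for 0
    response = json("Demo payload.", {"user": "demo"})
    print(response.status)  # 200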
|
/sanic_security-1.11.7-py3-none-any.whl/sanic_security/utils.py
| 0.742235 | 0.162879 |
utils.py
|
pypi
|
import base64
import functools
import re
from argon2 import PasswordHasher
from argon2.exceptions import VerifyMismatchError
from sanic import Sanic
from sanic.log import logger
from sanic.request import Request
from tortoise.exceptions import DoesNotExist
from sanic_security.configuration import config as security_config
from sanic_security.exceptions import (
NotFoundError,
CredentialsError,
DeactivatedError,
SecondFactorFulfilledError,
)
from sanic_security.models import Account, AuthenticationSession, Role, TwoStepSession
from sanic_security.utils import get_ip
"""
An effective, simple, and async security library for the Sanic framework.
Copyright (C) 2020-present Aidan Stewart
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
password_hasher = PasswordHasher()
def validate_email(email: str) -> str:
"""
Validates email format.
Args:
email (str): Email being validated.
Returns:
email
Raises:
CredentialsError
"""
if not re.search(r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$", email):
raise CredentialsError("Please use a valid email address.", 400)
return email
def validate_username(username: str) -> str:
"""
Validates username format.
Args:
username (str): Username being validated.
Returns:
username
Raises:
CredentialsError
"""
if not re.search(r"^[A-Za-z0-9_-]{3,32}$", username):
raise CredentialsError(
"Username must be between 3-32 characters and not contain any special characters other than _ or -.",
400,
)
return username
def validate_phone(phone: str) -> str:
"""
Validates phone number format.
Args:
phone (str): Phone number being validated.
Returns:
phone
Raises:
CredentialsError
"""
if phone and not re.search(
r"^(\+\d{1,2}\s)?\(?\d{3}\)?[\s.-]?\d{3}[\s.-]?\d{4}$", phone
):
raise CredentialsError("Please use a valid phone number.", 400)
return phone
def validate_password(password: str) -> str:
"""
Validates password requirements.
Args:
password (str): Password being validated.
Returns:
password
Raises:
CredentialsError
"""
if not re.search(r"^(?=.*[A-Z])(?=.*\d)(?=.*[@#$%^&+=!]).*$", password):
raise CredentialsError(
"Password must contain one capital letter, one number, and one special character",
400,
)
return password
async def register(
request: Request, verified: bool = False, disabled: bool = False
) -> Account:
"""
Registers a new account that can be logged into.
Args:
request (Request): Sanic request parameter. Request body should contain form-data with the following argument(s): email, username, password, phone (including country code).
verified (bool): Sets the verification requirement for the account being registered.
disabled (bool): Renders the account being registered unusable.
Returns:
account
Raises:
CredentialsError
"""
email_lower = validate_email(request.form.get("email").lower())
if await Account.filter(email=email_lower).exists():
raise CredentialsError("An account with this email already exists.", 409)
elif await Account.filter(
username=validate_username(request.form.get("username"))
).exists():
raise CredentialsError("An account with this username already exists.", 409)
elif (
request.form.get("phone")
and await Account.filter(
phone=validate_phone(request.form.get("phone"))
).exists()
):
raise CredentialsError("An account with this phone number already exists.", 409)
validate_password(request.form.get("password"))
account = await Account.create(
email=email_lower,
username=request.form.get("username"),
password=password_hasher.hash(request.form.get("password")),
phone=request.form.get("phone"),
verified=verified,
disabled=disabled,
)
return account
async def login(
request: Request, account: Account = None, require_second_factor: bool = False
) -> AuthenticationSession:
"""
Login with email or username (if enabled) and password.
Args:
request (Request): Sanic request parameter. Login credentials are retrieved via the authorization header.
account (Account): Account being logged into, overrides retrieving account via email or username in form-data.
require_second_factor (bool): Determines authentication session second factor requirement on login.
Returns:
authentication_session
Raises:
CredentialsError
NotFoundError
DeletedError
UnverifiedError
DisabledError
"""
if request.headers.get("Authorization"):
authorization_type, credentials = request.headers.get("Authorization").split()
if authorization_type == "Basic":
email_or_username, password = (
base64.b64decode(credentials).decode().split(":")
)
else:
raise CredentialsError("Invalid authorization type.")
else:
raise CredentialsError("Credentials not provided.")
if not account:
try:
account = await Account.get_via_email(email_or_username.lower())
except NotFoundError as e:
if security_config.ALLOW_LOGIN_WITH_USERNAME:
account = await Account.get_via_username(email_or_username)
else:
raise e
try:
password_hasher.verify(account.password, password)
if password_hasher.check_needs_rehash(account.password):
account.password = password_hasher.hash(password)
await account.save(update_fields=["password"])
account.validate()
return await AuthenticationSession.new(
request, account, requires_second_factor=require_second_factor
)
except VerifyMismatchError:
logger.warning(
f"Client ({get_ip(request)}) login password attempt is incorrect"
)
raise CredentialsError("Incorrect password.", 401)
async def logout(request: Request) -> AuthenticationSession:
"""
Deactivates client's authentication session.
Args:
request (Request): Sanic request parameter.
Raises:
NotFoundError
JWTDecodeError
DeactivatedError
Returns:
authentication_session
"""
authentication_session = await AuthenticationSession.decode(request)
if not authentication_session.active:
raise DeactivatedError("Already logged out.", 403)
authentication_session.active = False
await authentication_session.save(update_fields=["active"])
return authentication_session
async def authenticate(request: Request) -> AuthenticationSession:
"""
Validates client's authentication session and account.
Args:
request (Request): Sanic request parameter.
Returns:
authentication_session
Raises:
NotFoundError
JWTDecodeError
DeletedError
ExpiredError
DeactivatedError
UnverifiedError
DisabledError
SecondFactorRequiredError
"""
authentication_session = await AuthenticationSession.decode(request)
authentication_session.validate()
authentication_session.bearer.validate()
return authentication_session
async def fulfill_second_factor(request: Request) -> AuthenticationSession:
"""
Fulfills client authentication session's second factor requirement via two-step session code.
Args:
request (Request): Sanic request parameter. Request body should contain form-data with the following argument(s): code.
Raises:
NotFoundError
JWTDecodeError
DeletedError
ExpiredError
DeactivatedError
ChallengeError
MaxedOutChallengeError
SecondFactorFulfilledError
Returns:
        authentication_session
"""
authentication_session = await AuthenticationSession.decode(request)
two_step_session = await TwoStepSession.decode(request)
if not authentication_session.requires_second_factor:
raise SecondFactorFulfilledError()
two_step_session.validate()
await two_step_session.check_code(request, request.form.get("code"))
authentication_session.requires_second_factor = False
await authentication_session.save(update_fields=["requires_second_factor"])
return authentication_session
def requires_authentication(arg=None):
"""
Validates client's authentication session and account.
Example:
This method is not called directly and instead used as a decorator:
@app.post('api/authenticate')
@requires_authentication
async def on_authenticate(request):
return text('User is authenticated!')
Raises:
NotFoundError
JWTDecodeError
DeletedError
ExpiredError
DeactivatedError
UnverifiedError
DisabledError
"""
def decorator(func):
@functools.wraps(func)
async def wrapper(request, *args, **kwargs):
request.ctx.authentication_session = await authenticate(request)
return await func(request, *args, **kwargs)
return wrapper
if callable(arg):
return decorator(arg)
else:
return decorator
def create_initial_admin_account(app: Sanic) -> None:
"""
Creates the initial admin account that can be logged into and has complete authoritative access.
Args:
app (Sanic): The main Sanic application instance.
"""
@app.listener("before_server_start")
async def generate(app, loop):
try:
role = await Role.filter(name="Head Admin").get()
except DoesNotExist:
role = await Role.create(
description="Has the ability to control any aspect of the API, assign sparingly.",
permissions="*:*",
name="Head Admin",
)
try:
account = await Account.filter(
email=security_config.INITIAL_ADMIN_EMAIL
).get()
await account.fetch_related("roles")
if role not in account.roles:
await account.roles.add(role)
logger.warning(
'The initial admin account role "Head Admin" was removed and has been reinstated.'
)
except DoesNotExist:
account = await Account.create(
username="Head-Admin",
email=security_config.INITIAL_ADMIN_EMAIL,
password=PasswordHasher().hash(security_config.INITIAL_ADMIN_PASSWORD),
verified=True,
)
await account.roles.add(role)
logger.info("Initial admin account created.")
|
/sanic_security-1.11.7-py3-none-any.whl/sanic_security/authentication.py
| 0.654453 | 0.176388 |
authentication.py
|
pypi
|
from sanic.exceptions import SanicException
from sanic_security.utils import json
"""
An effective, simple, and async security library for the Sanic framework.
Copyright (C) 2020-present Aidan Stewart
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
class SecurityError(SanicException):
"""
Sanic Security related error.
Attributes:
json (HTTPResponse): Security error json response.
Args:
message (str): Human readable error message.
code (int): HTTP error code.
"""
def __init__(self, message: str, code: int):
self.json = json(message, self.__class__.__name__, code)
super().__init__(message, code)
class NotFoundError(SecurityError):
"""
Raised when a resource cannot be found.
"""
def __init__(self, message):
super().__init__(message, 404)
class DeletedError(SecurityError):
"""
Raised when attempting to access a deleted resource.
"""
def __init__(self, message):
super().__init__(message, 410)
class AccountError(SecurityError):
"""
Base account error that all other account errors derive from.
"""
def __init__(self, message, code):
super().__init__(message, code)
class DisabledError(AccountError):
"""
Raised when account is disabled.
"""
def __init__(self, message: str = "Account is disabled.", code: int = 401):
super().__init__(message, code)
class UnverifiedError(AccountError):
"""
Raised when account is unverified.
"""
def __init__(self):
super().__init__("Account requires verification.", 401)
class VerifiedError(AccountError):
"""
Raised when account is already verified.
"""
def __init__(self):
super().__init__("Account already verified.", 403)
class SessionError(SecurityError):
"""
Base session error that all other session errors derive from.
"""
def __init__(self, message, code=401):
super().__init__(message, code)
class JWTDecodeError(SessionError):
"""
Raised when client JWT is invalid.
"""
def __init__(self, message, code=400):
super().__init__(message, code)
class DeactivatedError(SessionError):
"""
Raised when session is deactivated.
"""
def __init__(self, message: str = "Session is deactivated.", code: int = 401):
super().__init__(message, code)
class ExpiredError(SessionError):
"""
Raised when session has expired.
"""
def __init__(self):
super().__init__("Session has expired")
class SecondFactorRequiredError(SessionError):
"""
Raised when authentication session two-factor requirement isn't met.
"""
def __init__(self):
super().__init__("Session requires second factor for authentication.")
class SecondFactorFulfilledError(SessionError):
"""
Raised when authentication session two-factor requirement is already met.
"""
def __init__(self):
super().__init__("Session second factor requirement already met.", 403)
class ChallengeError(SessionError):
"""
Raised when a session challenge attempt is invalid.
"""
def __init__(self, message):
super().__init__(message)
class MaxedOutChallengeError(ChallengeError):
"""
Raised when a session's challenge attempt limit is reached.
"""
def __init__(self):
super().__init__("The maximum amount of attempts has been reached.")
class AuthorizationError(SecurityError):
"""
Raised when an account has insufficient permissions or roles for an action.
"""
def __init__(self, message):
super().__init__(message, 403)
class CredentialsError(SecurityError):
"""
Raised when credentials are invalid.
"""
def __init__(self, message, code=400):
super().__init__(message, code)
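# Minimal usage sketch: every SecurityError carries a prebuilt json response,
# so a single Sanic exception handler can surface them uniformly. In a real
# app it would be registered via app.exception(SecurityError).
async def _on_security_error(request, exception: SecurityError):
    return exception.json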
|
/sanic_security-1.11.7-py3-none-any.whl/sanic_security/exceptions.py
| 0.844922 | 0.219547 |
exceptions.py
|
pypi
|
from os import environ
from sanic.utils import str_to_bool
"""
An effective, simple, and async security library for the Sanic framework.
Copyright (C) 2020-present Aidan Stewart
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
DEFAULT_CONFIG = {
"SECRET": "This is a big secret. Shhhhh",
"PUBLIC_SECRET": None,
"SESSION_SAMESITE": "strict",
"SESSION_SECURE": True,
"SESSION_HTTPONLY": True,
"SESSION_DOMAIN": None,
"SESSION_PREFIX": "token",
"SESSION_ENCODING_ALGORITHM": "HS256",
"MAX_CHALLENGE_ATTEMPTS": 5,
"CAPTCHA_SESSION_EXPIRATION": 60,
"CAPTCHA_FONT": "captcha-font.ttf",
"TWO_STEP_SESSION_EXPIRATION": 200,
"AUTHENTICATION_SESSION_EXPIRATION": 2592000,
"ALLOW_LOGIN_WITH_USERNAME": False,
"INITIAL_ADMIN_EMAIL": "[email protected]",
"INITIAL_ADMIN_PASSWORD": "admin123",
"TEST_DATABASE_URL": "sqlite://:memory:",
}
class Config(dict):
"""
Sanic Security configuration.
Attributes:
SECRET (str): The secret used by the hashing algorithm for generating and signing JWTs. This should be a string unique to your application. Keep it safe.
PUBLIC_SECRET (str): The secret used for verifying and decoding JWTs and can be publicly shared. This should be a string unique to your application.
SESSION_SAMESITE (str): The SameSite attribute of session cookies.
SESSION_SECURE (bool): The Secure attribute of session cookies.
SESSION_HTTPONLY (bool): The HttpOnly attribute of session cookies. HIGHLY recommended that you do not turn this off, unless you know what you are doing.
        SESSION_DOMAIN (str): The Domain attribute of session cookies.
SESSION_ENCODING_ALGORITHM (str): The algorithm used to encode sessions to a JWT.
SESSION_PREFIX (str): Prefix attached to the beginning of session cookies.
        MAX_CHALLENGE_ATTEMPTS (int): The maximum amount of session challenge attempts allowed.
CAPTCHA_SESSION_EXPIRATION (int): The amount of seconds till captcha session expiration on creation. Setting to 0 will disable expiration.
CAPTCHA_FONT (str): The file path to the font being used for captcha generation.
TWO_STEP_SESSION_EXPIRATION (int): The amount of seconds till two step session expiration on creation. Setting to 0 will disable expiration.
        AUTHENTICATION_SESSION_EXPIRATION (int): The amount of seconds till authentication session expiration on creation. Setting to 0 will disable expiration.
        ALLOW_LOGIN_WITH_USERNAME (bool): Allows login via username as well as email.
INITIAL_ADMIN_EMAIL (str): Email used when creating the initial admin account.
INITIAL_ADMIN_PASSWORD (str): Password used when creating the initial admin account.
        TEST_DATABASE_URL (str): Database URL for connecting to the database Sanic Security will use for testing.
"""
SECRET: str
PUBLIC_SECRET: str
SESSION_SAMESITE: str
SESSION_SECURE: bool
SESSION_HTTPONLY: bool
SESSION_DOMAIN: str
SESSION_ENCODING_ALGORITHM: str
SESSION_PREFIX: str
MAX_CHALLENGE_ATTEMPTS: int
CAPTCHA_SESSION_EXPIRATION: int
CAPTCHA_FONT: str
TWO_STEP_SESSION_EXPIRATION: int
AUTHENTICATION_SESSION_EXPIRATION: int
ALLOW_LOGIN_WITH_USERNAME: bool
INITIAL_ADMIN_EMAIL: str
INITIAL_ADMIN_PASSWORD: str
TEST_DATABASE_URL: str
def load_environment_variables(self, load_env="SANIC_SECURITY_") -> None:
"""
Any environment variables defined with the prefix argument will be applied to the config.
Args:
load_env (str): Prefix being used to apply environment variables into the config.
"""
for key, value in environ.items():
if not key.startswith(load_env):
continue
_, config_key = key.split(load_env, 1)
for converter in (int, float, str_to_bool, str):
try:
self[config_key] = converter(value)
break
except ValueError:
pass
def __init__(self):
super().__init__(DEFAULT_CONFIG)
self.__dict__ = self
self.load_environment_variables()
config = Config()
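# Minimal usage sketch: variables prefixed with SANIC_SECURITY_ override the
# corresponding defaults; the values below are illustrative.
if __name__ == "__main__":
    environ["SANIC_SECURITY_SECRET"] = "change-me-in-production"
    environ["SANIC_SECURITY_ALLOW_LOGIN_WITH_USERNAME"] = "true"
    overridden = Config()
    assert overridden.SECRET == "change-me-in-production"
    assert overridden.ALLOW_LOGIN_WITH_USERNAME is True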
|
/sanic_security-1.11.7-py3-none-any.whl/sanic_security/configuration.py
| 0.722821 | 0.196441 |
configuration.py
|
pypi
|
from typing import Callable
from sanic_session.base import BaseSessionInterface
try:
import asyncio_redis
except ImportError:
asyncio_redis = None
class RedisSessionInterface(BaseSessionInterface):
def __init__(
self,
redis_getter: Callable,
domain: str = None,
expiry: int = 2592000,
httponly: bool = True,
cookie_name: str = "session",
prefix: str = "session:",
sessioncookie: bool = False,
samesite: str = None,
session_name: str = "session",
secure: bool = False,
):
"""Initializes a session interface backed by Redis.
Args:
redis_getter (Callable):
Coroutine which should return an asyncio_redis connection pool
(suggested) or an asyncio_redis Redis connection.
domain (str, optional):
Optional domain which will be attached to the cookie.
expiry (int, optional):
Seconds until the session should expire.
httponly (bool, optional):
Adds the `httponly` flag to the session cookie.
cookie_name (str, optional):
Name used for the client cookie.
prefix (str, optional):
Memcache keys will take the format of `prefix+session_id`;
specify the prefix here.
sessioncookie (bool, optional):
Specifies if the sent cookie should be a 'session cookie', i.e
no Expires or Max-age headers are included. Expiry is still
fully tracked on the server side. Default setting is False.
samesite (str, optional):
Will prevent the cookie from being sent by the browser to the
target site in all cross-site browsing context, even when
following a regular link.
One of ('lax', 'strict')
Default: None
session_name (str, optional):
Name of the session that will be accessible through the
request.
e.g. If ``session_name`` is ``alt_session``, it should be
accessed like that: ``request.ctx.alt_session``
e.g. And if ``session_name`` is left to default, it should be
accessed like that: ``request.ctx.session``
Default: 'session'
secure (bool, optional):
Adds the `Secure` flag to the session cookie.
"""
if asyncio_redis is None:
raise RuntimeError("Please install asyncio_redis: pip install sanic_session[redis]")
self.redis_getter = redis_getter
super().__init__(
expiry=expiry,
prefix=prefix,
cookie_name=cookie_name,
domain=domain,
httponly=httponly,
sessioncookie=sessioncookie,
samesite=samesite,
session_name=session_name,
secure=secure,
)
async def _get_value(self, prefix, key):
redis_connection = await self.redis_getter()
return await redis_connection.get(prefix + key)
async def _delete_key(self, key):
redis_connection = await self.redis_getter()
await redis_connection.delete([key])
async def _set_value(self, key, data):
redis_connection = await self.redis_getter()
await redis_connection.setex(key, self.expiry, data)
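# Minimal wiring sketch; assumes the sanic_session Session extension and a
# reachable Redis at localhost:6379. Names, ports and expiry are illustrative.
if __name__ == "__main__":  # pragma: no cover
    from sanic import Sanic
    from sanic_session import Session

    app = Sanic("redis-session-demo")
    _pool = None

    async def redis_getter():
        # Lazily create one shared asyncio_redis pool on first use.
        global _pool
        if _pool is None:
            _pool = await asyncio_redis.Pool.create(
                host="localhost", port=6379, poolsize=10
            )
        return _pool

    Session(app, interface=RedisSessionInterface(redis_getter, expiry=3600))
    app.run(port=8000)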
|
/sanic_session-0.8.0.tar.gz/sanic_session-0.8.0/sanic_session/redis.py
| 0.825062 | 0.197541 |
redis.py
|
pypi
|
from sanic_session.base import BaseSessionInterface
try:
import aioredis
except ImportError:
aioredis = None
class AIORedisSessionInterface(BaseSessionInterface):
def __init__(
self,
redis,
domain: str = None,
expiry: int = 2592000,
httponly: bool = True,
cookie_name: str = "session",
prefix: str = "session:",
sessioncookie: bool = False,
samesite: str = None,
session_name: str = "session",
secure: bool = False,
):
"""Initializes a session interface backed by Redis.
Args:
redis (Callable):
aioredis connection or connection pool instance.
domain (str, optional):
Optional domain which will be attached to the cookie.
expiry (int, optional):
Seconds until the session should expire.
httponly (bool, optional):
Adds the `httponly` flag to the session cookie.
cookie_name (str, optional):
Name used for the client cookie.
prefix (str, optional):
Memcache keys will take the format of `prefix+session_id`;
specify the prefix here.
sessioncookie (bool, optional):
Specifies if the sent cookie should be a 'session cookie', i.e
no Expires or Max-age headers are included. Expiry is still
fully tracked on the server side. Default setting is False.
samesite (str, optional):
Will prevent the cookie from being sent by the browser to the target
site in all cross-site browsing context, even when following a regular link.
One of ('lax', 'strict')
Default: None
session_name (str, optional):
Name of the session that will be accessible through the request.
e.g. If ``session_name`` is ``alt_session``, it should be
accessed like that: ``request.ctx.alt_session``
e.g. And if ``session_name`` is left to default, it should be
accessed like that: ``request.ctx.session``
Default: 'session'
secure (bool, optional):
Adds the `Secure` flag to the session cookie.
"""
if aioredis is None:
raise RuntimeError("Please install aioredis: pip install sanic_session[aioredis]")
self.redis = redis
super().__init__(
expiry=expiry,
prefix=prefix,
cookie_name=cookie_name,
domain=domain,
httponly=httponly,
sessioncookie=sessioncookie,
samesite=samesite,
session_name=session_name,
secure=secure,
)
async def _get_value(self, prefix, sid):
return await self.redis.get(self.prefix + sid)
async def _delete_key(self, key):
await self.redis.delete(key)
async def _set_value(self, key, data):
await self.redis.setex(key, self.expiry, data)
|
/sanic_session-0.8.0.tar.gz/sanic_session-0.8.0/sanic_session/aioredis.py
| 0.698741 | 0.171408 |
aioredis.py
|
pypi
|
import json
from collections import OrderedDict
class SseEvent(object):
def __init__(self, event=None):
self.data = None
self.options = OrderedDict({
"event": event,
})
@property
def to_string(self):
raise NotImplementedError("to_string method must be implemented")
@property
def to_dict(self):
return dict(**self.options)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.to_string == other.to_string
def __repr__(self):
kwargs_repr = ", ".join(
[f"{key}={value!r}" for key, value in self.options.items() if value is not None]
)
return f"{self.__class__.__name__}({kwargs_repr})"
def __str__(self):
return self.to_string
def clone(self):
return self.__class__(**self.to_dict)
def check_event(self, event):
return self.options["event"] == event
class DataEvent(SseEvent):
mapping = OrderedDict(
event=lambda e: f"event: {e}",
data=lambda d: f"data: {json.dumps(d)}",
event_id=lambda i: f"id: {i}",
retry=lambda r: f"retry: {r}"
)
def __init__(self, data, event=None, event_id=None, retry=None):
if not data:
raise ValueError(f"data cannot be None")
self.data = data
self.options = OrderedDict({
"event": event,
"event_id": event_id,
"retry": retry,
})
@property
def to_dict(self):
return dict(data=self.data, **self.options)
@property
def to_string(self):
info = self.to_dict
lines = []
for key, transfer in self.mapping.items():
value = info.get(key)
if not value:
continue
lines.append(transfer(value))
return "\n".join(lines) + "\n\n"
def __repr__(self):
kwargs_repr = ", ".join(
[f"{key}={value!r}" for key, value in self.options.items() if value is not None]
)
return f"{self.__class__.__name__}(data={self.data!r}, {kwargs_repr})"
class ControlEvent(SseEvent):
EVENT_PING = "ping"
EVENT_TERMINATION = "termination"
@property
def to_string(self):
return f": {self.options['event']}\n\n"
def test_event():
e1 = DataEvent({"test": 1}, "fetch", "f1")
print(repr(e1))
e2 = e1.clone()
print(e1 == e2)
print(e2.to_string)
ping_event = ControlEvent(ControlEvent.EVENT_PING)
print(ping_event.clone().to_string)
termination_event = ControlEvent(ControlEvent.EVENT_TERMINATION)
print(repr(termination_event))
print(termination_event.clone().to_string)
if __name__ == '__main__':
test_event()
|
/sanic-sse-py3-1.0.6.tar.gz/sanic-sse-py3-1.0.6/sse/core/event.py
| 0.667798 | 0.228393 |
event.py
|
pypi
|
import asyncio
import uuid
from collections import defaultdict
from typing import Dict
class _StopMessage: # pylint: disable=too-few-public-methods
pass
class PubSub:
"""
Implementation of publish/subscriber protocol
"""
def __init__(self):
self._channels = defaultdict(dict)
def publish(self, data: str, channel_id: str = None):
"""
Publish data to all subscribers or to channel with provided channel_id.
This call is blocking.
        :param str data: The data to publish
:param str channel_id: If given then data will be send only to channel with that id
"""
return asyncio.gather(
*[client.put(data) for client in self._channels[channel_id].values()]
)
def register(self, channel_id: str = None):
"""
Register new subscriber
Return identifier of subscriber (str)
"""
client_id = str(uuid.uuid4())
q = asyncio.Queue()
self._channels[channel_id][client_id] = q
if channel_id:
self._channels[None][client_id] = q
return client_id
def delete(self, client_id: str, channel_id: str = None):
"""
        Delete the subscriber with the given client_id from the given channel
:param str client_id: Identifier of client
:param str channel_id: Identifier of channel
"""
try:
del self._channels[channel_id][client_id]
if len(self._channels[channel_id]) == 0:
del self._channels[channel_id]
if channel_id:
del self._channels[None][client_id]
except KeyError:
return False
return True
async def get(self, client_id: str, channel_id: str = None):
"""
Return data for given subscriber. This call is blocking.
:param str client_id: Identifier of client
:param str channel_id: Identifier of channel
Return received data (str)
"""
data = await self._channels[channel_id][client_id].get()
if isinstance(data, _StopMessage):
self.delete(client_id, channel_id)
raise ValueError("Stop message received")
return data
def task_done(self, client_id: str, channel_id: str = None):
"""
Notify that current data was processed
:param str client_id: Identifier of client
:param str channel_id: Identifier of channel
"""
self._channels[channel_id][client_id].task_done()
async def close(self):
"""
Close all subscribers
"""
await self.publish(_StopMessage())
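# Minimal usage sketch for PubSub; channel and payload names are illustrative.
async def _demo() -> None:
    pubsub = PubSub()
    client_id = pubsub.register(channel_id="news")
    await pubsub.publish("hello subscribers", channel_id="news")
    assert await pubsub.get(client_id, channel_id="news") == "hello subscribers"
    pubsub.task_done(client_id, channel_id="news")
    await pubsub.close()


if __name__ == "__main__":
    asyncio.run(_demo())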
|
/sanic_sse-0.3.1.tar.gz/sanic_sse-0.3.1/sanic_sse/pub_sub.py
| 0.776792 | 0.219756 |
pub_sub.py
|
pypi
|
from sanic.response import json
from cerberus import Validator
from functools import wraps
JSON_DATA_ENTRY_TYPE = 'json_data_property'
QUERY_ARG_ENTRY_TYPE = 'query_argument'
REQ_BODY_ENTRY_TYPE = 'request_body'
def validate_json(schema, clean=False, status_code=400):
'''Decorator. Validates request body json.
When *clean* is true, normalized data is passed to the decorated method
as *valid_json*.
Args:
schema (dict): Cerberus-compatible schema description
clean (bool): should cleaned json be passed to the decorated method
status_code (number): status code to return when data is incorrect
'''
validator = Validator(schema)
def vd(f):
@wraps(f)
def wrapper(request, *args, **kwargs):
if request.json is None:
return _request_body_not_json_response()
validation_passed = validator.validate(request.json or {})
if validation_passed:
if clean:
kwargs['valid_json'] = validator.document
return f(request, *args, **kwargs)
else:
return _validation_failed_response(validator,
JSON_DATA_ENTRY_TYPE,
status_code)
return wrapper
return vd
def validate_args(schema, clean=False, status_code=400):
'''Decorator. Validates querystring arguments.
When *clean* is True, normalized data is passed to the decorated method
as *valid_args*.
Args:
schema (dict): Cerberus-compatible schema description
clean (bool): should cleaned args be passed to the decorated method
status_code (number): status code to return when data is incorrect
'''
validator = Validator(schema)
def vd(f):
@wraps(f)
def wrapper(request, *args, **kwargs):
validation_passed = validator.validate(dict(request.query_args))
if validation_passed:
if clean:
kwargs['valid_args'] = validator.document
return f(request, *args, **kwargs)
else:
return _validation_failed_response(validator,
QUERY_ARG_ENTRY_TYPE,
status_code)
return wrapper
return vd
def _validation_failed_response(validator, entry_type, status_code=400):
return json(
{
'error': {
'type': 'validation_failed',
'message': 'Validation failed.',
'invalid': _validation_failures_list(validator, entry_type)
}
},
status=status_code)
def _validation_failures_list(validator, entry_type):
return [
_validation_error_description(error, entry_type)
for error in _document_errors(validator)
]
def _document_errors(validator):
return _traverse_tree(validator.document_error_tree)
def _traverse_tree(node):
if not node.descendants:
yield from node.errors
for k in node.descendants:
yield from _traverse_tree(node.descendants[k])
def _validation_error_description(error, entry_type):
return {
'entry_type': entry_type,
'entry': _path_to_field(error),
'rule': _rule(error),
'constraint': _constraint(error)
}
def _path_to_field(error):
return '.'.join(map(str, error.document_path))
def _rule(error):
return error.rule or 'allowed_field'
def _constraint(error):
if error.rule == 'coerce':
return True
return error.constraint or False
def _request_body_not_json_response():
return json(
{
'error': {
'type': 'unsupported_media_type',
'message': 'Expected JSON body.',
'invalid': [{
'entry_type': REQ_BODY_ENTRY_TYPE,
'entry': '',
'rule': 'json',
'constraint': True
}],
}
},
status=415)
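# Minimal usage sketch; the schema, route and handler below are illustrative.
if __name__ == "__main__":  # pragma: no cover
    from sanic import Sanic

    app = Sanic("validation-demo")

    person_schema = {
        "name": {"type": "string", "required": True},
        "age": {"type": "integer", "min": 0},
    }

    @app.post("/people")
    @validate_json(person_schema, clean=True)
    async def create_person(request, valid_json):
        # valid_json is the normalized document produced by Cerberus.
        return json({"created": valid_json})

    app.run(port=8000)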
|
/sanic-validation-0.5.1.tar.gz/sanic-validation-0.5.1/sanic_validation/decorators.py
| 0.733833 | 0.177063 |
decorators.py
|
pypi
|
from sanic.exceptions import InvalidUsage
from sanic.constants import HTTP_METHODS
class HTTPMethodView:
"""Simple class based implementation of view for the sanic.
You should implement methods (get, post, put, patch, delete) for the class
to every HTTP method you want to support.
For example:
.. code-block:: python
class DummyView(HTTPMethodView):
def get(self, request, *args, **kwargs):
return text('I am get method')
def put(self, request, *args, **kwargs):
return text('I am put method')
etc.
If someone tries to use a non-implemented method, there will be a
405 response.
If you need any url params just mention them in method definition:
.. code-block:: python
class DummyView(HTTPMethodView):
def get(self, request, my_param_here, *args, **kwargs):
return text('I am get method with %s' % my_param_here)
To add the view into the routing you could use
1) app.add_route(DummyView.as_view(), '/')
2) app.route('/')(DummyView.as_view())
To add any decorator you could set it into decorators variable
"""
decorators = []
def dispatch_request(self, request, *args, **kwargs):
handler = getattr(self, request.method.lower(), None)
return handler(request, *args, **kwargs)
@classmethod
def as_view(cls, *class_args, **class_kwargs):
"""Return view function for use with the routing system, that
dispatches request to appropriate handler method.
"""
def view(*args, **kwargs):
self = view.view_class(*class_args, **class_kwargs)
return self.dispatch_request(*args, **kwargs)
if cls.decorators:
view.__module__ = cls.__module__
for decorator in cls.decorators:
view = decorator(view)
view.view_class = cls
view.__doc__ = cls.__doc__
view.__module__ = cls.__module__
view.__name__ = cls.__name__
return view
def stream(func):
func.is_stream = True
return func
class CompositionView:
"""Simple method-function mapped view for the sanic.
You can add handler functions to methods (get, post, put, patch, delete)
for every HTTP method you want to support.
For example:
view = CompositionView()
view.add(['GET'], lambda request: text('I am get method'))
view.add(['POST', 'PUT'], lambda request: text('I am post/put method'))
etc.
If someone tries to use a non-implemented method, there will be a
405 response.
"""
def __init__(self):
self.handlers = {}
def add(self, methods, handler, stream=False):
if stream:
handler.is_stream = stream
for method in methods:
if method not in HTTP_METHODS:
raise InvalidUsage(
'{} is not a valid HTTP method.'.format(method))
if method in self.handlers:
raise InvalidUsage(
'Method {} is already registered.'.format(method))
self.handlers[method] = handler
def __call__(self, request, *args, **kwargs):
handler = self.handlers[request.method.upper()]
return handler(request, *args, **kwargs)
|
/sanic-win-0.6.1.tar.gz/sanic-win-0.6.1/sanic/views.py
| 0.82151 | 0.164617 |
views.py
|
pypi
|
from .base import BaseSessionInterface
def check_aiomcache_installed():
"""Check aiomcache installed, if absent - raises error.
"""
try:
import aiomcache
except ImportError: # pragma: no cover
aiomcache = None
if aiomcache is None:
raise RuntimeError("Please install aiomcache: pip install sanic_session[aiomcache]")
class MemcacheSessionInterface(BaseSessionInterface):
def __init__(
self, memcache_connection,
domain: str=None, expiry: int = 2592000,
httponly: bool=True, cookie_name: str = 'session',
prefix: str = 'session:',
sessioncookie: bool=False,
pass_dependency_check: bool=False):
"""Initializes the interface for storing client sessions in memcache.
        Requires a client object established with `aiomcache`.
Args:
            memcache_connection (aiomcache.Client):
The memcache client used for interfacing with memcache.
domain (str, optional):
Optional domain which will be attached to the cookie.
expiry (int, optional):
Seconds until the session should expire.
httponly (bool, optional):
Adds the `httponly` flag to the session cookie.
cookie_name (str, optional):
Name used for the client cookie.
prefix (str, optional):
Memcache keys will take the format of `prefix+session_id`;
specify the prefix here.
sessioncookie (bool, optional):
Specifies if the sent cookie should be a 'session cookie', i.e
no Expires or Max-age headers are included. Expiry is still
fully tracked on the server side. Default setting is False.
pass_dependency_check (bool, optional):
Specifies, whether to check: are dependencies for
session interface installed.
Check can be passed, for example, when running tests.
"""
if not pass_dependency_check:
check_aiomcache_installed()
self.memcache_connection = memcache_connection
# memcache has a maximum 30-day cache limit
if expiry > 2592000:
self.expiry = 0
else:
self.expiry = expiry
self.prefix = prefix
self.cookie_name = cookie_name
self.domain = domain
self.httponly = httponly
self.sessioncookie = sessioncookie
async def _get_value(self, prefix, sid):
key = (self.prefix + sid).encode()
value = await self.memcache_connection.get(key)
return value.decode() if value else None
async def _delete_key(self, key):
return await self.memcache_connection.delete(key.encode())
async def _set_value(self, key, data):
return await self.memcache_connection.set(
key.encode(), data.encode(),
exptime=self.expiry
)
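# Minimal construction sketch; assumes an aiomcache client created elsewhere.
# Host, port and expiry are illustrative.
#
#     import aiomcache
#     client = aiomcache.Client("127.0.0.1", 11211)
#     interface = MemcacheSessionInterface(client, expiry=3600)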
|
/sanic_session_2-0.2.6.tar.gz/sanic_session_2-0.2.6/sanic_session/memcache.py
| 0.715523 | 0.150496 |
memcache.py
|
pypi
|
from .base import BaseSessionInterface
def check_aioredis_installed():
"""Check aioredis installed, if absent - raises error.
"""
try:
import aioredis
except ImportError:
raise RuntimeError("Please install aioredis: pip install sanic_session[aioredis]")
class AIORedisSessionInterface(BaseSessionInterface):
def __init__(
self,
redis,
domain: str=None,
expiry: int = 2592000,
httponly: bool=True,
cookie_name: str='session',
prefix: str='session:',
sessioncookie: bool=False,
pass_dependency_check: bool=False,
):
"""Initializes a session interface backed by Redis.
Args:
redis (Callable):
aioredis connection or connection pool instance.
domain (str, optional):
Optional domain which will be attached to the cookie.
expiry (int, optional):
Seconds until the session should expire.
httponly (bool, optional):
Adds the `httponly` flag to the session cookie.
cookie_name (str, optional):
Name used for the client cookie.
prefix (str, optional):
Memcache keys will take the format of `prefix+session_id`;
specify the prefix here.
sessioncookie (bool, optional):
Specifies if the sent cookie should be a 'session cookie', i.e
no Expires or Max-age headers are included. Expiry is still
fully tracked on the server side. Default setting is False.
pass_dependency_check (bool, optional):
Specifies, whether to check: are dependencies for
session interface installed.
Check can be passed, for example, when running tests.
"""
if not pass_dependency_check:
check_aioredis_installed()
self.redis = redis
self.expiry = expiry
self.prefix = prefix
self.cookie_name = cookie_name
self.domain = domain
self.httponly = httponly
self.sessioncookie = sessioncookie
async def _get_value(self, prefix, sid):
return await self.redis.get(self.prefix + sid)
async def _delete_key(self, key):
await self.redis.delete(key)
async def _set_value(self, key, data):
await self.redis.setex(key, self.expiry, data)
|
/sanic_session_2-0.2.6.tar.gz/sanic_session_2-0.2.6/sanic_session/aioredis.py
| 0.725454 | 0.168036 |
aioredis.py
|
pypi
|
from typing import Callable
from .base import BaseSessionInterface
def check_asyncio_redis_installed():
"""Check asyncio_redis installed, if absent - raises error.
"""
try:
import asyncio_redis
except ImportError:
raise RuntimeError("Please install asyncio_redis: pip install sanic_session[asyncio_redis]")
class AsyncioRedisSessionInterface(BaseSessionInterface):
def __init__(
self,
redis_connection: Callable,
domain: str=None,
expiry: int=2592000,
        httponly: bool=True,
cookie_name: str='session',
prefix: str='session:',
sessioncookie: bool=False,
pass_dependency_check: bool=False,
):
"""Initializes a session interface backed by Redis.
Args:
redis_connection (Callable):
asyncio_redis connection pool (suggested)
or an asyncio_redis Redis connection.
domain (str, optional):
Optional domain which will be attached to the cookie.
expiry (int, optional):
Seconds until the session should expire.
httponly (bool, optional):
Adds the `httponly` flag to the session cookie.
cookie_name (str, optional):
Name used for the client cookie.
prefix (str, optional):
Memcache keys will take the format of `prefix+session_id`;
specify the prefix here.
sessioncookie (bool, optional):
Specifies if the sent cookie should be a 'session cookie', i.e
no Expires or Max-age headers are included. Expiry is still
fully tracked on the server side. Default setting is False.
pass_dependency_check (bool, optional):
                Whether to skip checking that the dependencies for this
                session interface are installed. The check can be skipped,
                for example, when running tests.
"""
if not pass_dependency_check:
check_asyncio_redis_installed()
self.redis_connection = redis_connection
self.expiry = expiry
self.prefix = prefix
self.cookie_name = cookie_name
self.domain = domain
self.httponly = httponly
self.sessioncookie = sessioncookie
async def _get_value(self, prefix, key):
return await self.redis_connection.get(prefix + key)
async def _delete_key(self, key):
await self.redis_connection.delete([key])
async def _set_value(self, key, data):
await self.redis_connection.setex(key, self.expiry, data)
|
/sanic_session_2-0.2.6.tar.gz/sanic_session_2-0.2.6/sanic_session/asyncio_redis.py
| 0.806434 | 0.205018 |
asyncio_redis.py
|
pypi
|
import traceback
import aiomysql
import pymysql
version = "0.2"
version_info = (0, 2, 0, 0)
class SanicDB:
"""A lightweight wrapper around aiomysql.Pool for easy to use
"""
def __init__(self, host, database, user, password,
loop=None, sanic=None,
minsize=3, maxsize=5,
return_dict=True,
pool_recycle=7*3600,
autocommit=True,
charset = "utf8mb4", **kwargs):
'''
        kwargs: all parameters that aiomysql.connect() accepts.
'''
self.db_args = {
'host': host,
'db': database,
'user': user,
'password': password,
'minsize': minsize,
'maxsize': maxsize,
'charset': charset,
'loop': loop,
'autocommit': autocommit,
'pool_recycle': pool_recycle,
}
self.sanic = sanic
if sanic:
sanic.db = self
if return_dict:
self.db_args['cursorclass']=aiomysql.cursors.DictCursor
if kwargs:
self.db_args.update(kwargs)
self.pool = None
async def init_pool(self):
if self.sanic:
self.db_args['loop'] = self.sanic.loop
self.pool = await aiomysql.create_pool(**self.db_args)
async def query(self, query, *parameters, **kwparameters):
"""Returns a row list for the given query and parameters."""
if not self.pool:
await self.init_pool()
async with self.pool.acquire() as conn:
async with conn.cursor() as cur:
try:
await cur.execute(query, kwparameters or parameters)
ret = await cur.fetchall()
except pymysql.err.InternalError:
await conn.ping()
await cur.execute(query, kwparameters or parameters)
ret = await cur.fetchall()
return ret
async def get(self, query, *parameters, **kwparameters):
"""Returns the (singular) row returned by the given query.
"""
if not self.pool:
await self.init_pool()
async with self.pool.acquire() as conn:
async with conn.cursor() as cur:
try:
await cur.execute(query, kwparameters or parameters)
ret = await cur.fetchone()
except pymysql.err.InternalError:
await conn.ping()
await cur.execute(query, kwparameters or parameters)
ret = await cur.fetchone()
return ret
async def execute(self, query, *parameters, **kwparameters):
"""Executes the given query, returning the lastrowid from the query."""
if not self.pool:
await self.init_pool()
async with self.pool.acquire() as conn:
async with conn.cursor() as cur:
try:
await cur.execute(query, kwparameters or parameters)
except Exception:
# https://github.com/aio-libs/aiomysql/issues/340
await conn.ping()
await cur.execute(query, kwparameters or parameters)
return cur.lastrowid
# high level interface
async def table_has(self, table_name, field, value):
sql = 'SELECT {} FROM {} WHERE {}=%s limit 1'.format(field, table_name, field)
d = await self.get(sql, value)
return d
async def table_insert(self, table_name, item, ignore_duplicated=True):
'''item is a dict : key is mysql table field'''
fields = list(item.keys())
values = list(item.values())
fieldstr = ','.join(fields)
valstr = ','.join(['%s'] * len(item))
sql = 'INSERT INTO %s (%s) VALUES(%s)' % (table_name, fieldstr, valstr)
try:
last_id = await self.execute(sql, *values)
return last_id
except Exception as e:
if ignore_duplicated and e.args[0] == 1062:
# just skip duplicated item
return 0
traceback.print_exc()
print('sql:', sql)
print('item:')
for i in range(len(fields)):
vs = str(values[i])
if len(vs) > 300:
print(fields[i], ' : ', len(vs), type(values[i]))
else:
print(fields[i], ' : ', vs, type(values[i]))
raise e
async def table_update(self, table_name, updates,
field_where, value_where):
'''updates is a dict of {field_update:value_update}'''
upsets = []
values = []
for k, v in updates.items():
s = '%s=%%s' % k
upsets.append(s)
values.append(v)
upsets = ','.join(upsets)
        # Parameterize the WHERE value instead of quoting it into the SQL
        # string, so values containing quotes are handled safely.
        sql = 'UPDATE %s SET %s WHERE %s=%%s' % (
            table_name,
            upsets,
            field_where,
        )
        await self.execute(sql, *values, value_where)
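# Illustrative usage sketch (not part of this module); the connection
# parameters and table/column names below are made up:
#     db = SanicDB('localhost', 'test_db', 'user', 'password')
#     rows = await db.query('SELECT * FROM articles WHERE id > %s', 10)
#     last_id = await db.table_insert('articles', {'title': 'hello'})
#     await db.table_update('articles', {'title': 'updated'}, 'id', last_id)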
|
/sanicdb-0.2-py3-none-any.whl/sanicdb.py
| 0.509276 | 0.179423 |
sanicdb.py
|
pypi
|
sanitize_ml_labels
=========================================================================================
|pip| |downloads|
Simple Python package to sanitize ML-related labels in a standard way.
Why do I need this?
-------------------
So you have some kind of plot and you have some ML-related labels.
Since I always rename and sanitize them the same way, I have prepared
this package to always sanitize them in a standard fashion.
How do I install this package?
----------------------------------------------
As usual, just download it using pip:
.. code:: shell
pip install sanitize_ml_labels
Usage examples
----------------------------------------------
Here are a couple of common examples: normalizing a set of metric names or a set of model names.
.. code:: python
from sanitize_ml_labels import sanitize_ml_labels
# Example for metrics
labels = [
"acc",
"loss",
"auroc",
"lr"
]
sanitize_ml_labels(labels)
# ["Accuracy", "Loss", "AUROC", "Learning rate"]
# Example for models
labels = [
"vanilla mlp",
"vanilla cnn",
"vanilla ffnn",
"vanilla perceptron"
]
sanitize_ml_labels(labels)
# ["MLP", "CNN", "FFNN", "Perceptron"]
Corner cases
~~~~~~~~~~~~~~~~
In some cases, you may have a combination of terms separated by hyphens that must be removed, plus words
that are actually correctly written with hyphens. We approach this problem with a heuristic
based on an `extended list of over 45K hyphenated English words <https://github.com/LucaCappelletti94/sanitize_ml_labels/blob/master/hyphenations.json.gz>`__, originally retrieved from
the `Metadata consulting website <https://metadataconsulting.blogspot.com/2019/07/An-extensive-massive-near-complete-list-of-all-English-Hyphenated-words.html>`__.
From such a word list, we generate an index by running:
.. code:: python
index = {}
for word in words:
word = word.lower()
index.setdefault(word[0], []).append((word, word[1:]))
And from there the user experience is transparent and looks as follows:
.. code:: python
# Running the following
sanitize_ml_labels("non-existent-edges-in-graph")
# will yield the string `Non-existent edges in graph`
The lookup heuristic to quickly find a hyphenated word in a given label within the large haystack
was written by `Tommaso Fontana <https://github.com/zommiommy>`__.
Extra utilities
---------------
Since I always use metric sanitization alongside axis normalization, it is useful to know which axes
should have their limits set between zero and one, to avoid introducing visualization bias in the metrics.
For this reason I have created the method :code:`is_normalized_metric`, which, after normalizing the given metric name,
validates it against known normalized metrics (that is, metrics bounded between 0 and 1).
Analogously, I have also created the method :code:`is_absolutely_normalized_metric` to validate a metric for the range
between -1 and 1.
.. code:: python
from sanitize_ml_labels import is_normalized_metric, is_absolutely_normalized_metric
is_normalized_metric("MSE") # False
is_normalized_metric("acc") # True
is_normalized_metric("accuracy") # True
is_normalized_metric("AUROC") # True
is_normalized_metric("auprc") # True
is_absolutely_normalized_metric("auprc") # False
is_absolutely_normalized_metric("MCC") # True
is_absolutely_normalized_metric("Markedness") # True
New features and issues
-----------------------
As always, for new features and issues you can either open a new issue or a pull request.
A pull request will always be the quicker way, but I'll look into the issues when I get the time.
Tests Coverage
----------------------------------------------
I have strived to maintain 100% code coverage in this project:
+---------------------------------------------------+------------+---------+----------+----------+
| Module | statements | missing | excluded | coverage |
+===================================================+============+=========+==========+==========+
| Total | 84 | 0 | 0 | 100% |
+---------------------------------------------------+------------+---------+----------+----------+
| sanitize_ml_labels/__init__.py | 3 | 0 | 0 | 100% |
+---------------------------------------------------+------------+---------+----------+----------+
| sanitize_ml_labels/__version__.py | 1 | 0 | 0 | 100% |
+---------------------------------------------------+------------+---------+----------+----------+
| sanitize_ml_labels/is_normalized_metric.py | 10 | 0 | 0 | 100% |
+---------------------------------------------------+------------+---------+----------+----------+
| sanitize_ml_labels/find_true_hyphenated_words.py | 19 | 0 | 0 | 100% |
+---------------------------------------------------+------------+---------+----------+----------+
| sanitize_ml_labels/sanitize_ml_labels.py | 70 | 0 | 0 | 100% |
+---------------------------------------------------+------------+---------+----------+----------+
You can verify the test coverage of this repository by running in its root:
.. code:: bash
pytest --cov
.. |pip| image:: https://badge.fury.io/py/sanitize-ml-labels.svg
:target: https://badge.fury.io/py/sanitize-ml-labels
:alt: Pypi project
.. |downloads| image:: https://pepy.tech/badge/sanitize-ml-labels
:target: https://pepy.tech/badge/sanitize-ml-labels
:alt: Pypi total project downloads
|
/sanitize_ml_labels-1.0.50.tar.gz/sanitize_ml_labels-1.0.50/README.rst
| 0.95183 | 0.740632 |
README.rst
|
pypi
|
from typing import List, Dict, Union
import re
import compress_json
from .find_true_hyphenated_words import find_true_hyphenated_words
def consonants_to_upper(label: str) -> str:
"""Return given label with consonants groups to uppercase.
Examples
--------
Vanilla cnn model -> Vanilla CNN model
mlp model -> MLP model
Parameters
----------
label: str
label to parse.
Returns
-------
Label with formatted consonants.
"""
return re.sub(
r"\b([b-df-hj-np-tv-zB-DF-HJ-NP-TV-Z]{2,})\b",
lambda x: x.group(1).upper(),
label
)
def targets_to_spaces(label: str, targets: List[str]) -> str:
"""Return given label with consonants groups to uppercase.
Examples
--------
vanilla-cnn_model -> vanilla cnn model
mlp_model -> mlp_model
Parameters
----------
label: str
label to parse.
targets: List[str]
list of targets to replace to spaces.
Returns
-------
Label with replaced spaces.
"""
for target in targets:
if target in label:
label = label.replace(target, " ")
return label
def have_descriptor(labels: List[str], descriptor: str, generic_words_cooccurring_with_descriptors: List[str]) -> bool:
"""Return boolean representing if all labels contain the given descriptor.
Parameters
----------
labels: List[str],
labels to parse.
descriptor: str,
The descriptor that all texts need to contain.
A descriptor is a term like 'vanilla' or 'biolink', that is often
added to all the terms in a set. When all terms in a set have the
same descriptor, there is no need for the descriptor to be shown
in the first place and only contributes to cluttering in the
visualization at hand.
generic_words_cooccurring_with_descriptors: List[str],
        List of words that are known to appear alongside descriptors.
        Some words, like 'Other' or 'Unknown', are often added to descriptor
        sets without the main descriptor. In these cases we still want to drop
        the descriptor if all the other terms have it.
Returns
-------
    Boolean representing whether all labels contain the descriptor.
"""
return all(
descriptor in label
for label in labels
if label.lower() not in generic_words_cooccurring_with_descriptors
)
def are_real_values_labels(labels: List[str]) -> bool:
"""Return whether all labels are floating point values.
Parameters
----------
labels: List[str],
labels to parse.
"""
for label in labels:
try:
float(label.strip())
except ValueError:
return False
return True
def sanitize_real_valued_labels(labels: List[str], maximum_resolution: int) -> List[str]:
"""Returns list of real valued labels without trailing zeros.
Parameters
----------
labels: List[str]
labels to parse.
maximum_resolution: int
Maximum length of the floating point part.
"""
new_labels = []
for label in labels:
label = label.strip()
if "." in label:
label = "{{:.{maximum_resolution}f}}".format(
maximum_resolution=maximum_resolution
).format(float(label))
label = ".".join((
label.split(".")[0],
label.split(".")[1].rstrip("0")
)).strip(".")
new_labels.append(label)
return new_labels
def remove_descriptor(labels: List[str], descriptor: str) -> List[str]:
"""Return list of labels without the term descriptor"""
return [
label.replace(descriptor, "")
for label in labels
]
def apply_replace_defaults(labels: List[str], custom_defaults: Dict[str, List[str]]) -> List[str]:
"""Return list of labels with replaced defaults."""
defaults = {
**{
key: [
"(?<![a-z]){}(?![a-z])".format(val)
for val in values
]
for key, values in compress_json.local_load("labels.json").items()
},
**custom_defaults
}
new_labels = []
for label in labels:
replace_candidates = []
for default, targets in defaults.items():
for target in targets:
regex = re.compile(target, re.IGNORECASE)
matches = regex.findall(label)
if bool(matches):
for match in matches:
replace_candidates.append((match, default))
# The following is required to avoid replacing substrings.
replace_candidates = sorted(
replace_candidates,
key=lambda x: len(x[0]),
reverse=False
)
replace_candidates = [
(j, val)
for i, (j, val) in enumerate(replace_candidates)
if all(j not in k.lower() for _, k in replace_candidates[i + 1:])
]
replace_candidates = sorted(
replace_candidates,
key=lambda x: len(x[0]),
reverse=True
)
for target, default in replace_candidates:
label = label.replace(target, default)
new_labels.append(label)
return new_labels
def clear_spaces(labels: List[str]) -> List[str]:
"""Remove multiple sequences of spaces and strip spaces from labels.
Parameters
---------------------------------
labels: List[str],
The labels from where to remove the duplicated spaces.
Returns
---------------------------------
List of labels without duplicated spaces.
"""
return [
" ".join([
term
for term in label.split()
if term
])
for label in labels
]
def apply_soft_capitalization(labels: List[str]) -> List[str]:
"""Return labels capitalized only when no other capitalization is present.
Parameters
------------------------
labels: List[str]
The labels where to apply soft capitalization.
Returns
------------------------
List of labels with soft capitalization applied.
"""
return [
label.capitalize() if label.lower() == label
else label
for label in labels
]
def to_string(labels: List) -> List[str]:
"""Convert all labels to strings.
Parameters
-----------------------
labels: List
The labels to be converted to strings if they are not already.
Returns
-----------------------
List with labels converted to strings.
"""
return [
str(label)
for label in labels
]
def sanitize_ml_labels(
labels: Union[List[str], str],
upper_case_consonants_clusters: bool = True,
replace_with_spaces: List[str] = ("-", "_", ":", "<", ">"),
detect_and_remove_homogeneous_descriptors: bool = True,
detect_and_remove_trailing_zeros: bool = True,
replace_defaults: bool = True,
soft_capitalization: bool = True,
preserve_true_hyphenation: bool = True,
maximum_resolution: int = 3,
custom_defaults: Dict[str, Union[List[str], str]] = None
) -> List[str]:
"""Return sanitized labels in standard way.
Parameters
----------
labels: Union[List[str], str]
        Either a label or a list of labels to sanitize.
upper_case_consonants_clusters: bool = True
        Whether to convert detected initials to upper case.
replace_with_spaces: List[str] = ("-", "_", ":", "<", ">")
Characters to be replaced with spaces.
detect_and_remove_homogeneous_descriptors: bool = True
        Whether to remove known descriptors when all terms contain them.
detect_and_remove_trailing_zeros: bool = True
Whether to remove trailing zeros when labels are all numeric.
replace_defaults: bool = True
        Whether to replace default terms.
soft_capitalization: bool = True
        Whether to apply soft capitalization,
        capitalizing only when no capitalization is already present.
preserve_true_hyphenation: bool = True
Whether we should try to preserve the true hyphenation
when the hyphen character should be otherwise removed.
Consider that this is done through a comprehensive heuristic
using over 45k hyphenated words from the English language.
maximum_resolution: int = 3
Maximum number of digits to preserve in real-valued labels.
custom_defaults: Dict[str, Union[List[str], str]] = None
List of custom defaults to be used for remapping.
Returns
-------
Sanitized labels.
"""
try:
iter(labels)
is_iterable = True
except TypeError:
is_iterable = False
single_label = not is_iterable or isinstance(labels, str)
if single_label:
labels = [labels]
labels = to_string(labels)
if detect_and_remove_homogeneous_descriptors:
generic_words_cooccurring_with_descriptors = compress_json.local_load(
"generic_words_cooccurring_with_descriptors.json"
)
for descriptor in compress_json.local_load("descriptors.json"):
if have_descriptor(labels, descriptor, generic_words_cooccurring_with_descriptors):
labels = remove_descriptor(
labels,
descriptor
)
if soft_capitalization:
labels = apply_soft_capitalization(labels)
if replace_defaults:
if custom_defaults is None:
custom_defaults = dict()
custom_defaults = dict([
(key, value) if isinstance(value, list)
else (key, [value])
for key, value in custom_defaults.items()
])
labels = apply_replace_defaults(labels, custom_defaults)
if detect_and_remove_trailing_zeros and are_real_values_labels(labels):
labels = sanitize_real_valued_labels(
labels,
maximum_resolution=maximum_resolution
)
else:
# If the hyphen character is among the characters that we should
# remove to normalize the label and it is requested to preserve
# the true hyphenation wherever possible, we try to identify
        # the true hyphenated words through a heuristic
need_to_run_hyphenation_check = (
"-" in replace_with_spaces and
preserve_true_hyphenation and
any("-" in label for label in labels)
)
if need_to_run_hyphenation_check:
new_labels = []
hyphenated_words = []
for label in labels:
if "-" in label:
lowercase_label = label.lower()
true_hyphenated_words = find_true_hyphenated_words(
lowercase_label
)
for true_hyphenated_word in true_hyphenated_words:
lowercase_label = label.lower()
position = lowercase_label.find(true_hyphenated_word)
if position == -1:
continue
true_hyphenated_word_with_possible_capitalization = label[position:position+len(
true_hyphenated_word
)]
label = label.replace(true_hyphenated_word_with_possible_capitalization, "{{word{number}}}".format(
number=len(hyphenated_words)
))
hyphenated_words.append(
true_hyphenated_word_with_possible_capitalization
)
new_labels.append(label)
# We update the current labels with the new labels
            # that are now wrapped to avoid removing hyphenated words.
labels = new_labels
labels = [
targets_to_spaces(label, replace_with_spaces)
for label in labels
]
labels = clear_spaces(labels)
# We now need to restore the hyphenated words which we have
# previously wrapped, if we have done so.
if need_to_run_hyphenation_check:
restored_labels = []
for label in labels:
for i, hyphenated_word in enumerate(hyphenated_words):
label = label.replace(
"{{word{number}}}".format(number=i),
hyphenated_word
)
restored_labels.append(label)
labels = restored_labels
if soft_capitalization:
labels = apply_soft_capitalization(labels)
if upper_case_consonants_clusters:
labels = [
consonants_to_upper(label)
for label in labels
]
if single_label:
return labels[0]
return labels
|
/sanitize_ml_labels-1.0.50.tar.gz/sanitize_ml_labels-1.0.50/sanitize_ml_labels/sanitize_ml_labels.py
| 0.954308 | 0.545286 |
sanitize_ml_labels.py
|
pypi
|
<a href="https://pypi.org/project/sanity-html/">
<img src="https://img.shields.io/pypi/v/sanity-html.svg" alt="Package version">
</a>
<a href="https://codecov.io/gh/otovo/python-sanity-html">
<img src="https://codecov.io/gh/otovo/python-sanity-html/branch/main/graph/badge.svg" alt="Code coverage">
</a>
<a href="https://pypi.org/project/python-sanity-html/">
<img src="https://img.shields.io/badge/python-3.7%2B-blue" alt="Supported Python versions">
</a>
<a href="http://mypy-lang.org/">
<img src="http://www.mypy-lang.org/static/mypy_badge.svg" alt="Checked with mypy">
</a>
# Sanity HTML Renderer for Python
This package generates HTML from [Portable Text](https://github.com/portabletext/portabletext).
For the most part, it mirrors [Sanity's](https://www.sanity.io/) own [block-content-to-html](https://www.npmjs.com/package/%40sanity/block-content-to-html) NPM library.
## Installation
```
pip install sanity-html
```
## Usage
Instantiate the `SanityBlockRenderer` class with your content and call the `render` method.
The following content
```python
from sanity_html import SanityBlockRenderer
renderer = SanityBlockRenderer({
"_key": "R5FvMrjo",
"_type": "block",
"children": [
{"_key": "cZUQGmh4", "_type": "span", "marks": ["strong"], "text": "A word of"},
{"_key": "toaiCqIK", "_type": "span", "marks": ["strong"], "text": " warning;"},
{"_key": "gaZingsA", "_type": "span", "marks": [], "text": " Sanity is addictive."}
],
"markDefs": [],
"style": "normal"
})
renderer.render()
```
Generates this HTML
```html
<p><strong>A word of warning;</strong> Sanity is addictive.</p>
```
### Supported types
The `block` and `span` types are supported out of the box.
### Custom types
Beyond the built-in types, you have the freedom to provide
your own serializers to render any custom `_type` the way you
would like to.
To illustrate, if you passed this data to the renderer class:
```python
from sanity_html import SanityBlockRenderer
renderer = SanityBlockRenderer({
"_type": "block",
"_key": "foo",
"style": "normal",
"children": [
{
"_type": "span",
"text": "Press, "
},
{
"_type": "button",
"text": "here"
},
{
"_type": "span",
"text": ", now!"
}
]
})
renderer.render()
```
The renderer would actually throw an error here, since `button`
does not have a corresponding built-in type serializer by default.
To render this text you must provide your own serializer, like this:
```python
from typing import Optional
from sanity_html import SanityBlockRenderer
from sanity_html.types import Block
def button_serializer(node: dict, context: Optional[Block], list_item: bool):
return f'<button>{node["text"]}</button>'
renderer = SanityBlockRenderer(
...,
custom_serializers={'button': button_serializer}
)
output = renderer.render()
```
With the custom serializer provided, the renderer would now successfully
output the following HTML:
```html
<p>Press <button>here</button>, now!</p>
```
### Supported mark definitions
The package provides several built-in marker definitions and styles; a short usage sketch follows the lists below:
**decorator marker definitions**
- `em`
- `strong`
- `code`
- `underline`
- `strike-through`
**annotation marker definitions**
- `link`
- `comment`
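As a rough sketch (the keys and URL below are made up and the exact output is approximate), decorators are applied by listing their names in a span's `marks`, while annotations such as `link` also need a matching entry in the block's `markDefs`:
```python
from sanity_html import SanityBlockRenderer
renderer = SanityBlockRenderer({
    "_type": "block",
    "_key": "example",
    "style": "normal",
    "markDefs": [{"_key": "linkKey", "_type": "link", "href": "https://example.com"}],
    "children": [
        {"_type": "span", "marks": [], "text": "Read the "},
        {"_type": "span", "marks": ["em", "linkKey"], "text": "docs"},
        {"_type": "span", "marks": [], "text": " carefully."},
    ],
})
renderer.render()
# Expected output along the lines of:
# <p>Read the <em><a href="https://example.com">docs</a></em> carefully.</p>
```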
### Custom mark definitions
Like with custom type serializers, additional serializers for
marker definitions and styles can be passed in like this:
```python
from sanity_html import SanityBlockRenderer
renderer = SanityBlockRenderer(
...,
custom_marker_definitions={'em': ComicSansEmphasis}
)
renderer.render()
```
The primary difference between a type serializer and a mark definition serializer
is that the latter uses a class structure, and has three required methods.
Here's an example of a custom style, adding an extra font
to the built-in equivalent serializer:
```python
from sanity_html.marker_definitions import MarkerDefinition
from sanity_html.types import Block, Span
class ComicSansEmphasis(MarkerDefinition):
    tag = 'em'
    @classmethod
    def render_prefix(cls, span: Span, marker: str, context: Block) -> str:
        return f'<{cls.tag} style="font-family: \'Comic Sans MS\', \'Comic Sans\', cursive;">'
@classmethod
def render_suffix(cls, span: Span, marker: str, context: Block) -> str:
return f'</{cls.tag}>'
@classmethod
def render(cls, span: Span, marker: str, context: Block) -> str:
result = cls.render_prefix(span, marker, context)
result += str(span.text)
result += cls.render_suffix(span, marker, context)
return result
```
Since the `render_suffix` and `render` methods here are actually identical to the base class,
they do not need to be specified, and the whole example can be reduced to:
```python
from sanity_html.marker_definitions import MarkerDefinition  # base
from sanity_html import SanityBlockRenderer
from sanity_html.types import Block, Span
class ComicSansEmphasis(MarkerDefinition):
    tag = 'em'
    @classmethod
    def render_prefix(cls, span: Span, marker: str, context: Block) -> str:
        return f'<{cls.tag} style="font-family: \'Comic Sans MS\', \'Comic Sans\', cursive;">'
renderer = SanityBlockRenderer(
...,
custom_marker_definitions={'em': ComicSansEmphasis}
)
renderer.render()
```
### Supported styles
Blocks can optionally define a `style` tag. These styles are supported (see the example after the list):
- `h1`
- `h2`
- `h3`
- `h4`
- `h5`
- `h6`
- `blockquote`
- `normal`
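For instance (a minimal sketch with made-up content, output inferred from the renderer source), a block with `style` set to `h2` should be wrapped in the matching heading tag:
```python
from sanity_html import SanityBlockRenderer
renderer = SanityBlockRenderer({
    "_type": "block",
    "_key": "heading",
    "style": "h2",
    "markDefs": [],
    "children": [{"_type": "span", "marks": [], "text": "Section title"}],
})
renderer.render()
# '<h2>Section title</h2>'
```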
## Missing features
For anyone interested, we would be happy to see a
default built-in serializer for the `image` type added.
In the meantime, users should be able to serialize image types by passing a custom serializer.
## Contributing
Contributions are always appreciated 👏
For details, see the [CONTRIBUTING.md](https://github.com/otovo/python-sanity-html/blob/main/CONTRIBUTING.md).
|
/sanity-html-1.0.0.tar.gz/sanity-html-1.0.0/README.md
| 0.670932 | 0.915922 |
README.md
|
pypi
|
from __future__ import annotations
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, cast
from sanity_html.utils import get_default_marker_definitions
if TYPE_CHECKING:
from typing import Literal, Optional, Tuple, Type, Union
from sanity_html.marker_definitions import MarkerDefinition
@dataclass(frozen=True)
class Span:
"""Class representation of a Portable Text span.
A span is the standard way to express inline text within a block.
"""
_type: Literal['span']
text: str
_key: Optional[str] = None
marks: list[str] = field(default_factory=list) # keys that correspond with block.mark_definitions
style: Literal['normal'] = 'normal'
@dataclass
class Block:
"""Class representation of a Portable Text block.
A block is what's typically recognized as a section of a text, e.g. a paragraph or a heading.
listItem and markDefs are camelCased to support dictionary unpacking.
"""
_type: Literal['block']
_key: Optional[str] = None
style: Literal['h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'blockquote', 'normal'] = 'normal'
level: Optional[int] = None
listItem: Optional[Literal['bullet', 'number', 'square']] = None
children: list[dict] = field(default_factory=list)
markDefs: list[dict] = field(default_factory=list)
marker_definitions: dict[str, Type[MarkerDefinition]] = field(default_factory=dict)
marker_frequencies: dict[str, int] = field(init=False)
def __post_init__(self) -> None:
"""
Add custom fields after init.
To make handling of span `marks` simpler, we define marker_definitions as a dict, from which
we can directly look up both annotation marks or decorator marks.
"""
marker_definitions = get_default_marker_definitions(self.markDefs)
marker_definitions.update(self.marker_definitions)
self.marker_definitions = marker_definitions
self.marker_frequencies = self._compute_marker_frequencies()
def _compute_marker_frequencies(self) -> dict[str, int]:
counts: dict[str, int] = {}
for child in self.children:
for mark in child.get('marks', []):
if mark in counts:
counts[mark] += 1
else:
counts[mark] = 0
return counts
def get_node_siblings(self, node: Union[dict, Span]) -> Tuple[Optional[dict], Optional[dict]]:
"""Return the sibling nodes (prev, next) to the given node."""
if not self.children:
return None, None
try:
if type(node) == dict:
node = cast(dict, node)
node_idx = self.children.index(node)
elif type(node) == Span:
node = cast(Span, node)
for index, item in enumerate(self.children):
if 'text' in item and node.text == item['text']:
# Is it possible to handle several identical texts?
node_idx = index
break
else:
raise ValueError(f'Expected dict or Span but received {type(node)}')
except ValueError:
return None, None
prev_node = None
next_node = None
if node_idx != 0:
prev_node = self.children[node_idx - 1]
if node_idx != len(self.children) - 1:
next_node = self.children[node_idx + 1]
return prev_node, next_node
|
/sanity-html-1.0.0.tar.gz/sanity-html-1.0.0/sanity_html/types.py
| 0.888995 | 0.354126 |
types.py
|
pypi
|
from __future__ import annotations
from typing import TYPE_CHECKING
from sanity_html.logger import logger
if TYPE_CHECKING:
from typing import Type
from sanity_html.types import Block, Span
class MarkerDefinition:
"""Base class for marker definition handlers."""
tag: str
@classmethod
def render_prefix(cls: Type[MarkerDefinition], span: Span, marker: str, context: Block) -> str:
"""Render the prefix for the marked span.
        Usually this is the opening of the HTML tag.
"""
logger.debug('Rendering %s prefix', cls.tag)
return f'<{cls.tag}>'
@classmethod
def render_suffix(cls: Type[MarkerDefinition], span: Span, marker: str, context: Block) -> str:
"""Render the suffix for the marked span.
        Usually this is the closing of the HTML tag.
"""
logger.debug('Rendering %s suffix', cls.tag)
return f'</{cls.tag}>'
@classmethod
def render(cls: Type[MarkerDefinition], span: Span, marker: str, context: Block) -> str:
"""Render the marked span directly with prefix and suffix."""
result = cls.render_prefix(span, marker, context)
result += str(span.text)
result += cls.render_suffix(span, marker, context)
return result
# Decorators
class DefaultMarkerDefinition(MarkerDefinition):
"""Marker used for unknown definitions."""
tag = 'span'
class EmphasisMarkerDefinition(MarkerDefinition):
"""Marker definition for <em> rendering."""
tag = 'em'
class StrongMarkerDefinition(MarkerDefinition):
"""Marker definition for <strong> rendering."""
tag = 'strong'
class CodeMarkerDefinition(MarkerDefinition):
"""Marker definition for <code> rendering."""
tag = 'code'
class UnderlineMarkerDefinition(MarkerDefinition):
"""Marker definition for <u> rendering."""
tag = 'span'
@classmethod
def render_prefix(cls: Type[MarkerDefinition], span: Span, marker: str, context: Block) -> str:
"""Render the span with the appropriate style for underline."""
return '<span style="text-decoration:underline;">'
class StrikeThroughMarkerDefinition(MarkerDefinition):
"""Marker definition for <strike> rendering."""
tag = 'del'
# Annotations
class LinkMarkerDefinition(MarkerDefinition):
"""Marker definition for link rendering."""
tag = 'a'
@classmethod
def render_prefix(cls: Type[MarkerDefinition], span: Span, marker: str, context: Block) -> str:
"""Render the opening anchor tag with the href attribute set.
The href attribute is fetched from the provided block context using
the provided marker key.
"""
marker_definition = next((md for md in context.markDefs if md['_key'] == marker), None)
if not marker_definition:
raise ValueError(f'Marker definition for key: {marker} not found in parent block context')
href = marker_definition.get('href', '')
return f'<a href="{href}">'
class CommentMarkerDefinition(MarkerDefinition):
"""Marker definition for HTML comment rendering."""
tag = '!--'
@classmethod
def render_prefix(cls: Type[MarkerDefinition], span: Span, marker: str, context: Block) -> str:
"""Render the opening of the HTML comment block."""
return '<!-- '
@classmethod
def render_suffix(cls: Type[MarkerDefinition], span: Span, marker: str, context: Block) -> str:
"""Render the closing part of the HTML comment block."""
return ' -->'
|
/sanity-html-1.0.0.tar.gz/sanity-html-1.0.0/sanity_html/marker_definitions.py
| 0.953665 | 0.349588 |
marker_definitions.py
|
pypi
|
from __future__ import annotations
import html
from typing import TYPE_CHECKING, cast
from sanity_html.constants import STYLE_MAP
from sanity_html.logger import logger
from sanity_html.marker_definitions import DefaultMarkerDefinition
from sanity_html.types import Block, Span
from sanity_html.utils import get_list_tags, is_block, is_list, is_span
if TYPE_CHECKING:
from typing import Callable, Dict, List, Optional, Type, Union
from sanity_html.marker_definitions import MarkerDefinition
class UnhandledNodeError(Exception):
"""Raised when we receive a node that we cannot parse."""
pass
class MissingSerializerError(UnhandledNodeError):
"""
Raised when an unrecognized node _type value is found.
This usually means that you need to pass a custom serializer
to handle the custom type.
"""
pass
class SanityBlockRenderer:
"""HTML renderer for Sanity block content."""
def __init__(
self,
blocks: Union[list[dict], dict],
custom_marker_definitions: dict[str, Type[MarkerDefinition]] = None,
custom_serializers: dict[str, Callable[[dict, Optional[Block], bool], str]] = None,
) -> None:
logger.debug('Initializing block renderer')
self._wrapper_element: Optional[str] = None
self._custom_marker_definitions = custom_marker_definitions or {}
self._custom_serializers = custom_serializers or {}
if isinstance(blocks, dict):
self._blocks = [blocks]
elif isinstance(blocks, list):
self._blocks = blocks
self._wrapper_element = 'div' if len(blocks) > 1 else ''
def render(self) -> str:
"""Render HTML from self._blocks."""
logger.debug('Rendering HTML')
if not self._blocks:
return ''
result = ''
list_nodes: List[Dict] = []
for node in self._blocks:
if list_nodes and not is_list(node):
tree = self._normalize_list_tree(list_nodes)
result += ''.join([self._render_node(n, Block(**node), list_item=True) for n in tree])
list_nodes = [] # reset list_nodes
if is_list(node):
list_nodes.append(node)
continue # handle all elements ^ when the list ends
result += self._render_node(node) # render non-list nodes immediately
if list_nodes:
tree = self._normalize_list_tree(list_nodes)
result += ''.join(self._render_node(n, Block(**node), list_item=True) for n in tree)
result = result.strip()
if self._wrapper_element:
return f'<{self._wrapper_element}>{result}</{self._wrapper_element}>'
return result
def _render_node(self, node: dict, context: Optional[Block] = None, list_item: bool = False) -> str:
"""
Call the correct render method depending on the node type.
:param node: Block content node - can be block, span, or list (block).
:param context: Optional context. Spans are passed with a Block instance as context for mark lookups.
:param list_item: Whether we are handling a list upstream (impacts block handling).
"""
if is_list(node):
logger.debug('Rendering node as list')
block = Block(**node, marker_definitions=self._custom_marker_definitions)
return self._render_list(block, context)
elif is_block(node):
logger.debug('Rendering node as block')
block = Block(**node, marker_definitions=self._custom_marker_definitions)
return self._render_block(block, list_item=list_item)
elif is_span(node):
logger.debug('Rendering node as span')
span = Span(**node)
context = cast(Block, context) # context should always be a Block here
return self._render_span(span, block=context)
elif self._custom_serializers.get(node.get('_type', '')):
return self._custom_serializers.get(node.get('_type', ''))(node, context, list_item) # type: ignore
else:
            # `node` is a plain dict, so check for the key rather than an attribute.
            if '_type' in node:
raise MissingSerializerError(
f'Found unhandled node type: {node["_type"]}. ' 'Most likely this requires a custom serializer.'
)
else:
raise UnhandledNodeError(f'Received node that we cannot handle: {node}')
def _render_block(self, block: Block, list_item: bool = False) -> str:
text, tag = '', STYLE_MAP[block.style]
if not list_item or tag != 'p':
text += f'<{tag}>'
for child_node in block.children:
text += self._render_node(child_node, context=block)
if not list_item or tag != 'p':
text += f'</{tag}>'
return text
def _render_span(self, span: Span, block: Block) -> str:
logger.debug('Rendering span')
result: str = ''
prev_node, next_node = block.get_node_siblings(span)
prev_marks = prev_node.get('marks', []) if prev_node else []
next_marks = next_node.get('marks', []) if next_node else []
sorted_marks = sorted(span.marks, key=lambda x: -block.marker_frequencies[x])
for mark in sorted_marks:
if mark in prev_marks:
continue
marker_callable = block.marker_definitions.get(mark, DefaultMarkerDefinition)()
result += marker_callable.render_prefix(span, mark, block)
result += html.escape(span.text).replace('\n', '<br/>')
for mark in reversed(sorted_marks):
if mark in next_marks:
continue
marker_callable = block.marker_definitions.get(mark, DefaultMarkerDefinition)()
result += marker_callable.render_suffix(span, mark, block)
return result
def _render_list(self, node: Block, context: Optional[Block]) -> str:
assert node.listItem
head, tail = get_list_tags(node.listItem)
result = head
for child in node.children:
result += f'<li>{self._render_block(Block(**child), True)}</li>'
result += tail
return result
def _normalize_list_tree(self, nodes: list) -> list[dict]:
tree = []
current_list = None
for node in nodes:
if not is_block(node):
tree.append(node)
current_list = None
continue
if current_list is None:
current_list = self._list_from_block(node)
tree.append(current_list)
continue
if node.get('level') == current_list['level'] and node.get('listItem') == current_list['listItem']:
current_list['children'].append(node)
continue
if node.get('level') > current_list['level']:
new_list = self._list_from_block(node)
current_list['children'][-1]['children'].append(new_list)
current_list = new_list
continue
if node.get('level') < current_list['level']:
parent = self._find_list(tree[-1], level=node.get('level'), list_item=node.get('listItem'))
if parent:
current_list = parent
current_list['children'].append(node)
continue
current_list = self._list_from_block(node)
tree.append(current_list)
continue
if node.get('listItem') != current_list['listItem']:
match = self._find_list(tree[-1], level=node.get('level'))
if match and match['listItem'] == node.get('listItem'):
current_list = match
current_list['children'].append(node)
continue
current_list = self._list_from_block(node)
tree.append(current_list)
continue
# TODO: Warn
tree.append(node)
return tree
def _find_list(self, root_node: dict, level: int, list_item: Optional[str] = None) -> Optional[dict]:
filter_on_type = isinstance(list_item, str)
if (
root_node.get('_type') == 'list'
and root_node.get('level') == level
and (filter_on_type and root_node.get('listItem') == list_item)
):
return root_node
children = root_node.get('children')
if children:
return self._find_list(children[-1], level, list_item)
return None
def _list_from_block(self, block: dict) -> dict:
return {
'_type': 'list',
'_key': f'${block["_key"]}-parent',
'level': block.get('level'),
'listItem': block['listItem'],
'children': [block],
}
def render(blocks: List[Dict], *args, **kwargs) -> str:
"""Shortcut function inspired by Sanity's own blocksToHtml.h callable."""
renderer = SanityBlockRenderer(blocks, *args, **kwargs)
return renderer.render()
|
/sanity-html-1.0.0.tar.gz/sanity-html-1.0.0/sanity_html/renderer.py
| 0.877483 | 0.208602 |
renderer.py
|
pypi
|
goog.provide('goog.math.Long');
goog.require('goog.reflect');
/**
* Constructs a 64-bit two's-complement integer, given its low and high 32-bit
* values as *signed* integers. See the from* functions below for more
* convenient ways of constructing Longs.
*
* The internal representation of a long is the two given signed, 32-bit values.
* We use 32-bit pieces because these are the size of integers on which
* Javascript performs bit-operations. For operations like addition and
* multiplication, we split each number into 16-bit pieces, which can easily be
* multiplied within Javascript's floating-point representation without overflow
* or change in sign.
*
* In the algorithms below, we frequently reduce the negative case to the
* positive case by negating the input(s) and then post-processing the result.
* Note that we must ALWAYS check specially whether those values are MIN_VALUE
* (-2^63) because -MIN_VALUE == MIN_VALUE (since 2^63 cannot be represented as
* a positive number, it overflows back into a negative). Not handling this
* case would often result in infinite recursion.
*
* @param {number} low The low (signed) 32 bits of the long.
* @param {number} high The high (signed) 32 bits of the long.
* @struct
* @constructor
* @final
*/
goog.math.Long = function(low, high) {
/**
* @type {number}
* @private
*/
this.low_ = low | 0; // force into 32 signed bits.
/**
* @type {number}
* @private
*/
this.high_ = high | 0; // force into 32 signed bits.
};
// NOTE: Common constant values ZERO, ONE, NEG_ONE, etc. are defined below the
// from* methods on which they depend.
/**
* A cache of the Long representations of small integer values.
* @type {!Object<number, !goog.math.Long>}
* @private
*/
goog.math.Long.IntCache_ = {};
/**
* A cache of the Long representations of common values.
* @type {!Object<goog.math.Long.ValueCacheId_, !goog.math.Long>}
* @private
*/
goog.math.Long.valueCache_ = {};
/**
* Returns a Long representing the given (32-bit) integer value.
* @param {number} value The 32-bit integer in question.
* @return {!goog.math.Long} The corresponding Long value.
*/
goog.math.Long.fromInt = function(value) {
if (-128 <= value && value < 128) {
return goog.reflect.cache(goog.math.Long.IntCache_, value, function(val) {
return new goog.math.Long(val | 0, val < 0 ? -1 : 0);
});
} else {
return new goog.math.Long(value | 0, value < 0 ? -1 : 0);
}
};
/**
* Returns a Long representing the given value.
* NaN will be returned as zero. Infinity is converted to max value and
* -Infinity to min value.
* @param {number} value The number in question.
* @return {!goog.math.Long} The corresponding Long value.
*/
goog.math.Long.fromNumber = function(value) {
if (isNaN(value)) {
return goog.math.Long.getZero();
} else if (value <= -goog.math.Long.TWO_PWR_63_DBL_) {
return goog.math.Long.getMinValue();
} else if (value + 1 >= goog.math.Long.TWO_PWR_63_DBL_) {
return goog.math.Long.getMaxValue();
} else if (value < 0) {
return goog.math.Long.fromNumber(-value).negate();
} else {
return new goog.math.Long(
(value % goog.math.Long.TWO_PWR_32_DBL_) | 0,
(value / goog.math.Long.TWO_PWR_32_DBL_) | 0);
}
};
/**
* Returns a Long representing the 64-bit integer that comes by concatenating
* the given high and low bits. Each is assumed to use 32 bits.
* @param {number} lowBits The low 32-bits.
* @param {number} highBits The high 32-bits.
* @return {!goog.math.Long} The corresponding Long value.
*/
goog.math.Long.fromBits = function(lowBits, highBits) {
return new goog.math.Long(lowBits, highBits);
};
/**
* Returns a Long representation of the given string, written using the given
* radix.
* @param {string} str The textual representation of the Long.
* @param {number=} opt_radix The radix in which the text is written.
* @return {!goog.math.Long} The corresponding Long value.
*/
goog.math.Long.fromString = function(str, opt_radix) {
if (str.length == 0) {
throw Error('number format error: empty string');
}
var radix = opt_radix || 10;
if (radix < 2 || 36 < radix) {
throw Error('radix out of range: ' + radix);
}
if (str.charAt(0) == '-') {
return goog.math.Long.fromString(str.substring(1), radix).negate();
} else if (str.indexOf('-') >= 0) {
throw Error('number format error: interior "-" character: ' + str);
}
// Do several (8) digits each time through the loop, so as to
// minimize the calls to the very expensive emulated div.
var radixToPower = goog.math.Long.fromNumber(Math.pow(radix, 8));
var result = goog.math.Long.getZero();
for (var i = 0; i < str.length; i += 8) {
var size = Math.min(8, str.length - i);
var value = parseInt(str.substring(i, i + size), radix);
if (size < 8) {
var power = goog.math.Long.fromNumber(Math.pow(radix, size));
result = result.multiply(power).add(goog.math.Long.fromNumber(value));
} else {
result = result.multiply(radixToPower);
result = result.add(goog.math.Long.fromNumber(value));
}
}
return result;
};
// NOTE: the compiler should inline these constant values below and then remove
// these variables, so there should be no runtime penalty for these.
/**
* Number used repeated below in calculations. This must appear before the
* first call to any from* function below.
* @type {number}
* @private
*/
goog.math.Long.TWO_PWR_16_DBL_ = 1 << 16;
/**
* @type {number}
* @private
*/
goog.math.Long.TWO_PWR_32_DBL_ =
goog.math.Long.TWO_PWR_16_DBL_ * goog.math.Long.TWO_PWR_16_DBL_;
/**
* @type {number}
* @private
*/
goog.math.Long.TWO_PWR_64_DBL_ =
goog.math.Long.TWO_PWR_32_DBL_ * goog.math.Long.TWO_PWR_32_DBL_;
/**
* @type {number}
* @private
*/
goog.math.Long.TWO_PWR_63_DBL_ = goog.math.Long.TWO_PWR_64_DBL_ / 2;
/**
* @return {!goog.math.Long}
* @public
*/
goog.math.Long.getZero = function() {
return goog.reflect.cache(
goog.math.Long.valueCache_, goog.math.Long.ValueCacheId_.ZERO,
function() { return goog.math.Long.fromInt(0); });
};
/**
* @return {!goog.math.Long}
* @public
*/
goog.math.Long.getOne = function() {
return goog.reflect.cache(
goog.math.Long.valueCache_, goog.math.Long.ValueCacheId_.ONE,
function() { return goog.math.Long.fromInt(1); });
};
/**
* @return {!goog.math.Long}
* @public
*/
goog.math.Long.getNegOne = function() {
return goog.reflect.cache(
goog.math.Long.valueCache_, goog.math.Long.ValueCacheId_.NEG_ONE,
function() { return goog.math.Long.fromInt(-1); });
};
/**
* @return {!goog.math.Long}
* @public
*/
goog.math.Long.getMaxValue = function() {
return goog.reflect.cache(
goog.math.Long.valueCache_, goog.math.Long.ValueCacheId_.MAX_VALUE,
function() {
return goog.math.Long.fromBits(0xFFFFFFFF | 0, 0x7FFFFFFF | 0);
});
};
/**
* @return {!goog.math.Long}
* @public
*/
goog.math.Long.getMinValue = function() {
return goog.reflect.cache(
goog.math.Long.valueCache_, goog.math.Long.ValueCacheId_.MIN_VALUE,
function() { return goog.math.Long.fromBits(0, 0x80000000 | 0); });
};
/**
* @return {!goog.math.Long}
* @public
*/
goog.math.Long.getTwoPwr24 = function() {
return goog.reflect.cache(
goog.math.Long.valueCache_, goog.math.Long.ValueCacheId_.TWO_PWR_24,
function() { return goog.math.Long.fromInt(1 << 24); });
};
/** @return {number} The value, assuming it is a 32-bit integer. */
goog.math.Long.prototype.toInt = function() {
return this.low_;
};
/** @return {number} The closest floating-point representation to this value. */
goog.math.Long.prototype.toNumber = function() {
return this.high_ * goog.math.Long.TWO_PWR_32_DBL_ +
this.getLowBitsUnsigned();
};
/**
* @param {number=} opt_radix The radix in which the text should be written.
* @return {string} The textual representation of this value.
* @override
*/
goog.math.Long.prototype.toString = function(opt_radix) {
var radix = opt_radix || 10;
if (radix < 2 || 36 < radix) {
throw Error('radix out of range: ' + radix);
}
if (this.isZero()) {
return '0';
}
if (this.isNegative()) {
if (this.equals(goog.math.Long.getMinValue())) {
// We need to change the Long value before it can be negated, so we remove
// the bottom-most digit in this base and then recurse to do the rest.
var radixLong = goog.math.Long.fromNumber(radix);
var div = this.div(radixLong);
var rem = div.multiply(radixLong).subtract(this);
return div.toString(radix) + rem.toInt().toString(radix);
} else {
return '-' + this.negate().toString(radix);
}
}
// Do several (6) digits each time through the loop, so as to
// minimize the calls to the very expensive emulated div.
var radixToPower = goog.math.Long.fromNumber(Math.pow(radix, 6));
var rem = this;
var result = '';
while (true) {
var remDiv = rem.div(radixToPower);
// The right shifting fixes negative values in the case when
// intval >= 2^31; for more details see
// https://github.com/google/closure-library/pull/498
var intval = rem.subtract(remDiv.multiply(radixToPower)).toInt() >>> 0;
var digits = intval.toString(radix);
rem = remDiv;
if (rem.isZero()) {
return digits + result;
} else {
while (digits.length < 6) {
digits = '0' + digits;
}
result = '' + digits + result;
}
}
};
/** @return {number} The high 32-bits as a signed value. */
goog.math.Long.prototype.getHighBits = function() {
return this.high_;
};
/** @return {number} The low 32-bits as a signed value. */
goog.math.Long.prototype.getLowBits = function() {
return this.low_;
};
/** @return {number} The low 32-bits as an unsigned value. */
goog.math.Long.prototype.getLowBitsUnsigned = function() {
return (this.low_ >= 0) ? this.low_ :
goog.math.Long.TWO_PWR_32_DBL_ + this.low_;
};
/**
* @return {number} Returns the number of bits needed to represent the absolute
* value of this Long.
*/
goog.math.Long.prototype.getNumBitsAbs = function() {
if (this.isNegative()) {
if (this.equals(goog.math.Long.getMinValue())) {
return 64;
} else {
return this.negate().getNumBitsAbs();
}
} else {
var val = this.high_ != 0 ? this.high_ : this.low_;
for (var bit = 31; bit > 0; bit--) {
if ((val & (1 << bit)) != 0) {
break;
}
}
return this.high_ != 0 ? bit + 33 : bit + 1;
}
};
/** @return {boolean} Whether this value is zero. */
goog.math.Long.prototype.isZero = function() {
return this.high_ == 0 && this.low_ == 0;
};
/** @return {boolean} Whether this value is negative. */
goog.math.Long.prototype.isNegative = function() {
return this.high_ < 0;
};
/** @return {boolean} Whether this value is odd. */
goog.math.Long.prototype.isOdd = function() {
return (this.low_ & 1) == 1;
};
/**
* @param {goog.math.Long} other Long to compare against.
* @return {boolean} Whether this Long equals the other.
*/
goog.math.Long.prototype.equals = function(other) {
return (this.high_ == other.high_) && (this.low_ == other.low_);
};
/**
* @param {goog.math.Long} other Long to compare against.
* @return {boolean} Whether this Long does not equal the other.
*/
goog.math.Long.prototype.notEquals = function(other) {
return (this.high_ != other.high_) || (this.low_ != other.low_);
};
/**
* @param {goog.math.Long} other Long to compare against.
* @return {boolean} Whether this Long is less than the other.
*/
goog.math.Long.prototype.lessThan = function(other) {
return this.compare(other) < 0;
};
/**
* @param {goog.math.Long} other Long to compare against.
* @return {boolean} Whether this Long is less than or equal to the other.
*/
goog.math.Long.prototype.lessThanOrEqual = function(other) {
return this.compare(other) <= 0;
};
/**
* @param {goog.math.Long} other Long to compare against.
* @return {boolean} Whether this Long is greater than the other.
*/
goog.math.Long.prototype.greaterThan = function(other) {
return this.compare(other) > 0;
};
/**
* @param {goog.math.Long} other Long to compare against.
* @return {boolean} Whether this Long is greater than or equal to the other.
*/
goog.math.Long.prototype.greaterThanOrEqual = function(other) {
return this.compare(other) >= 0;
};
/**
* Compares this Long with the given one.
* @param {goog.math.Long} other Long to compare against.
* @return {number} 0 if they are the same, 1 if the this is greater, and -1
* if the given one is greater.
*/
goog.math.Long.prototype.compare = function(other) {
if (this.equals(other)) {
return 0;
}
var thisNeg = this.isNegative();
var otherNeg = other.isNegative();
if (thisNeg && !otherNeg) {
return -1;
}
if (!thisNeg && otherNeg) {
return 1;
}
// at this point, the signs are the same, so subtraction will not overflow
if (this.subtract(other).isNegative()) {
return -1;
} else {
return 1;
}
};
/** @return {!goog.math.Long} The negation of this value. */
goog.math.Long.prototype.negate = function() {
if (this.equals(goog.math.Long.getMinValue())) {
return goog.math.Long.getMinValue();
} else {
return this.not().add(goog.math.Long.getOne());
}
};
/**
* Returns the sum of this and the given Long.
* @param {goog.math.Long} other Long to add to this one.
* @return {!goog.math.Long} The sum of this and the given Long.
*/
goog.math.Long.prototype.add = function(other) {
// Divide each number into 4 chunks of 16 bits, and then sum the chunks.
var a48 = this.high_ >>> 16;
var a32 = this.high_ & 0xFFFF;
var a16 = this.low_ >>> 16;
var a00 = this.low_ & 0xFFFF;
var b48 = other.high_ >>> 16;
var b32 = other.high_ & 0xFFFF;
var b16 = other.low_ >>> 16;
var b00 = other.low_ & 0xFFFF;
var c48 = 0, c32 = 0, c16 = 0, c00 = 0;
c00 += a00 + b00;
c16 += c00 >>> 16;
c00 &= 0xFFFF;
c16 += a16 + b16;
c32 += c16 >>> 16;
c16 &= 0xFFFF;
c32 += a32 + b32;
c48 += c32 >>> 16;
c32 &= 0xFFFF;
c48 += a48 + b48;
c48 &= 0xFFFF;
return goog.math.Long.fromBits((c16 << 16) | c00, (c48 << 16) | c32);
};
/**
* Returns the difference of this and the given Long.
* @param {goog.math.Long} other Long to subtract from this.
* @return {!goog.math.Long} The difference of this and the given Long.
*/
goog.math.Long.prototype.subtract = function(other) {
return this.add(other.negate());
};
/**
* Returns the product of this and the given long.
* @param {goog.math.Long} other Long to multiply with this.
* @return {!goog.math.Long} The product of this and the other.
*/
goog.math.Long.prototype.multiply = function(other) {
if (this.isZero()) {
return goog.math.Long.getZero();
} else if (other.isZero()) {
return goog.math.Long.getZero();
}
if (this.equals(goog.math.Long.getMinValue())) {
return other.isOdd() ? goog.math.Long.getMinValue() :
goog.math.Long.getZero();
} else if (other.equals(goog.math.Long.getMinValue())) {
return this.isOdd() ? goog.math.Long.getMinValue() :
goog.math.Long.getZero();
}
if (this.isNegative()) {
if (other.isNegative()) {
return this.negate().multiply(other.negate());
} else {
return this.negate().multiply(other).negate();
}
} else if (other.isNegative()) {
return this.multiply(other.negate()).negate();
}
// If both longs are small, use float multiplication
if (this.lessThan(goog.math.Long.getTwoPwr24()) &&
other.lessThan(goog.math.Long.getTwoPwr24())) {
return goog.math.Long.fromNumber(this.toNumber() * other.toNumber());
}
// Divide each long into 4 chunks of 16 bits, and then add up 4x4 products.
// We can skip products that would overflow.
var a48 = this.high_ >>> 16;
var a32 = this.high_ & 0xFFFF;
var a16 = this.low_ >>> 16;
var a00 = this.low_ & 0xFFFF;
var b48 = other.high_ >>> 16;
var b32 = other.high_ & 0xFFFF;
var b16 = other.low_ >>> 16;
var b00 = other.low_ & 0xFFFF;
var c48 = 0, c32 = 0, c16 = 0, c00 = 0;
c00 += a00 * b00;
c16 += c00 >>> 16;
c00 &= 0xFFFF;
c16 += a16 * b00;
c32 += c16 >>> 16;
c16 &= 0xFFFF;
c16 += a00 * b16;
c32 += c16 >>> 16;
c16 &= 0xFFFF;
c32 += a32 * b00;
c48 += c32 >>> 16;
c32 &= 0xFFFF;
c32 += a16 * b16;
c48 += c32 >>> 16;
c32 &= 0xFFFF;
c32 += a00 * b32;
c48 += c32 >>> 16;
c32 &= 0xFFFF;
c48 += a48 * b00 + a32 * b16 + a16 * b32 + a00 * b48;
c48 &= 0xFFFF;
return goog.math.Long.fromBits((c16 << 16) | c00, (c48 << 16) | c32);
};
/**
* Returns this Long divided by the given one.
* @param {goog.math.Long} other Long by which to divide.
* @return {!goog.math.Long} This Long divided by the given one.
*/
goog.math.Long.prototype.div = function(other) {
if (other.isZero()) {
throw Error('division by zero');
} else if (this.isZero()) {
return goog.math.Long.getZero();
}
if (this.equals(goog.math.Long.getMinValue())) {
if (other.equals(goog.math.Long.getOne()) ||
other.equals(goog.math.Long.getNegOne())) {
return goog.math.Long.getMinValue(); // recall -MIN_VALUE == MIN_VALUE
} else if (other.equals(goog.math.Long.getMinValue())) {
return goog.math.Long.getOne();
} else {
// At this point, we have |other| >= 2, so |this/other| < |MIN_VALUE|.
var halfThis = this.shiftRight(1);
var approx = halfThis.div(other).shiftLeft(1);
if (approx.equals(goog.math.Long.getZero())) {
return other.isNegative() ? goog.math.Long.getOne() :
goog.math.Long.getNegOne();
} else {
var rem = this.subtract(other.multiply(approx));
var result = approx.add(rem.div(other));
return result;
}
}
} else if (other.equals(goog.math.Long.getMinValue())) {
return goog.math.Long.getZero();
}
if (this.isNegative()) {
if (other.isNegative()) {
return this.negate().div(other.negate());
} else {
return this.negate().div(other).negate();
}
} else if (other.isNegative()) {
return this.div(other.negate()).negate();
}
// Repeat the following until the remainder is less than other: find a
// floating-point that approximates remainder / other *from below*, add this
// into the result, and subtract it from the remainder. It is critical that
// the approximate value is less than or equal to the real value so that the
// remainder never becomes negative.
var res = goog.math.Long.getZero();
var rem = this;
while (rem.greaterThanOrEqual(other)) {
// Approximate the result of division. This may be a little greater or
// smaller than the actual value.
var approx = Math.max(1, Math.floor(rem.toNumber() / other.toNumber()));
// We will tweak the approximate result by changing it in the 48-th digit or
// the smallest non-fractional digit, whichever is larger.
var log2 = Math.ceil(Math.log(approx) / Math.LN2);
var delta = (log2 <= 48) ? 1 : Math.pow(2, log2 - 48);
// Decrease the approximation until it is smaller than the remainder. Note
// that if it is too large, the product overflows and is negative.
var approxRes = goog.math.Long.fromNumber(approx);
var approxRem = approxRes.multiply(other);
while (approxRem.isNegative() || approxRem.greaterThan(rem)) {
approx -= delta;
approxRes = goog.math.Long.fromNumber(approx);
approxRem = approxRes.multiply(other);
}
// We know the answer can't be zero... and actually, zero would cause
// infinite recursion since we would make no progress.
if (approxRes.isZero()) {
approxRes = goog.math.Long.getOne();
}
res = res.add(approxRes);
rem = rem.subtract(approxRem);
}
return res;
};
/**
* Returns this Long modulo the given one.
* @param {goog.math.Long} other Long by which to mod.
* @return {!goog.math.Long} This Long modulo the given one.
*/
goog.math.Long.prototype.modulo = function(other) {
return this.subtract(this.div(other).multiply(other));
};
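// Examples (illustrative only): division truncates toward zero, so
// goog.math.Long.fromNumber(-10).div(goog.math.Long.fromNumber(3)) is -3, and
// modulo keeps the sign of the dividend, so
// goog.math.Long.fromNumber(-10).modulo(goog.math.Long.fromNumber(3)) is -1.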
/** @return {!goog.math.Long} The bitwise-NOT of this value. */
goog.math.Long.prototype.not = function() {
return goog.math.Long.fromBits(~this.low_, ~this.high_);
};
/**
* Returns the bitwise-AND of this Long and the given one.
* @param {goog.math.Long} other The Long with which to AND.
* @return {!goog.math.Long} The bitwise-AND of this and the other.
*/
goog.math.Long.prototype.and = function(other) {
return goog.math.Long.fromBits(
this.low_ & other.low_, this.high_ & other.high_);
};
/**
* Returns the bitwise-OR of this Long and the given one.
* @param {goog.math.Long} other The Long with which to OR.
* @return {!goog.math.Long} The bitwise-OR of this and the other.
*/
goog.math.Long.prototype.or = function(other) {
return goog.math.Long.fromBits(
this.low_ | other.low_, this.high_ | other.high_);
};
/**
* Returns the bitwise-XOR of this Long and the given one.
* @param {goog.math.Long} other The Long with which to XOR.
* @return {!goog.math.Long} The bitwise-XOR of this and the other.
*/
goog.math.Long.prototype.xor = function(other) {
return goog.math.Long.fromBits(
this.low_ ^ other.low_, this.high_ ^ other.high_);
};
/**
* Returns this Long with bits shifted to the left by the given amount.
* @param {number} numBits The number of bits by which to shift.
* @return {!goog.math.Long} This shifted to the left by the given amount.
*/
goog.math.Long.prototype.shiftLeft = function(numBits) {
numBits &= 63;
if (numBits == 0) {
return this;
} else {
var low = this.low_;
if (numBits < 32) {
var high = this.high_;
return goog.math.Long.fromBits(
low << numBits, (high << numBits) | (low >>> (32 - numBits)));
} else {
return goog.math.Long.fromBits(0, low << (numBits - 32));
}
}
};
/**
* Returns this Long with bits shifted to the right by the given amount.
* The new leading bits match the current sign bit.
* @param {number} numBits The number of bits by which to shift.
* @return {!goog.math.Long} This shifted to the right by the given amount.
*/
goog.math.Long.prototype.shiftRight = function(numBits) {
numBits &= 63;
if (numBits == 0) {
return this;
} else {
var high = this.high_;
if (numBits < 32) {
var low = this.low_;
return goog.math.Long.fromBits(
(low >>> numBits) | (high << (32 - numBits)), high >> numBits);
} else {
return goog.math.Long.fromBits(
high >> (numBits - 32), high >= 0 ? 0 : -1);
}
}
};
/**
* Returns this Long with bits shifted to the right by the given amount, with
* zeros placed into the new leading bits.
* @param {number} numBits The number of bits by which to shift.
* @return {!goog.math.Long} This shifted to the right by the given amount, with
* zeros placed into the new leading bits.
*/
goog.math.Long.prototype.shiftRightUnsigned = function(numBits) {
numBits &= 63;
if (numBits == 0) {
return this;
} else {
var high = this.high_;
if (numBits < 32) {
var low = this.low_;
return goog.math.Long.fromBits(
(low >>> numBits) | (high << (32 - numBits)), high >>> numBits);
} else if (numBits == 32) {
return goog.math.Long.fromBits(high, 0);
} else {
return goog.math.Long.fromBits(high >>> (numBits - 32), 0);
}
}
};
/**
* @enum {number} Ids of commonly requested Long instances.
* @private
*/
goog.math.Long.ValueCacheId_ = {
MAX_VALUE: 1,
MIN_VALUE: 2,
ZERO: 3,
ONE: 4,
NEG_ONE: 5,
TWO_PWR_24: 6
};
|
/sanity-nupic-0.0.15.tar.gz/sanity-nupic-0.0.15/htmsanity/nupic/sanity/public/demos/out/goog/math/long.js
| 0.890556 | 0.507507 |
long.js
|
pypi
|
goog.provide('goog.math.Integer');
/**
 * Constructs a two's-complement integer from an array containing bits of the
* integer in 32-bit (signed) pieces, given in little-endian order (i.e.,
* lowest-order bits in the first piece), and the sign of -1 or 0.
*
* See the from* functions below for other convenient ways of constructing
* Integers.
*
* The internal representation of an integer is an array of 32-bit signed
* pieces, along with a sign (0 or -1) that indicates the contents of all the
* other 32-bit pieces out to infinity. We use 32-bit pieces because these are
* the size of integers on which Javascript performs bit-operations. For
* operations like addition and multiplication, we split each number into 16-bit
* pieces, which can easily be multiplied within Javascript's floating-point
* representation without overflow or change in sign.
*
* @struct
* @constructor
* @param {Array<number>} bits Array containing the bits of the number.
 * @param {number} sign The sign of the number: -1 for negative and 0 for positive.
* @final
*/
goog.math.Integer = function(bits, sign) {
/**
* @type {!Array<number>}
* @private
*/
this.bits_ = [];
/**
* @type {number}
* @private
*/
this.sign_ = sign;
// Copy the 32-bit signed integer values passed in. We prune out those at the
// top that equal the sign since they are redundant.
var top = true;
for (var i = bits.length - 1; i >= 0; i--) {
var val = bits[i] | 0;
if (!top || val != sign) {
this.bits_[i] = val;
top = false;
}
}
};
// NOTE: Common constant values ZERO, ONE, NEG_ONE, etc. are defined below the
// from* methods on which they depend.
/**
* A cache of the Integer representations of small integer values.
* @type {!Object}
* @private
*/
goog.math.Integer.IntCache_ = {};
/**
* Returns an Integer representing the given (32-bit) integer value.
* @param {number} value A 32-bit integer value.
* @return {!goog.math.Integer} The corresponding Integer value.
*/
goog.math.Integer.fromInt = function(value) {
if (-128 <= value && value < 128) {
var cachedObj = goog.math.Integer.IntCache_[value];
if (cachedObj) {
return cachedObj;
}
}
var obj = new goog.math.Integer([value | 0], value < 0 ? -1 : 0);
if (-128 <= value && value < 128) {
goog.math.Integer.IntCache_[value] = obj;
}
return obj;
};
/**
* Returns an Integer representing the given value, provided that it is a finite
* number. Otherwise, zero is returned.
* @param {number} value The value in question.
* @return {!goog.math.Integer} The corresponding Integer value.
*/
goog.math.Integer.fromNumber = function(value) {
if (isNaN(value) || !isFinite(value)) {
return goog.math.Integer.ZERO;
} else if (value < 0) {
return goog.math.Integer.fromNumber(-value).negate();
} else {
var bits = [];
var pow = 1;
for (var i = 0; value >= pow; i++) {
bits[i] = (value / pow) | 0;
pow *= goog.math.Integer.TWO_PWR_32_DBL_;
}
return new goog.math.Integer(bits, 0);
}
};
/**
 * Returns an Integer representing the value obtained by concatenating the
 * given entries, each assumed to be 32 signed bits, given in little-endian
* order (lowest order bits in the lowest index), and sign-extending the highest
* order 32-bit value.
* @param {Array<number>} bits The bits of the number, in 32-bit signed pieces,
* in little-endian order.
* @return {!goog.math.Integer} The corresponding Integer value.
*/
goog.math.Integer.fromBits = function(bits) {
var high = bits[bits.length - 1];
return new goog.math.Integer(bits, high & (1 << 31) ? -1 : 0);
};
/**
* Returns an Integer representation of the given string, written using the
* given radix.
* @param {string} str The textual representation of the Integer.
* @param {number=} opt_radix The radix in which the text is written.
* @return {!goog.math.Integer} The corresponding Integer value.
*/
goog.math.Integer.fromString = function(str, opt_radix) {
if (str.length == 0) {
throw Error('number format error: empty string');
}
var radix = opt_radix || 10;
if (radix < 2 || 36 < radix) {
throw Error('radix out of range: ' + radix);
}
if (str.charAt(0) == '-') {
return goog.math.Integer.fromString(str.substring(1), radix).negate();
} else if (str.indexOf('-') >= 0) {
throw Error('number format error: interior "-" character');
}
// Do several (8) digits each time through the loop, so as to
// minimize the calls to the very expensive emulated div.
var radixToPower = goog.math.Integer.fromNumber(Math.pow(radix, 8));
var result = goog.math.Integer.ZERO;
for (var i = 0; i < str.length; i += 8) {
var size = Math.min(8, str.length - i);
var value = parseInt(str.substring(i, i + size), radix);
if (size < 8) {
var power = goog.math.Integer.fromNumber(Math.pow(radix, size));
result = result.multiply(power).add(goog.math.Integer.fromNumber(value));
} else {
result = result.multiply(radixToPower);
result = result.add(goog.math.Integer.fromNumber(value));
}
}
return result;
};
/**
* A number used repeatedly in calculations. This must appear before the first
* call to the from* functions below.
* @type {number}
* @private
*/
goog.math.Integer.TWO_PWR_32_DBL_ = (1 << 16) * (1 << 16);
/** @type {!goog.math.Integer} */
goog.math.Integer.ZERO = goog.math.Integer.fromInt(0);
/** @type {!goog.math.Integer} */
goog.math.Integer.ONE = goog.math.Integer.fromInt(1);
/**
* @type {!goog.math.Integer}
* @private
*/
goog.math.Integer.TWO_PWR_24_ = goog.math.Integer.fromInt(1 << 24);
/**
* Returns the value, assuming it is a 32-bit integer.
* @return {number} The corresponding int value.
*/
goog.math.Integer.prototype.toInt = function() {
return this.bits_.length > 0 ? this.bits_[0] : this.sign_;
};
/** @return {number} The closest floating-point representation to this value. */
goog.math.Integer.prototype.toNumber = function() {
if (this.isNegative()) {
return -this.negate().toNumber();
} else {
var val = 0;
var pow = 1;
for (var i = 0; i < this.bits_.length; i++) {
val += this.getBitsUnsigned(i) * pow;
pow *= goog.math.Integer.TWO_PWR_32_DBL_;
}
return val;
}
};
/**
* @param {number=} opt_radix The radix in which the text should be written.
* @return {string} The textual representation of this value.
* @override
*/
goog.math.Integer.prototype.toString = function(opt_radix) {
var radix = opt_radix || 10;
if (radix < 2 || 36 < radix) {
throw Error('radix out of range: ' + radix);
}
if (this.isZero()) {
return '0';
} else if (this.isNegative()) {
return '-' + this.negate().toString(radix);
}
// Do several (6) digits each time through the loop, so as to
// minimize the calls to the very expensive emulated div.
var radixToPower = goog.math.Integer.fromNumber(Math.pow(radix, 6));
var rem = this;
var result = '';
while (true) {
var remDiv = rem.divide(radixToPower);
// The right shifting fixes negative values in the case when
// intval >= 2^31; for more details see
// https://github.com/google/closure-library/pull/498
var intval = rem.subtract(remDiv.multiply(radixToPower)).toInt() >>> 0;
var digits = intval.toString(radix);
rem = remDiv;
if (rem.isZero()) {
return digits + result;
} else {
while (digits.length < 6) {
digits = '0' + digits;
}
result = '' + digits + result;
}
}
};
/**
* Returns the index-th 32-bit (signed) piece of the Integer according to
* little-endian order (i.e., index 0 contains the smallest bits).
* @param {number} index The index in question.
* @return {number} The requested 32-bits as a signed number.
*/
goog.math.Integer.prototype.getBits = function(index) {
if (index < 0) {
return 0; // Allowing this simplifies bit shifting operations below...
} else if (index < this.bits_.length) {
return this.bits_[index];
} else {
return this.sign_;
}
};
/**
* Returns the index-th 32-bit piece as an unsigned number.
* @param {number} index The index in question.
* @return {number} The requested 32-bits as an unsigned number.
*/
goog.math.Integer.prototype.getBitsUnsigned = function(index) {
var val = this.getBits(index);
return val >= 0 ? val : goog.math.Integer.TWO_PWR_32_DBL_ + val;
};
/** @return {number} The sign bit of this number, -1 or 0. */
goog.math.Integer.prototype.getSign = function() {
return this.sign_;
};
/** @return {boolean} Whether this value is zero. */
goog.math.Integer.prototype.isZero = function() {
if (this.sign_ != 0) {
return false;
}
for (var i = 0; i < this.bits_.length; i++) {
if (this.bits_[i] != 0) {
return false;
}
}
return true;
};
/** @return {boolean} Whether this value is negative. */
goog.math.Integer.prototype.isNegative = function() {
return this.sign_ == -1;
};
/** @return {boolean} Whether this value is odd. */
goog.math.Integer.prototype.isOdd = function() {
return (this.bits_.length == 0) && (this.sign_ == -1) ||
(this.bits_.length > 0) && ((this.bits_[0] & 1) != 0);
};
/**
* @param {goog.math.Integer} other Integer to compare against.
* @return {boolean} Whether this Integer equals the other.
*/
goog.math.Integer.prototype.equals = function(other) {
if (this.sign_ != other.sign_) {
return false;
}
var len = Math.max(this.bits_.length, other.bits_.length);
for (var i = 0; i < len; i++) {
if (this.getBits(i) != other.getBits(i)) {
return false;
}
}
return true;
};
/**
* @param {goog.math.Integer} other Integer to compare against.
* @return {boolean} Whether this Integer does not equal the other.
*/
goog.math.Integer.prototype.notEquals = function(other) {
return !this.equals(other);
};
/**
* @param {goog.math.Integer} other Integer to compare against.
* @return {boolean} Whether this Integer is greater than the other.
*/
goog.math.Integer.prototype.greaterThan = function(other) {
return this.compare(other) > 0;
};
/**
* @param {goog.math.Integer} other Integer to compare against.
* @return {boolean} Whether this Integer is greater than or equal to the other.
*/
goog.math.Integer.prototype.greaterThanOrEqual = function(other) {
return this.compare(other) >= 0;
};
/**
* @param {goog.math.Integer} other Integer to compare against.
* @return {boolean} Whether this Integer is less than the other.
*/
goog.math.Integer.prototype.lessThan = function(other) {
return this.compare(other) < 0;
};
/**
* @param {goog.math.Integer} other Integer to compare against.
* @return {boolean} Whether this Integer is less than or equal to the other.
*/
goog.math.Integer.prototype.lessThanOrEqual = function(other) {
return this.compare(other) <= 0;
};
/**
* Compares this Integer with the given one.
* @param {goog.math.Integer} other Integer to compare against.
* @return {number} 0 if they are the same, 1 if the this is greater, and -1
* if the given one is greater.
*/
goog.math.Integer.prototype.compare = function(other) {
var diff = this.subtract(other);
if (diff.isNegative()) {
return -1;
} else if (diff.isZero()) {
return 0;
} else {
return +1;
}
};
/**
* Returns an integer with only the first numBits bits of this value, sign
* extended from the final bit.
* @param {number} numBits The number of bits by which to shift.
 * @return {!goog.math.Integer} The shortened integer value.
*/
goog.math.Integer.prototype.shorten = function(numBits) {
var arr_index = (numBits - 1) >> 5;
var bit_index = (numBits - 1) % 32;
var bits = [];
for (var i = 0; i < arr_index; i++) {
bits[i] = this.getBits(i);
}
var sigBits = bit_index == 31 ? 0xFFFFFFFF : (1 << (bit_index + 1)) - 1;
var val = this.getBits(arr_index) & sigBits;
if (val & (1 << bit_index)) {
val |= 0xFFFFFFFF - sigBits;
bits[arr_index] = val;
return new goog.math.Integer(bits, -1);
} else {
bits[arr_index] = val;
return new goog.math.Integer(bits, 0);
}
};
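// Worked example (illustrative only): goog.math.Integer.fromInt(200).shorten(8)
// keeps the low 8 bits (0xC8) and sign-extends from bit 7, giving -56, which is
// the value 0xC8 represents as a signed byte.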
/** @return {!goog.math.Integer} The negation of this value. */
goog.math.Integer.prototype.negate = function() {
return this.not().add(goog.math.Integer.ONE);
};
/**
* Returns the sum of this and the given Integer.
* @param {goog.math.Integer} other The Integer to add to this.
* @return {!goog.math.Integer} The Integer result.
*/
goog.math.Integer.prototype.add = function(other) {
var len = Math.max(this.bits_.length, other.bits_.length);
var arr = [];
var carry = 0;
for (var i = 0; i <= len; i++) {
var a1 = this.getBits(i) >>> 16;
var a0 = this.getBits(i) & 0xFFFF;
var b1 = other.getBits(i) >>> 16;
var b0 = other.getBits(i) & 0xFFFF;
var c0 = carry + a0 + b0;
var c1 = (c0 >>> 16) + a1 + b1;
carry = c1 >>> 16;
c0 &= 0xFFFF;
c1 &= 0xFFFF;
arr[i] = (c1 << 16) | c0;
}
return goog.math.Integer.fromBits(arr);
};
/**
* Returns the difference of this and the given Integer.
* @param {goog.math.Integer} other The Integer to subtract from this.
* @return {!goog.math.Integer} The Integer result.
*/
goog.math.Integer.prototype.subtract = function(other) {
return this.add(other.negate());
};
/**
* Returns the product of this and the given Integer.
* @param {goog.math.Integer} other The Integer to multiply against this.
* @return {!goog.math.Integer} The product of this and the other.
*/
goog.math.Integer.prototype.multiply = function(other) {
if (this.isZero()) {
return goog.math.Integer.ZERO;
} else if (other.isZero()) {
return goog.math.Integer.ZERO;
}
if (this.isNegative()) {
if (other.isNegative()) {
return this.negate().multiply(other.negate());
} else {
return this.negate().multiply(other).negate();
}
} else if (other.isNegative()) {
return this.multiply(other.negate()).negate();
}
// If both numbers are small, use float multiplication
if (this.lessThan(goog.math.Integer.TWO_PWR_24_) &&
other.lessThan(goog.math.Integer.TWO_PWR_24_)) {
return goog.math.Integer.fromNumber(this.toNumber() * other.toNumber());
}
// Fill in an array of 16-bit products.
var len = this.bits_.length + other.bits_.length;
var arr = [];
for (var i = 0; i < 2 * len; i++) {
arr[i] = 0;
}
for (var i = 0; i < this.bits_.length; i++) {
for (var j = 0; j < other.bits_.length; j++) {
var a1 = this.getBits(i) >>> 16;
var a0 = this.getBits(i) & 0xFFFF;
var b1 = other.getBits(j) >>> 16;
var b0 = other.getBits(j) & 0xFFFF;
arr[2 * i + 2 * j] += a0 * b0;
goog.math.Integer.carry16_(arr, 2 * i + 2 * j);
arr[2 * i + 2 * j + 1] += a1 * b0;
goog.math.Integer.carry16_(arr, 2 * i + 2 * j + 1);
arr[2 * i + 2 * j + 1] += a0 * b1;
goog.math.Integer.carry16_(arr, 2 * i + 2 * j + 1);
arr[2 * i + 2 * j + 2] += a1 * b1;
goog.math.Integer.carry16_(arr, 2 * i + 2 * j + 2);
}
}
// Combine the 16-bit values into 32-bit values.
for (var i = 0; i < len; i++) {
arr[i] = (arr[2 * i + 1] << 16) | arr[2 * i];
}
for (var i = len; i < 2 * len; i++) {
arr[i] = 0;
}
return new goog.math.Integer(arr, 0);
};
/**
* Carries any overflow from the given index into later entries.
* @param {Array<number>} bits Array of 16-bit values in little-endian order.
* @param {number} index The index in question.
* @private
*/
goog.math.Integer.carry16_ = function(bits, index) {
while ((bits[index] & 0xFFFF) != bits[index]) {
bits[index + 1] += bits[index] >>> 16;
bits[index] &= 0xFFFF;
}
};
/**
* Returns "this" Integer divided by the given one. Both "this" and the given
* Integer MUST be positive.
*
* This method is only needed for very large numbers (>10^308),
* for which the original division algorithm gets into an infinite
* loop (see https://github.com/google/closure-library/issues/500).
*
* The algorithm has some possible performance enhancements (or
* could be rewritten entirely), it's just an initial solution for
* the issue linked above.
*
* @param {!goog.math.Integer} other The Integer to divide "this" by.
* @return {!goog.math.Integer} "this" value divided by the given one.
* @private
*/
goog.math.Integer.prototype.slowDivide_ = function(other) {
if (this.isNegative() || other.isNegative()) {
throw Error('slowDivide_ only works with positive integers.');
}
var twoPower = goog.math.Integer.ONE;
var multiple = other;
// First we have to figure out what the highest bit of the result
// is, so we increase "twoPower" and "multiple" until "multiple"
// exceeds "this".
while (multiple.lessThanOrEqual(this)) {
twoPower = twoPower.shiftLeft(1);
multiple = multiple.shiftLeft(1);
}
// Rewind by one power of two, giving us the highest bit of the
// result.
var res = twoPower.shiftRight(1);
var total = multiple.shiftRight(1);
// Now we starting decreasing "multiple" and "twoPower" to find the
// rest of the bits of the result.
var total2;
multiple = multiple.shiftRight(2);
twoPower = twoPower.shiftRight(2);
while (!multiple.isZero()) {
// whenever we can add "multiple" to the total and not exceed
// "this", that means we've found a 1 bit. Else we've found a 0
// and don't need to add to the result.
total2 = total.add(multiple);
if (total2.lessThanOrEqual(this)) {
res = res.add(twoPower);
total = total2;
}
multiple = multiple.shiftRight(1);
twoPower = twoPower.shiftRight(1);
}
return res;
};
/**
* Returns this Integer divided by the given one.
* @param {!goog.math.Integer} other The Integer to divide this by.
* @return {!goog.math.Integer} This value divided by the given one.
*/
goog.math.Integer.prototype.divide = function(other) {
if (other.isZero()) {
throw Error('division by zero');
} else if (this.isZero()) {
return goog.math.Integer.ZERO;
}
if (this.isNegative()) {
if (other.isNegative()) {
return this.negate().divide(other.negate());
} else {
return this.negate().divide(other).negate();
}
} else if (other.isNegative()) {
return this.divide(other.negate()).negate();
}
// Have to degrade to slowDivide for Very Large Numbers, because
// they're out of range for the floating-point approximation
// technique used below.
if (this.bits_.length > 30) {
return this.slowDivide_(other);
}
// Repeat the following until the remainder is less than other: find a
// floating-point that approximates remainder / other *from below*, add this
// into the result, and subtract it from the remainder. It is critical that
// the approximate value is less than or equal to the real value so that the
// remainder never becomes negative.
var res = goog.math.Integer.ZERO;
var rem = this;
while (rem.greaterThanOrEqual(other)) {
// Approximate the result of division. This may be a little greater or
// smaller than the actual value.
var approx = Math.max(1, Math.floor(rem.toNumber() / other.toNumber()));
// We will tweak the approximate result by changing it in the 48-th digit or
// the smallest non-fractional digit, whichever is larger.
var log2 = Math.ceil(Math.log(approx) / Math.LN2);
var delta = (log2 <= 48) ? 1 : Math.pow(2, log2 - 48);
// Decrease the approximation until it is smaller than the remainder. Note
// that if it is too large, the product overflows and is negative.
var approxRes = goog.math.Integer.fromNumber(approx);
var approxRem = approxRes.multiply(other);
while (approxRem.isNegative() || approxRem.greaterThan(rem)) {
approx -= delta;
approxRes = goog.math.Integer.fromNumber(approx);
approxRem = approxRes.multiply(other);
}
// We know the answer can't be zero... and actually, zero would cause
// infinite recursion since we would make no progress.
if (approxRes.isZero()) {
approxRes = goog.math.Integer.ONE;
}
res = res.add(approxRes);
rem = rem.subtract(approxRem);
}
return res;
};
/**
* Returns this Integer modulo the given one.
* @param {!goog.math.Integer} other The Integer by which to mod.
* @return {!goog.math.Integer} This value modulo the given one.
*/
goog.math.Integer.prototype.modulo = function(other) {
return this.subtract(this.divide(other).multiply(other));
};
/** @return {!goog.math.Integer} The bitwise-NOT of this value. */
goog.math.Integer.prototype.not = function() {
var len = this.bits_.length;
var arr = [];
for (var i = 0; i < len; i++) {
arr[i] = ~this.bits_[i];
}
return new goog.math.Integer(arr, ~this.sign_);
};
/**
* Returns the bitwise-AND of this Integer and the given one.
* @param {goog.math.Integer} other The Integer to AND with this.
* @return {!goog.math.Integer} The bitwise-AND of this and the other.
*/
goog.math.Integer.prototype.and = function(other) {
var len = Math.max(this.bits_.length, other.bits_.length);
var arr = [];
for (var i = 0; i < len; i++) {
arr[i] = this.getBits(i) & other.getBits(i);
}
return new goog.math.Integer(arr, this.sign_ & other.sign_);
};
/**
* Returns the bitwise-OR of this Integer and the given one.
* @param {goog.math.Integer} other The Integer to OR with this.
* @return {!goog.math.Integer} The bitwise-OR of this and the other.
*/
goog.math.Integer.prototype.or = function(other) {
var len = Math.max(this.bits_.length, other.bits_.length);
var arr = [];
for (var i = 0; i < len; i++) {
arr[i] = this.getBits(i) | other.getBits(i);
}
return new goog.math.Integer(arr, this.sign_ | other.sign_);
};
/**
* Returns the bitwise-XOR of this Integer and the given one.
* @param {goog.math.Integer} other The Integer to XOR with this.
* @return {!goog.math.Integer} The bitwise-XOR of this and the other.
*/
goog.math.Integer.prototype.xor = function(other) {
var len = Math.max(this.bits_.length, other.bits_.length);
var arr = [];
for (var i = 0; i < len; i++) {
arr[i] = this.getBits(i) ^ other.getBits(i);
}
return new goog.math.Integer(arr, this.sign_ ^ other.sign_);
};
/**
* Returns this value with bits shifted to the left by the given amount.
* @param {number} numBits The number of bits by which to shift.
* @return {!goog.math.Integer} This shifted to the left by the given amount.
*/
goog.math.Integer.prototype.shiftLeft = function(numBits) {
var arr_delta = numBits >> 5;
var bit_delta = numBits % 32;
var len = this.bits_.length + arr_delta + (bit_delta > 0 ? 1 : 0);
var arr = [];
for (var i = 0; i < len; i++) {
if (bit_delta > 0) {
arr[i] = (this.getBits(i - arr_delta) << bit_delta) |
(this.getBits(i - arr_delta - 1) >>> (32 - bit_delta));
} else {
arr[i] = this.getBits(i - arr_delta);
}
}
return new goog.math.Integer(arr, this.sign_);
};
/**
* Returns this value with bits shifted to the right by the given amount.
* @param {number} numBits The number of bits by which to shift.
* @return {!goog.math.Integer} This shifted to the right by the given amount.
*/
goog.math.Integer.prototype.shiftRight = function(numBits) {
var arr_delta = numBits >> 5;
var bit_delta = numBits % 32;
var len = this.bits_.length - arr_delta;
var arr = [];
for (var i = 0; i < len; i++) {
if (bit_delta > 0) {
arr[i] = (this.getBits(i + arr_delta) >>> bit_delta) |
(this.getBits(i + arr_delta + 1) << (32 - bit_delta));
} else {
arr[i] = this.getBits(i + arr_delta);
}
}
return new goog.math.Integer(arr, this.sign_);
};
|
/sanity-nupic-0.0.15.tar.gz/sanity-nupic-0.0.15/htmsanity/nupic/sanity/public/demos/out/goog/math/integer.js
| 0.895694 | 0.561936 |
integer.js
|
pypi
|
goog.provide('goog.math');
goog.require('goog.array');
goog.require('goog.asserts');
/**
* Returns a random integer greater than or equal to 0 and less than {@code a}.
* @param {number} a The upper bound for the random integer (exclusive).
* @return {number} A random integer N such that 0 <= N < a.
*/
goog.math.randomInt = function(a) {
return Math.floor(Math.random() * a);
};
/**
* Returns a random number greater than or equal to {@code a} and less than
* {@code b}.
* @param {number} a The lower bound for the random number (inclusive).
* @param {number} b The upper bound for the random number (exclusive).
* @return {number} A random number N such that a <= N < b.
*/
goog.math.uniformRandom = function(a, b) {
return a + Math.random() * (b - a);
};
/**
* Takes a number and clamps it to within the provided bounds.
* @param {number} value The input number.
* @param {number} min The minimum value to return.
* @param {number} max The maximum value to return.
* @return {number} The input number if it is within bounds, or the nearest
* number within the bounds.
*/
goog.math.clamp = function(value, min, max) {
return Math.min(Math.max(value, min), max);
};
/**
* The % operator in JavaScript returns the remainder of a / b, but differs from
* some other languages in that the result will have the same sign as the
* dividend. For example, -1 % 8 == -1, whereas in some other languages
* (such as Python) the result would be 7. This function emulates the more
* correct modulo behavior, which is useful for certain applications such as
* calculating an offset index in a circular list.
*
* @param {number} a The dividend.
* @param {number} b The divisor.
* @return {number} a % b where the result is between 0 and b (either 0 <= x < b
* or b < x <= 0, depending on the sign of b).
*/
goog.math.modulo = function(a, b) {
var r = a % b;
// If r and b differ in sign, add b to wrap the result to the correct sign.
return (r * b < 0) ? r + b : r;
};
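// For example (illustrative only): goog.math.modulo(-1, 8) returns 7, whereas
// the native -1 % 8 is -1. With a negative divisor, goog.math.modulo(1, -8)
// returns -7, keeping the result between b and 0.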
/**
* Performs linear interpolation between values a and b. Returns the value
* between a and b proportional to x (when x is between 0 and 1. When x is
* outside this range, the return value is a linear extrapolation).
* @param {number} a A number.
* @param {number} b A number.
* @param {number} x The proportion between a and b.
* @return {number} The interpolated value between a and b.
*/
goog.math.lerp = function(a, b, x) {
return a + x * (b - a);
};
/**
* Tests whether the two values are equal to each other, within a certain
* tolerance to adjust for floating point errors.
* @param {number} a A number.
* @param {number} b A number.
* @param {number=} opt_tolerance Optional tolerance range. Defaults
* to 0.000001. If specified, should be greater than 0.
* @return {boolean} Whether {@code a} and {@code b} are nearly equal.
*/
goog.math.nearlyEquals = function(a, b, opt_tolerance) {
return Math.abs(a - b) <= (opt_tolerance || 0.000001);
};
// TODO(user): Rename to normalizeAngle, retaining old name as deprecated
// alias.
/**
* Normalizes an angle to be in range [0-360). Angles outside this range will
 * be normalized to be the equivalent angle within that range.
* @param {number} angle Angle in degrees.
* @return {number} Standardized angle.
*/
goog.math.standardAngle = function(angle) {
return goog.math.modulo(angle, 360);
};
/**
* Normalizes an angle to be in range [0-2*PI). Angles outside this range will
 * be normalized to be the equivalent angle within that range.
* @param {number} angle Angle in radians.
* @return {number} Standardized angle.
*/
goog.math.standardAngleInRadians = function(angle) {
return goog.math.modulo(angle, 2 * Math.PI);
};
/**
* Converts degrees to radians.
* @param {number} angleDegrees Angle in degrees.
* @return {number} Angle in radians.
*/
goog.math.toRadians = function(angleDegrees) {
return angleDegrees * Math.PI / 180;
};
/**
* Converts radians to degrees.
* @param {number} angleRadians Angle in radians.
* @return {number} Angle in degrees.
*/
goog.math.toDegrees = function(angleRadians) {
return angleRadians * 180 / Math.PI;
};
/**
* For a given angle and radius, finds the X portion of the offset.
* @param {number} degrees Angle in degrees (zero points in +X direction).
* @param {number} radius Radius.
* @return {number} The x-distance for the angle and radius.
*/
goog.math.angleDx = function(degrees, radius) {
return radius * Math.cos(goog.math.toRadians(degrees));
};
/**
* For a given angle and radius, finds the Y portion of the offset.
* @param {number} degrees Angle in degrees (zero points in +X direction).
* @param {number} radius Radius.
* @return {number} The y-distance for the angle and radius.
*/
goog.math.angleDy = function(degrees, radius) {
return radius * Math.sin(goog.math.toRadians(degrees));
};
/**
* Computes the angle between two points (x1,y1) and (x2,y2).
* Angle zero points in the +X direction, 90 degrees points in the +Y
* direction (down) and from there we grow clockwise towards 360 degrees.
* @param {number} x1 x of first point.
* @param {number} y1 y of first point.
* @param {number} x2 x of second point.
* @param {number} y2 y of second point.
* @return {number} Standardized angle in degrees of the vector from
* x1,y1 to x2,y2.
*/
goog.math.angle = function(x1, y1, x2, y2) {
return goog.math.standardAngle(
goog.math.toDegrees(Math.atan2(y2 - y1, x2 - x1)));
};
/**
* Computes the difference between startAngle and endAngle (angles in degrees).
* @param {number} startAngle Start angle in degrees.
* @param {number} endAngle End angle in degrees.
* @return {number} The number of degrees that when added to
* startAngle will result in endAngle. Positive numbers mean that the
* direction is clockwise. Negative numbers indicate a counter-clockwise
* direction.
* The shortest route (clockwise vs counter-clockwise) between the angles
* is used.
* When the difference is 180 degrees, the function returns 180 (not -180)
* angleDifference(30, 40) is 10, and angleDifference(40, 30) is -10.
* angleDifference(350, 10) is 20, and angleDifference(10, 350) is -20.
*/
goog.math.angleDifference = function(startAngle, endAngle) {
var d =
goog.math.standardAngle(endAngle) - goog.math.standardAngle(startAngle);
if (d > 180) {
d = d - 360;
} else if (d <= -180) {
d = 360 + d;
}
return d;
};
/**
* Returns the sign of a number as per the "sign" or "signum" function.
* @param {number} x The number to take the sign of.
* @return {number} -1 when negative, 1 when positive, 0 when 0. Preserves
* signed zeros and NaN.
*/
goog.math.sign = Math.sign || function(x) {
if (x > 0) {
return 1;
}
if (x < 0) {
return -1;
}
return x; // Preserves signed zeros and NaN.
};
/**
* JavaScript implementation of Longest Common Subsequence problem.
* http://en.wikipedia.org/wiki/Longest_common_subsequence
*
* Returns the longest possible array that is subarray of both of given arrays.
*
* @param {IArrayLike<S>} array1 First array of objects.
* @param {IArrayLike<T>} array2 Second array of objects.
* @param {Function=} opt_compareFn Function that acts as a custom comparator
 *     for the array objects. Function should return true if objects are equal,
* otherwise false.
* @param {Function=} opt_collectorFn Function used to decide what to return
* as a result subsequence. It accepts 2 arguments: index of common element
* in the first array and index in the second. The default function returns
* element from the first array.
* @return {!Array<S|T>} A list of objects that are common to both arrays
* such that there is no common subsequence with size greater than the
* length of the list.
* @template S,T
*/
goog.math.longestCommonSubsequence = function(
array1, array2, opt_compareFn, opt_collectorFn) {
var compare = opt_compareFn || function(a, b) { return a == b; };
var collect = opt_collectorFn || function(i1, i2) { return array1[i1]; };
var length1 = array1.length;
var length2 = array2.length;
var arr = [];
for (var i = 0; i < length1 + 1; i++) {
arr[i] = [];
arr[i][0] = 0;
}
for (var j = 0; j < length2 + 1; j++) {
arr[0][j] = 0;
}
for (i = 1; i <= length1; i++) {
for (j = 1; j <= length2; j++) {
if (compare(array1[i - 1], array2[j - 1])) {
arr[i][j] = arr[i - 1][j - 1] + 1;
} else {
arr[i][j] = Math.max(arr[i - 1][j], arr[i][j - 1]);
}
}
}
// Backtracking
var result = [];
var i = length1, j = length2;
while (i > 0 && j > 0) {
if (compare(array1[i - 1], array2[j - 1])) {
result.unshift(collect(i - 1, j - 1));
i--;
j--;
} else {
if (arr[i - 1][j] > arr[i][j - 1]) {
i--;
} else {
j--;
}
}
}
return result;
};
/**
* Returns the sum of the arguments.
* @param {...number} var_args Numbers to add.
* @return {number} The sum of the arguments (0 if no arguments were provided,
* {@code NaN} if any of the arguments is not a valid number).
*/
goog.math.sum = function(var_args) {
return /** @type {number} */ (
goog.array.reduce(
arguments, function(sum, value) { return sum + value; }, 0));
};
/**
* Returns the arithmetic mean of the arguments.
* @param {...number} var_args Numbers to average.
* @return {number} The average of the arguments ({@code NaN} if no arguments
* were provided or any of the arguments is not a valid number).
*/
goog.math.average = function(var_args) {
return goog.math.sum.apply(null, arguments) / arguments.length;
};
/**
* Returns the unbiased sample variance of the arguments. For a definition,
* see e.g. http://en.wikipedia.org/wiki/Variance
* @param {...number} var_args Number samples to analyze.
* @return {number} The unbiased sample variance of the arguments (0 if fewer
* than two samples were provided, or {@code NaN} if any of the samples is
* not a valid number).
*/
goog.math.sampleVariance = function(var_args) {
var sampleSize = arguments.length;
if (sampleSize < 2) {
return 0;
}
var mean = goog.math.average.apply(null, arguments);
var variance =
goog.math.sum.apply(null, goog.array.map(arguments, function(val) {
return Math.pow(val - mean, 2);
})) / (sampleSize - 1);
return variance;
};
/**
* Returns the sample standard deviation of the arguments. For a definition of
* sample standard deviation, see e.g.
* http://en.wikipedia.org/wiki/Standard_deviation
* @param {...number} var_args Number samples to analyze.
* @return {number} The sample standard deviation of the arguments (0 if fewer
* than two samples were provided, or {@code NaN} if any of the samples is
* not a valid number).
*/
goog.math.standardDeviation = function(var_args) {
return Math.sqrt(goog.math.sampleVariance.apply(null, arguments));
};
/**
 * Returns whether the supplied number represents an integer, i.e. that it has
* no fractional component. No range-checking is performed on the number.
* @param {number} num The number to test.
* @return {boolean} Whether {@code num} is an integer.
*/
goog.math.isInt = function(num) {
return isFinite(num) && num % 1 == 0;
};
/**
* Returns whether the supplied number is finite and not NaN.
* @param {number} num The number to test.
* @return {boolean} Whether {@code num} is a finite number.
*/
goog.math.isFiniteNumber = function(num) {
return isFinite(num) && !isNaN(num);
};
/**
* @param {number} num The number to test.
* @return {boolean} Whether it is negative zero.
*/
goog.math.isNegativeZero = function(num) {
return num == 0 && 1 / num < 0;
};
/**
* Returns the precise value of floor(log10(num)).
* Simpler implementations didn't work because of floating point rounding
* errors. For example
* <ul>
* <li>Math.floor(Math.log(num) / Math.LN10) is off by one for num == 1e+3.
* <li>Math.floor(Math.log(num) * Math.LOG10E) is off by one for num == 1e+15.
* <li>Math.floor(Math.log10(num)) is off by one for num == 1e+15 - 1.
* </ul>
* @param {number} num A floating point number.
* @return {number} Its logarithm to base 10 rounded down to the nearest
* integer if num > 0. -Infinity if num == 0. NaN if num < 0.
*/
goog.math.log10Floor = function(num) {
if (num > 0) {
var x = Math.round(Math.log(num) * Math.LOG10E);
return x - (parseFloat('1e' + x) > num ? 1 : 0);
}
return num == 0 ? -Infinity : NaN;
};
/**
* A tweaked variant of {@code Math.floor} which tolerates if the passed number
* is infinitesimally smaller than the closest integer. It often happens with
* the results of floating point calculations because of the finite precision
* of the intermediate results. For example {@code Math.floor(Math.log(1000) /
* Math.LN10) == 2}, not 3 as one would expect.
* @param {number} num A number.
* @param {number=} opt_epsilon An infinitesimally small positive number, the
* rounding error to tolerate.
* @return {number} The largest integer less than or equal to {@code num}.
*/
goog.math.safeFloor = function(num, opt_epsilon) {
goog.asserts.assert(!goog.isDef(opt_epsilon) || opt_epsilon > 0);
return Math.floor(num + (opt_epsilon || 2e-15));
};
/**
* A tweaked variant of {@code Math.ceil}. See {@code goog.math.safeFloor} for
* details.
* @param {number} num A number.
* @param {number=} opt_epsilon An infinitesimally small positive number, the
* rounding error to tolerate.
* @return {number} The smallest integer greater than or equal to {@code num}.
*/
goog.math.safeCeil = function(num, opt_epsilon) {
goog.asserts.assert(!goog.isDef(opt_epsilon) || opt_epsilon > 0);
return Math.ceil(num - (opt_epsilon || 2e-15));
};
|
/sanity-nupic-0.0.15.tar.gz/sanity-nupic-0.0.15/htmsanity/nupic/sanity/public/demos/out/goog/math/math.js
| 0.960888 | 0.673641 |
math.js
|
pypi
|
[](https://github.com/UBC-MDS/sanityze/actions/workflows/ci-cd.yml) [](https://opensource.org/licenses/MIT) [](https://sanityze.readthedocs.io/en/latest/?badge=latest)  [](https://www.repostatus.org/#active) [](https://www.python.org/downloads/release/python-390/)
# sanityze

Data scientists often need to remove or redact Personal Identifiable Information (PII) from their data. This package provides utilities to spot and redact PII from Pandas data frames.
PII can be used to uniquely identify a person. This includes names, addresses, credit card numbers, phone numbers, email addresses, and social security numbers. Regulations such as the European Union's General Data Protection Regulation (GDPR) and the California Consumer Privacy Act (CCPA) therefore require that PII be removed or redacted from data sets before they are shared and further processed.
## Contributors and Maintainers
- [Tony Zoght](https://github.com/tzoght)
- [Caesar Wong](https://github.com/caesarw0)
- [Jonah Hamilton](https://github.com/xXJohamXx)
## Why `sanityze` ?
Because it's a fun name and it's a play on the word "sanitize" which is what we are doing to the data.
## Similar packages in Python
The closest Python package in functionality to sanityze is [scrubadub](https://scrubadub.readthedocs.io/en/stable/), which is a package for finding and removing PII from text. That package is not designed to work with Pandas data frames or other data structures, and we believe that our package will be more useful to data scientists as we add more spotters (mechanisms for finding PII), support for more data structures, and mechanisms for users to define their own spotters.
## Quick Start
To get started with `sanityze`, install it using `pip`:
```bash
pip install sanityze
```
And visit the [documentation](https://ubc-mds.github.io/sanityze/) for more information and examples.
## Features and Usage
Conceptually, `sanityze` is a package that provides a way to remove PII from Pandas data frames. The package provides a number of default spotters, which can be used to identify PII in the data and redact them.
The main entry point to the package is the `Cleanser` class. `Spotter`s are added to the cleanser and are used to identify PII in the data. The cleanser can then be used to cleanse the data and redact the PII from the given data frame (and, in the future, from other data structures supported by the package).
The package comes with a number of default spotters, as subclasses of `Spotter`:
1. `CreditCardSpotter` - identifies credit card numbers
2. `EmailSpotter` - identifies email addresses
Spotters can be added to the cleanser using the `add_spotter()` method. The cleanser can then be used to cleanse data using the `clean()` method, which takes a Pandas data frame and returns a Pandas data frame with PII redacted.
The redaction options provided by `sanityze` are:
1. Redact using a fixed string - The string in this case is the ID of the spotter. For example, if the spotter is an instance of `CreditCardSpotter`, the string will be `{{CREDITCARD}}`, or `{{EMAILADDRS}}` for an instance of `EmailSpotter`.
2. Redact using a hash of the input - The hash is computed using the `hashlib` package, and the hash function is `md5`. For example, if the spotter is an instance of `CreditCardSpotter` and the input contains the PII `1234-5678-9012-3456`, the redacted string will be `{{6a8b8c6c8c62bc939a11f36089ac75dd}}` (see the sketch below).
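To make the hash-based option concrete, below is a minimal, self-contained sketch of that style of redaction using only the Python standard library. The regular expression, function name, and exact matching behaviour are illustrative assumptions and are not sanityze's actual internals.

```python
import hashlib
import re

# Hypothetical pattern for illustration only; sanityze's spotters use their own matching logic.
CREDIT_CARD_RE = re.compile(r"\b(?:\d{4}-){3}\d{4}\b")

def redact_with_hash(text: str) -> str:
    """Replace every credit-card-like match with its md5 hash wrapped in {{...}}."""
    def _hash(match):
        digest = hashlib.md5(match.group(0).encode("utf-8")).hexdigest()
        return "{{" + digest + "}}"
    return CREDIT_CARD_RE.sub(_hash, text)

print(redact_with_hash("card on file: 1234-5678-9012-3456"))
```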
## Classes and Functions
1. `Cleanser`: the main class of the package. It is used to add spotters to it, and then cleanse data using the spotters.
1. `add_spotter()`: adds a spotter to the cleanser
2. `remove_spotter()`: removes a spotter from the cleanser
3. `clean()`: cleanses the data in the given data frame, and returns a new data frame with PII redacted
2. `EmailSpotter`: a spotter that identifies email addresses
1. `getUID()`: returns the unique ID of the spotter
2. `process()`: performs the PII matching and redaction
3. `CreditCardSpotter`: a spotter that identifies credit card numbers
1. `getUID()`: returns the unique ID of the spotter
2. `process()`: performs the PII matching and redaction
> You can checkout detailed API Documentations [here](https://ubc-mds.github.io/sanityze/).
Below is a simple quick start example:
```python
import pandas as pd
from sanityze import Cleanser, EmailSpotter
# Create a cleanser, and don't add the default spotters
cleanser = Cleanser(include_default_spotters=False)
cleanser.add_spotter(EmailSpotter())
cleaned_df = cleanser.clean(df)
```
## High-level Design
To better understand the design of the package, we have provided a high-level design document, which will be kept up to date as the package evolves. The document can be found [here](HighLevelDesign.md).
## Contributing
Interested in contributing? Check out the [contributing guidelines](CONTRIBUTING.md). Please note that this project is released with a Code of Conduct. By contributing to this project, you agree to abide by its terms.
## License
`sanityze` was created by Caesar Wong, Jonah Hamilton and Tony Zoght. It is licensed under the terms of the [MIT license](LICENSE).
## Credits
`sanityze` was created with [`cookiecutter`](https://cookiecutter.readthedocs.io/en/latest/) and the `py-pkgs-cookiecutter` [template](https://github.com/py-pkgs/py-pkgs-cookiecutter).
## Quick Links
- [PyPI](https://pypi.org/project/sanityze/)
- [Read the Docs](https://sanityze.readthedocs.io/en/latest/?badge=latest)
- [Documentation on GH](https://ubc-mds.github.io/sanityze/)
- [Kanban Board](https://github.com/orgs/UBC-MDS/projects/15)
- [Issues](https://github.com/UBC-MDS/sanityze/issues)
- [High Level Design](HighLevelDesign.md)
- [Contributing Guidelines](CONTRIBUTING.md)
- [Code of Conduct](CODE_OF_CONDUCT.md)
- [License](LICENSE)
|
/sanityze-1.0.2.tar.gz/sanityze-1.0.2/README.md
| 0.44746 | 0.952309 |
README.md
|
pypi
|
# SankeyFlow
SankeyFlow is a lightweight python package that plots [Sankey flow diagrams](https://en.wikipedia.org/wiki/Sankey_diagram) using Matplotlib.

```py
import matplotlib.pyplot as plt
from sankeyflow import Sankey
flows = [
('Product', 'Total revenue', 20779),
    ('Service and other', 'Total revenue', 30949),
('Total revenue', 'Gross margin', 34768),
('Total revenue', 'Cost of revenue', 16960),
...
]
s = Sankey(flows=flows)
s.draw()
plt.show()
```
See [example/msft_FY22q2.py](example/msft_FY22q2.py) for full example.
## Description
While Matplotlib does have a builtin sankey class, it is designed around single node flows. SankeyFlow instead focuses on directional flows, and looks more similar to plotly and [SankeyMATIC](https://sankeymatic.com/). It also treats nodes and flows separately, so the node value, inflows, and outflows don't have to be equal.

SankeyFlow is also fully transparent with Matplotlib; the sankey diagram requires only an axis to be drawn: `Sankey.draw(ax)`. All elements in the diagram are Matplotlib primitives (`Patch` and `Text`), and can be directly modified with the full suite of Matplotlib options.
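For example, `Sankey.draw(ax)` can target an axes you create yourself, and the resulting figure can then be styled with ordinary Matplotlib calls. A minimal sketch (the nodes here are inferred automatically from the flows, as described under Usage below):
```py
import matplotlib.pyplot as plt
from sankeyflow import Sankey

flows = [
    ('A', 'B1', 4),
    ('A', 'B2', 5),
]

fig, ax = plt.subplots(figsize=(4, 3), dpi=144)
s = Sankey(flows=flows)   # nodes inferred from the flows
s.draw(ax)                # draw onto the axes we own
ax.set_title('Styled with plain Matplotlib')
plt.show()
```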
## Installation
Requires Matplotlib and numpy.
```bash
python3 -m pip install sankeyflow
```
You can then simply
```py
from sankeyflow import Sankey
```
## Usage
The core class is `sankeyflow.Sankey`, which builds and draws the diagram. Data is passed in the constructor or with `Sankey.sankey(flows, nodes)`, and the diagram is drawn with `Sankey.draw(ax)`.
The diagram defaults to a left-to-right flow pattern, and breaks the nodes into "levels," which correspond to the x position. The cutflow diagram above has 5 levels, for example.
- `nodes` is a nested list of length `nlevels`, ordered from left to right. For each level, there is a list of nodes ordered from top to bottom. Each node is a `(name, value)` pair.
- `flows` is a list of flows, coded as `(source, destination, value)`. `source` and `destination` should match the `name`s in `nodes`.
If `nodes` is `None`, the nodes will be automatically inferred and placed from the flows.
```
nodes = [
[('A', 10)],
[('B1', 4), ('B2', 5)],
[('C', 3)]
]
flows = [
('A', 'B1', 4),
('A', 'B2', 5),
('B1', 'C', 1),
('B2', 'C', 2),
]
plt.figure(figsize=(4, 3), dpi=144)
s = Sankey(flows=flows, nodes=nodes)
s.draw()
```

### Configuration
Diagram and global configuration are set in the constructor. Individual nodes and flows can be further modified by adding a dictionary containing configuration arguments to the input tuples in `Sankey.sankey()`. See docstrings for complete argument lists.
For example, we can change the colormap to pastel, make all flows not curvy, and change the color of one flow.
```py
flows = [
('A', 'B1', 4),
('A', 'B2', 5),
('B1', 'C', 1),
('B2', 'C', 2, {'color': 'red'}),
]
s = Sankey(
flows=flows,
nodes=nodes,
cmap=plt.cm.Pastel1,
flow_opts=dict(curvature=0),
)
s.draw()
```

By default the color of the flows is the color of the destination node. This can be altered globally or per-flow with `flow_color_mode`.
```py
flows = [
('A', 'B1', 4),
('A', 'B2', 5, {'flow_color_mode': 'dest'}),
('B1', 'C', 1),
('B2', 'C', 2),
]
s = Sankey(
flows=flows,
nodes=nodes,
flow_color_mode='source',
)
s.draw()
```

We can also easily adjust the label formatting and other node properties in the same way.
```py
nodes = [
[('A', 10)],
[('B1', 4), ('B2', 5)],
[('C', 3, {'label_pos':'top'})]
]
flows = [
('A', 'B1', 4),
('A', 'B2', 5),
('B1', 'C', 1),
('B2', 'C', 2),
]
s = Sankey(
flows=flows,
nodes=nodes,
node_opts=dict(label_format='{label} ${value:.2f}'),
)
s.draw()
```

### Automatic Node Inference
Nodes can be automatically inferred from the flows by setting `nodes=None` in `Sankey.sankey()`. They are placed in the order they appear in the flows.
```py
gross = [
('Gross margin', 'Operating\nincome', 200),
('Gross margin', 'MG&A', 100),
('Gross margin', 'R&D', 100),
]
income = [
('Operating\nincome', 'Income', 200),
('Other income', 'Income', 100, {'flow_color_mode': 'source'}),
]
plt.subplot(121)
s1 = Sankey(flows=gross + income)
s1.draw()
plt.subplot(122)
s2 = Sankey(flows=gross[:1] + income + gross[1:])
s2.draw()
plt.tight_layout()
```

If you want to configure individual nodes while using the automatic inference, you can either access the nodes directly:
```py
s = Sankey(flows=flows)
s.find_node('name')[0].label = 'My label'
```
or retrieve the inferred nodes and edit the list before passing to `Sankey.sankey()`:
```py
nodes = Sankey.infer_nodes(flows)
# edit nodes
s.sankey(flows, nodes)
```
The latter is the only way to edit the ordering or level of the inferred nodes.
|
/sankeyflow-0.3.7.tar.gz/sankeyflow-0.3.7/README.md
| 0.414543 | 0.990741 |
README.md
|
pypi
|
## Detection Parameters
To detect action potentials, SanPy uses a number of parameters. These can all be configured using the [detection parameter plugin](../plugins/#detection-parameters) or programmatically with the API [sanpy/detectionParams](../api/bDetection).
Note: To update this table use sanpy/bDetection.py
| | Parameter | Default Value | Units | Human Readable | Description |
|---:|:-------------------|:---------------------|:--------|:----------------------------------------|:-------------------------------------------------------------------------------------------------------|
| 0 | dvdtThreshold | 100 | dVdt | dV/dt Threshold | dV/dt threshold for a spike, will be backed up to dvdt_percentOfMax and have xxx error when this fails |
| 1 | mvThreshold | -20 | mV | mV Threshold | mV threshold for spike AND minimum spike mV when detecting with dV/dt |
| 2 | dvdt_percentOfMax | 0.1 | Percent | dV/dt Percent of max | For dV/dt detection, the final TOP is when dV/dt drops to this percent from dV/dt AP peak |
| 3 | onlyPeaksAbove_mV | | mV | Accept Peaks Above (mV) | For dV/dt detection, only accept APs above this value (mV) |
| 4 | doBackupSpikeVm | True | Boolean | Backup Vm Spikes | If true, APs detected with just mV will be backed up until Vm falls to xxx |
| 5 | refractory_ms | 170 | ms | Minimum AP interval (ms) | APs with interval (with respect to previous AP) less than this will be removed |
|  6 | peakWindow_ms      |                  100 | ms      | Peak Window (ms)                         | Window after TOP (ms) to search for AP peak (mV)                                                         |
| 7 | dvdtPreWindow_ms | 10 | ms | dV/dt Pre Window (ms) | Window (ms) to search before each TOP for real threshold crossing |
| 8 | mdp_ms | 250 | ms | Pre AP MDP window (ms) | Window (ms) before an AP to look for MDP |
| 9 | avgWindow_ms | 5 | ms | | Window (ms) to calculate MDP (mV) as a mean rather than mV at single point for MDP |
| 10 | halfHeights | [10, 20, 50, 80, 90] | | AP Durations (%) | AP Durations as percent of AP height (AP Peak (mV) - TOP (mV)) |
| 11 | halfWidthWindow_ms | 200 | ms | Half Width Window (ms) | Window (ms) after TOP to look for AP Durations |
| 12 | medianFilter | 0 | points | Median Filter Points | Number of points in median filter, must be odd, 0 for no filter |
| 13 | SavitzkyGolay_pnts | 5 | points | SavitzkyGolay Filter Points | Number of points in SavitzkyGolay filter, must be odd, 0 for no filter |
| 14 | SavitzkyGolay_poly | 2 | | Savitzky-Golay Filter Polynomial Degree | The degree of the polynomial for Savitzky-Golay filter |
| 15 | spikeClipWidth_ms | 500 | ms | AP Clip Width (ms) | The width/duration of generated AP clips |
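For reference, the defaults above can also be written out programmatically. The snippet below is only an illustration, with values copied from the table; the real configuration objects live in `sanpy/bDetection.py`, per the note above, and are not reproduced here.

```python
# Illustrative only: a subset of the default detection parameters from the table above.
# Configure SanPy itself via the detection parameters plugin or the sanpy.bDetection API.
default_detection_params = {
    "dvdtThreshold": 100,                  # dV/dt threshold for a spike
    "mvThreshold": -20,                    # mV threshold for a spike
    "dvdt_percentOfMax": 0.1,              # percent of max dV/dt that defines the TOP
    "refractory_ms": 170,                  # minimum AP interval (ms)
    "peakWindow_ms": 100,                  # window after TOP to search for the AP peak
    "halfHeights": [10, 20, 50, 80, 90],   # AP durations as percent of AP height
    "halfWidthWindow_ms": 200,             # window after TOP to look for AP durations
}
```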
## Detection Errors
When SanPy encounters errors during spike detection, they are stored for each spike in ['errors']. Each error has a name like 'dvdtPercent' as follows
- **dvdtPercent**: Error searching for percent (10%) of dvdt max. When this occurs, the TOP (mV) of a spike will be more depolarized than it should be.
- **preMin**: Error searching for spike pre min, the MDP before a spike. This can occur on the first spike if it is close to the beginning of the recording.
- **postMin**:
- **fitEDD**: Error while fitting slope of EDD.
- **preSpikeDvDt**: Error while searching for peak in max ap upstroke (dV/dt) between spike threshold (TOP) and the peak in the first derivative of Vm (dV/dt).
- **cycleLength**: Usually occurs on last spike when looking for next MDP.
- **spikeWidth**: Error finding a particular spike width (AP_Dur). Usually occurs when spikes are too broad; try increasing the detection parameter `hwWindow_ms`.
# Analysis results
Once spikes are detected, SanPy provides the following analysis results.
| | Stat | name | units | yStat | yStatUnits | xStat | xStatUnits |
|---:|:----------------------------------|:---------------------------|:--------|:---------------------------|:-------------|:-----------------------|:-------------|
| 0 | Take Off Potential (s) | thresholdSec | s | thresholdVal | mV | thresholdSec | s |
| 1 | Take Off Potential (mV) | thresholdVal | mV | thresholdVal | mV | thresholdPnt | Points |
| 2 | Spike Frequency (Hz) | spikeFreq_hz | Hz | spikeFreq_hz | Hz | thresholdPnt | Points |
| 3 | Cycle Length (ms) | cycleLength_ms | ms | cycleLength_ms | ms | thresholdPnt | Points |
| 4 | AP Peak (mV) | peakVal | mV | peakVal | mV | peakPnt | Points |
| 5 | AP Height (mV) | peakHeight | mV | peakHeight | mV | peakPnt | Points |
| 6 | Pre AP Min (mV) | preMinVal | mV | preMinVal | mV | preMinPnt | Points |
| 7 | Post AP Min (mV) | postMinVal | mV | postMinVal | mV | postMinPnt | Points |
| 8 | Early Diastolic Depol Rate (dV/s) | earlyDiastolicDurationRate | dV/s | earlyDiastolicDurationRate | dV/s | | |
| 9 | Early Diastolic Duration (ms) | earlyDiastolicDuration_ms | ms | earlyDiastolicDuration_ms | dV/s | thresholdPnt | Points |
| 10 | Diastolic Duration (ms) | diastolicDuration_ms | ms | diastolicDuration_ms | dV/s | thresholdPnt | Points |
| 11 | Max AP Upstroke (mV) | preSpike_dvdt_max_val | mV | preSpike_dvdt_max_val | dV/s | preSpike_dvdt_max_pnt | Points |
| 12 | Max AP Upstroke (dV/dt) | preSpike_dvdt_max_val2 | dV/dt | preSpike_dvdt_max_val2 | dV/dt | preSpike_dvdt_max_pnt | Points |
| 13 | Max AP Repolarization (mV) | postSpike_dvdt_min_val | mV | postSpike_dvdt_min_val | mV | postSpike_dvdt_min_pnt | Points |
| 14 | AP Duration (ms) | apDuration_ms | ms | apDuration_ms | ms | thresholdPnt | Points |
| 15 | Half Width 10 (ms) | nan | nan | widths_10 | ms | | |
| 16 | Half Width 20 (ms) | nan | nan | widths_20 | ms | | |
| 17 | Half Width 50 (ms) | nan | nan | widths_50 | ms | | |
| 18 | Half Width 80 (ms) | nan | nan | widths_80 | ms | | |
| 19 | Half Width 90 (ms) | nan | nan | widths_90 | ms | | |
| 20 | Ca++ Delay (s) | nan | nan | caDelay_sec | s | | |
| 21 | Ca++ Width (ms) | nan | nan | caWidth_ms | ms | | |
# Analysis results (full)
<!-- <iframe src="../static/analysis-output-full.html" width="800" height="800" style="border: 0" seamless></iframe> -->
Generated 2023-03-24 with sanpy.analysisVersion 20230324a
Note: To update this table use sanpy/bAnalysisResults.py
<table border="1" class="dataframe" style="width:600">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Name</th>
<th>type</th>
<th>default</th>
<th>units</th>
<th>depends on detection</th>
<th>error</th>
<th>description</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>analysisDate</td>
<td>str</td>
<td></td>
<td></td>
<td></td>
<td></td>
<td>Date of analysis in yyyymmdd format.</td>
</tr>
<tr>
<th>1</th>
<td>analysisTime</td>
<td>str</td>
<td></td>
<td></td>
<td></td>
<td></td>
<td>Time of analysis in hh:mm:ss 24 hours format.</td>
</tr>
<tr>
<th>2</th>
<td>modDate</td>
<td>str</td>
<td></td>
<td></td>
<td></td>
<td></td>
<td>Modification date if AP is modified after detection.</td>
</tr>
<tr>
<th>3</th>
<td>modTime</td>
<td>str</td>
<td></td>
<td></td>
<td></td>
<td></td>
<td>Modification time if AP is modified after detection.</td>
</tr>
<tr>
<th>4</th>
<td>analysisVersion</td>
<td>str</td>
<td></td>
<td></td>
<td></td>
<td></td>
<td>Analysis version when analysis was run. See sanpy.analysisVersion</td>
</tr>
<tr>
<th>5</th>
<td>interfaceVersion</td>
<td>str</td>
<td></td>
<td></td>
<td></td>
<td></td>
<td>Interface version string when analysis was run. See sanpy.interfaceVersion</td>
</tr>
<tr>
<th>6</th>
<td>file</td>
<td>str</td>
<td></td>
<td></td>
<td></td>
<td></td>
<td>Name of raw data file analyzed</td>
</tr>
<tr>
<th>7</th>
<td>detectionType</td>
<td></td>
<td>None</td>
<td></td>
<td></td>
<td></td>
<td>Type of detection, either vm or dvdt. See enum sanpy.bDetection.detectionTypes</td>
</tr>
<tr>
<th>8</th>
<td>cellType</td>
<td>str</td>
<td></td>
<td></td>
<td></td>
<td></td>
<td>User specified cell type</td>
</tr>
<tr>
<th>9</th>
<td>sex</td>
<td>str</td>
<td></td>
<td></td>
<td></td>
<td></td>
<td>User specified sex</td>
</tr>
<tr>
<th>10</th>
<td>condition</td>
<td>str</td>
<td></td>
<td></td>
<td></td>
<td></td>
<td>User specified condition</td>
</tr>
<tr>
<th>11</th>
<td>sweep</td>
<td>int</td>
<td>0</td>
<td></td>
<td></td>
<td></td>
<td>Sweep number of analyzed sweep. Zero based.</td>
</tr>
<tr>
<th>12</th>
<td>epoch</td>
<td>int</td>
<td>NaN</td>
<td></td>
<td></td>
<td></td>
<td>Stimulus epoch number the spike occurred in. Zero based.</td>
</tr>
<tr>
<th>13</th>
<td>epochLevel</td>
<td>float</td>
<td>NaN</td>
<td></td>
<td></td>
<td></td>
<td>Epoch level (DAC) stimulus during the spike.</td>
</tr>
<tr>
<th>14</th>
<td>sweepSpikeNumber</td>
<td>int</td>
<td>None</td>
<td></td>
<td></td>
<td></td>
<td>Spike number within the sweep. Zero based.</td>
</tr>
<tr>
<th>15</th>
<td>spikeNumber</td>
<td>int</td>
<td>None</td>
<td></td>
<td></td>
<td></td>
<td>Spike number across all sweeps. Zero based.</td>
</tr>
<tr>
<th>16</th>
<td>include</td>
<td>bool</td>
<td>True</td>
<td></td>
<td></td>
<td></td>
<td>Boolean indicating whether to include this spike. Can be set by the user or programmatically after analysis.</td>
</tr>
<tr>
<th>17</th>
<td>userType</td>
<td>int</td>
<td>0</td>
<td></td>
<td></td>
<td></td>
<td>Integer indicating the user type. Can be set by the user or programmatically after analysis.</td>
</tr>
<tr>
<th>18</th>
<td>errors</td>
<td>list</td>
<td>[]</td>
<td></td>
<td></td>
<td></td>
<td>List of dictionaries holding detection errors for this spike</td>
</tr>
<tr>
<th>19</th>
<td>dvdtThreshold</td>
<td>float</td>
<td>NaN</td>
<td>dvdt</td>
<td>dvdtThreshold</td>
<td></td>
<td>AP Threshold in derivative dv/dt</td>
</tr>
<tr>
<th>20</th>
<td>mvThreshold</td>
<td>float</td>
<td>NaN</td>
<td>mV</td>
<td>mvThreshold</td>
<td></td>
<td>AP Threshold in primary recording mV</td>
</tr>
<tr>
<th>21</th>
<td>medianFilter</td>
<td>int</td>
<td>0</td>
<td></td>
<td>medianFilter</td>
<td></td>
<td>Median filter to generate filtered vm and dvdt. Value 0 indicates no filter.</td>
</tr>
<tr>
<th>22</th>
<td>halfHeights</td>
<td>list</td>
<td>[]</td>
<td></td>
<td>halfHeights</td>
<td></td>
<td>List of int to specify half-heights like [10, 20, 50, 80, 90].</td>
</tr>
<tr>
<th>23</th>
<td>thresholdPnt</td>
<td>int</td>
<td>NaN</td>
<td>point</td>
<td></td>
<td></td>
<td>AP threshold point</td>
</tr>
<tr>
<th>24</th>
<td>thresholdSec</td>
<td>float</td>
<td>NaN</td>
<td>sec</td>
<td></td>
<td></td>
<td>AP threshold seconds</td>
</tr>
<tr>
<th>25</th>
<td>thresholdVal</td>
<td>float</td>
<td>NaN</td>
<td>mV</td>
<td></td>
<td></td>
<td>Value of Vm at AP threshold point.</td>
</tr>
<tr>
<th>26</th>
<td>thresholdVal_dvdt</td>
<td>float</td>
<td>NaN</td>
<td>dvdt</td>
<td></td>
<td></td>
<td>Value of dvdt at AP threshold point.</td>
</tr>
<tr>
<th>27</th>
<td>dacCommand</td>
<td>float</td>
<td>NaN</td>
<td>mV</td>
<td></td>
<td></td>
<td>Value of DAC command at AP threshold point.</td>
</tr>
<tr>
<th>28</th>
<td>peakPnt</td>
<td>int</td>
<td>NaN</td>
<td>point</td>
<td>(onlyPeaksAbove_mV, peakWindow_ms)</td>
<td></td>
<td>AP peak point.</td>
</tr>
<tr>
<th>29</th>
<td>peakSec</td>
<td>float</td>
<td>NaN</td>
<td>sec</td>
<td></td>
<td></td>
<td>AP peak seconds.</td>
</tr>
<tr>
<th>30</th>
<td>peakVal</td>
<td>float</td>
<td>NaN</td>
<td>mV</td>
<td></td>
<td></td>
<td>Value of Vm at AP peak point.</td>
</tr>
<tr>
<th>31</th>
<td>peakHeight</td>
<td>float</td>
<td>NaN</td>
<td>mV</td>
<td></td>
<td></td>
<td>Difference between peakVal minus thresholdVal.</td>
</tr>
<tr>
<th>32</th>
<td>timeToPeak_ms</td>
<td>float</td>
<td>NaN</td>
<td>ms</td>
<td></td>
<td></td>
<td>Time to peak (ms) after TOP.</td>
</tr>
<tr>
<th>33</th>
<td>preMinPnt</td>
<td>int</td>
<td>NaN</td>
<td>point</td>
<td>mdp_ms</td>
<td></td>
<td>Minimum before an AP taken from predefined window.</td>
</tr>
<tr>
<th>34</th>
<td>preMinVal</td>
<td>float</td>
<td>NaN</td>
<td>mV</td>
<td></td>
<td></td>
<td>Minimum before an AP taken from predefined window.</td>
</tr>
<tr>
<th>35</th>
<td>preLinearFitPnt0</td>
<td>int</td>
<td>NaN</td>
<td>point</td>
<td></td>
<td></td>
<td>Point where pre linear fit starts. Used for EDD Rate</td>
</tr>
<tr>
<th>36</th>
<td>preLinearFitPnt1</td>
<td>int</td>
<td>NaN</td>
<td>point</td>
<td></td>
<td></td>
<td>Point where pre linear fit stops. Used for EDD Rate</td>
</tr>
<tr>
<th>37</th>
<td>earlyDiastolicDuration_ms</td>
<td>float</td>
<td>NaN</td>
<td>ms</td>
<td></td>
<td></td>
<td>Time (ms) between start/stop of EDD.</td>
</tr>
<tr>
<th>38</th>
<td>preLinearFitVal0</td>
<td>float</td>
<td>NaN</td>
<td>mV</td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<th>39</th>
<td>preLinearFitVal1</td>
<td>float</td>
<td>NaN</td>
<td>mV</td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<th>40</th>
<td>earlyDiastolicDurationRate</td>
<td>float</td>
<td>NaN</td>
<td>mV/s</td>
<td></td>
<td></td>
<td>Early diastolic duration rate, the slope of the linear fit between start/stop of EDD.</td>
</tr>
<tr>
<th>41</th>
<td>lateDiastolicDuration</td>
<td>float</td>
<td>NaN</td>
<td></td>
<td></td>
<td></td>
<td>Deprecated</td>
</tr>
<tr>
<th>42</th>
<td>preSpike_dvdt_max_pnt</td>
<td>int</td>
<td>NaN</td>
<td>point</td>
<td></td>
<td></td>
<td>Point corresponding to peak in dv/dt before an AP.</td>
</tr>
<tr>
<th>43</th>
<td>preSpike_dvdt_max_val</td>
<td>float</td>
<td>NaN</td>
<td>mV</td>
<td></td>
<td></td>
<td>Value of Vm at peak of dv/dt before an AP.</td>
</tr>
<tr>
<th>44</th>
<td>preSpike_dvdt_max_val2</td>
<td>float</td>
<td>NaN</td>
<td>dv/dt</td>
<td></td>
<td></td>
<td>Value of dv/dt at peak of dv/dt before an AP.</td>
</tr>
<tr>
<th>45</th>
<td>postSpike_dvdt_min_pnt</td>
<td>int</td>
<td>NaN</td>
<td>point</td>
<td>dvdtPostWindow_ms</td>
<td></td>
<td>Point corresponding to min in dv/dt after an AP.</td>
</tr>
<tr>
<th>46</th>
<td>postSpike_dvdt_min_val</td>
<td>float</td>
<td>NaN</td>
<td>mV</td>
<td></td>
<td></td>
<td>Value of Vm at minimum of dv/dt after an AP.</td>
</tr>
<tr>
<th>47</th>
<td>postSpike_dvdt_min_val2</td>
<td>float</td>
<td>NaN</td>
<td>dvdt</td>
<td></td>
<td></td>
<td>Value of dv/dt at minimum of dv/dt after an AP.</td>
</tr>
<tr>
<th>48</th>
<td>isi_pnts</td>
<td>int</td>
<td>NaN</td>
<td>point</td>
<td>refractory_ms</td>
<td></td>
<td>Inter-Spike-Interval (points) with respect to previous AP.</td>
</tr>
<tr>
<th>49</th>
<td>isi_ms</td>
<td>float</td>
<td>NaN</td>
<td>ms</td>
<td></td>
<td></td>
<td>Inter-Spike-Interval (ms) with respect to previous AP.</td>
</tr>
<tr>
<th>50</th>
<td>spikeFreq_hz</td>
<td>float</td>
<td>NaN</td>
<td>Hz</td>
<td></td>
<td></td>
<td>AP frequency with respect to previous AP.</td>
</tr>
<tr>
<th>51</th>
<td>cycleLength_pnts</td>
<td>int</td>
<td>NaN</td>
<td>point</td>
<td></td>
<td></td>
<td>Points between APs with respect to previous AP.</td>
</tr>
<tr>
<th>52</th>
<td>cycleLength_ms</td>
<td>int</td>
<td>NaN</td>
<td>ms</td>
<td></td>
<td></td>
<td>Time (ms) between APs with respect to previous AP.</td>
</tr>
<tr>
<th>53</th>
<td>diastolicDuration_ms</td>
<td>float</td>
<td>NaN</td>
<td>ms</td>
<td></td>
<td></td>
<td>Time (ms) between minimum before AP (preMinPnt) and AP time (thresholdPnt).</td>
</tr>
<tr>
<th>54</th>
<td>widths</td>
<td>list</td>
<td>[]</td>
<td></td>
<td></td>
<td></td>
<td>A list of dict to hold half-height information for each half-height in detection halfHeights.</td>
</tr>
<tr>
<th>55</th>
<td>widths_10</td>
<td>int</td>
<td>NaN</td>
<td>percent</td>
<td>halfWidthWindow_ms</td>
<td></td>
<td>Width (ms) at half-height 10 %.</td>
</tr>
<tr>
<th>56</th>
<td>widths_20</td>
<td>int</td>
<td>NaN</td>
<td>percent</td>
<td>halfWidthWindow_ms</td>
<td></td>
<td>Width (ms) at half-height 20 %.</td>
</tr>
<tr>
<th>57</th>
<td>widths_50</td>
<td>int</td>
<td>NaN</td>
<td>percent</td>
<td>halfWidthWindow_ms</td>
<td></td>
<td>Width (ms) at half-height 50 %.</td>
</tr>
<tr>
<th>58</th>
<td>widths_80</td>
<td>int</td>
<td>NaN</td>
<td>percent</td>
<td>halfWidthWindow_ms</td>
<td></td>
<td>Width (ms) at half-height 80 %.</td>
</tr>
<tr>
<th>59</th>
<td>widths_90</td>
<td>int</td>
<td>NaN</td>
<td>percent</td>
<td>halfWidthWindow_ms</td>
<td></td>
<td>Width (ms) at half-height 90 %.</td>
</tr>
</tbody>
</table>
<br>
# What spike parameters are detected?
For cardiac myocyte analysis, SanPy follows the nomenclature from this paper:
[Larson, et al (2013) Depressed pacemaker activity of sinoatrial node
myocytes contributes to the age-dependent decline in maximum heart rate. PNAS 110(44):18011-18016][larson et al 2013]
- MDP and Vmax were defined as the most negative and positive membrane potentials, respectively
- Take-off potential (TOP) was defined as the membrane potential when the first derivative of voltage with respect to time (dV/dt) reached 10% of its maximum value
- Cycle length was defined as the interval between MDPs in successive APs
- The maximum rates of the AP upstroke and repolarization were taken as the maximum and minimum values of the first derivative (dV/dtmax and dV/dtmin, respectively)
- [[[REMOVED 20210501]]] Action potential duration (APD) was defined as the interval between the TOP and the subsequent MDP
- APD_50 and APD_90 were defined as the interval between the TOP and 50% and 90% repolarization, respectively
- The diastolic duration was defined as the interval between MDP and TOP
- The early diastolic depolarization rate was estimated as the slope of a linear fit between 10% and 50% of the diastolic duration, and the early diastolic duration was the corresponding time interval (a numerical sketch follows this list)
- The nonlinear late diastolic depolarization phase was estimated as the duration between 1% and 10% dV/dt
[larson et al 2013]: https://www.ncbi.nlm.nih.gov/pubmed/24128759
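To make the early diastolic depolarization rate concrete, the sketch below fits a line over the 10-50% window of a synthetic diastolic interval with NumPy; the data and array names are illustrative only and are not part of SanPy.

```python
import numpy as np

# Synthetic diastolic interval from one MDP to the next TOP (illustrative data).
t = np.linspace(0.0, 0.5, 500)                             # seconds
vm = -60.0 + 20.0 * t + np.random.normal(0, 0.2, t.size)   # mV, noisy depolarizing ramp

# Early diastolic duration: the 10% to 50% window of the diastolic duration.
i0 = int(0.10 * t.size)
i1 = int(0.50 * t.size)

# The EDD rate is the slope of a linear fit over that window (mV/s).
slope_mV_per_s, _intercept = np.polyfit(t[i0:i1], vm[i0:i1], 1)
edd_ms = (t[i1] - t[i0]) * 1000.0
print(f'EDD rate ~ {slope_mV_per_s:.1f} mV/s over an early diastolic duration of {edd_ms:.0f} ms')
```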
|
/sanpy-ephys-0.1.25.tar.gz/sanpy-ephys-0.1.25/docs/docs/methods.md
| 0.926354 | 0.847337 |
methods.md
|
pypi
|
The SanPy desktop application is an easy-to-use and powerful GUI designed to satisfy all your analysis needs. You can [download](../download) the desktop application or [build from source](../install).
## Getting Started
Load a folder of raw data files with the `Load Folder` button, or use the `File - Load Folder ...` menu, or drag and drop a folder from your hard-drive. Once a folder of raw data is loaded, each file in the folder will be shown in a list, one row per raw data file. Selecting a file will display the raw data recording.
Spike detection is then performed by specifying a threshold in either the derivative of the membrane potential (Detect dV/dt) or the membrane potential (Detect mV).
Once spikes are detected, the analysis results are overlaid over the plots of the raw data. Finally, [plugins](../plugins) can be used to inspect the analysis results.
<IMG SRC="../img/sanpy-app.png" width=700>
## File List Table
<IMG = SRC="../img/desktop-main-window/file-list.png" width=700>
A list of files in a loaded folder, each row is a file and columns are information on the file including a subset of the [detection parameters](../methods/#detection-parameters) and [analysis results](../methods/#analysis-results-full). The file list table can be toggled on and off with the `View - File Panel` menu or keyboard `f`.
- **L** - Indicates if the file is loaded. Use the right-click menu `Unload Data` to unload a loaded file. This can save memory.
- **A** - Indicates if the file has been analyzed.
- **S** - Indicates if the analysis has been saved.
- **N** - Once analyzed, indicates the number of spikes detected.
- **File** - The name of the raw data file.
- **Dur(s)** - The duration of the recording (this is the duration of each sweep).
- **Sweeps** - The number of sweeps.
- **Epochs** - The number of epochs per sweep. Epochs correspond to different current clamp steps/amplitudes within each sweep.
- **KHz** - The sampling rate of the recording.
- **Mode** - The mode of the recording, either V-Clamp or I-Clamp. Currently, SanPy will only analyze I-Clamp.
- **Start(s)** - Once analyzed, indicates the start second of the analysis.
- **Stop(s)** - Once analyzed, indicates the stop second of the analysis.
- **dvdtThreshold** - Once analyzed, indicates the dV/dt threshold used for detection.
- **mvThreshold** - Once analyzed, indicates the mV used for detection.
<IMG = SRC="../img/desktop-main-window/file-menu-right-click.png" width=125 align="left">
Right-click in the file list table for a popup menu.
- **Unload Data** - Unload the raw data from the selected row. Useful to conserve memory if the folder has lots of files.
- **Synch With Folder** - Synchronize the contents of the folder with SanPy. Useful if you are acquiring new data on an electrophysiology setup.
- **Save All Analysis** - Save all the analysis for a folder to a single hdf5 file. This file is then used to load all the analysis the next time SanPy is run. See also, menu `File - Save Folder Analysis`.
- **Copy Table** - Copy the contents of the file list table. Useful to paste into a spreadsheet.
## Detection panel
<IMG = SRC="../img/desktop-main-window/detection-panel.png" width=250 align="left">
The detection panel has subcategories to detect spikes and to control the display of the raw data and analysis results. The detection panel can be toggled on and off using the `View - Detection Panel` menu or with keyboard `d`.
### Detection
Set detection parameters; finer control of all detection parameters is provided with the [Detection Parameters Plugin](../plugins/#detection-parameters).
- **Presets** - A popup to set a pre-defined set of detection parameters.
- **Detect dV/dt** - Detect spikes with the specified value in the first derivative (dV/dt). The first derivative can be plotted with menu `View - Derivative`.
- **Detect mV** - Detect spikes with the specified value in mV.
<!--
- **From(s) To(s)** - **depreciated** Displays the current x-axis zoom and allows it to be set. Use Click+drag with the mouse to visually zoom in on the recording. Use keyboard 'enter' or 'return' to set a recording to full zoom.
- **Spikes/Freq/Errors** - Once analyzed, displays the number of detected spikes, the mean instantaneous frequency between spikes, and the number of errors encountered during spike detection. View all spike analysis results with the [Summary Spikes](../plugins/#summary-spikes) plugin and all errors with the [Summary Error](../plugins/#summary-error) plugins.
-->
- **Export Spike Report** - Export all analysis for the selected file to a CSV file. This file includes all [detection parameters](../methods/#detection-parameters) and [analysis results](../methods/#analysis-results-full).
### Display
Control the display of SanPy.
- **Sweep** - Set the displayed sweep. This includes a popup menu to select a sweep and controls to go to the previous `<` and next `>` sweep.
<!--
- **Crosshair** - Checkbox to toggle a crosshair to track the mouse position and display the current position in seconds (x) and mV (y).
-->
- **Spike** - Select individual spikes by spike number and scroll to the previous `<<` and next `>>` spike.
- **[]** - A button to set the raw data plots to full scale/zoom. This can also be done with keyboard `enter` or `return`.
### Set Spikes
- Set parameters for the currently selected spike(s) like: Condition, User Type, and include.
### Plot Options
- Control the analysis results that are overlaid over the raw data. See [below](#raw-data-overlayed-with-analysis-results) for a detailed description.
<!-- move beyond the previous image. My Slacker generation comes through !!! -->
<p style="clear: both;">
</p>
## Raw data plots
<IMG = SRC="../img/desktop-main-window/raw-data-plots.png" width=350 align="left">
There are four different plots of the raw data. These can be toggled on and off using the [view menu](#view-menu) entries: `full recording`, `derivative`, and `DAC`. Note, the raw data (bottom plot) is always shown and cannot be toggled.
Click+drag with the mouse to zoom in on the time-axis.
- **Full Recording** - An overview of the total recording. This plot also shows the current zoom as a gray box.
- **Derivative** - The first derivative (dV/dt) of the raw recording. Used for spike detection by setting a value in the [detection panel](#detection).
- **DAC** - A plot of the stimulation output. Please note, in this example it is 0 (no stimulation).
- **Recording** - A plot of the actual recording with analysis results overlaid. Shown here are the spike threshold (mV, red circles) and spike peak (mV, green circles).
<!-- move beyond the previous image. My Slacker generation comes through !!! -->
<p style="clear: both;">
</p>
## Raw data overlayed with analysis results
<IMG = SRC="../img/desktop-main-window/plot-options.png" width=250 align="left">
<IMG = SRC="../img/desktop-main-window/raw-data-plot-zoom.png" width=350 align="right">
<!-- move beyond the previous image. My Slacker generation comes through !!! -->
<p style="clear: both;">
</p>
A number of analysis results can be overlaid using the [Plot Options](#plot-options) checkboxes in the [detection](#detection) panel. For a full list of analysis results, see [Methods - Analysis Results](../methods/#analysis-results-full)
- Global Threshold - Plot the spike threshold in the 'Full Recording' plot.
- Threshold (dV/dt) - Plot the spike threshold in the 'Derivative' plot (red circle)
The other plot options are displayed on the main recording (bottom most plot).
- Half-Widths - Spike half-widths for 10, 20, 50, 80, and 90 percent (yellow lines).
- Pre AP Min - Minimum mV before a spike (mV).
- EDD Rate - The Early Diastolic Duration Rate (mV/s).
- Threshold (mV) - Spike threshold (mV). Also used as the time of a spike.
- AP Peak (mV) - Spike peak (mV).
- Epoch Lines - Epochs represent different DAC steps within a sweep (gray vertical lines).
- EDD - The early diastolic duration.
<!-- move beyond the previous image. My generation X comes through !!! -->
<p style="clear: both;">
</p>
## Mouse and Keyboard
### Mouse
- Mouse click - Select individual spikes.
- Mouse wheel - Zoom in and out on x-axis (time).
- Mouse click+drag - Pan the x-axis (time).
- Mouse option+click+drag - Zoom in on the recording (y-axis).
### Keyboard
- "return" or "enter" - Set plot of recordings to full scale.
- "esc" - Canel spike selection.
- [coming soon] "b" - Toggle selected spike(s) bad.
## Menus
### File menu
<IMG = SRC="../img/desktop-main-window/file-menu.png" width=350 align="left">
- **Load Folder ...** - Load a folder of raw data. A loaded folder will be shown in the [File List Table](#file-list-table).
- **Load Recent ...** - Load recently loaded folders.
- **Save Folder Analysis ...** - Save all the analysis for the loaded folder.
- **Save Preferences** - Save the SanPy preferences. This includes mostly information about the GUI like window position and opened plugins.
- **Show Log** - Show the SanPy log. A log is kept as a user interacts with SanPy. This is useful to send to the developers if there are problems. The logs can also be viewed with the [SanPy Log Plugin](../plugins/#sanpy-log).
<!-- move beyond the previous image. My generation X comes through !!! -->
<p style="clear: both;">
</p>
### View menu
<IMG = SRC="../img/desktop-main-window/view-menu.png" width=175 align="left">
A menu that allows different pieces of the interface to be shown or hidden.
- **File Panel** - Toggle the visibility of the [file list panel](#file-list-table).
- **Detection Panel** - Toggle the visibility of the [detection panel](#detection-panel).
- **Detection** - Toggle [detection](#detection) in the Detection Panel
- **Display** - Toggle [display](#display) in the Detection Panel
- **Plot Options** - Toggle [plot options](#plot-options) in the Detection Panel. This is a panel with checkboxes to show/hide analysis results over the raw data.
- **Set Spikes** - Toggle [set spikes](#set-spikes) in the Detection Panel. This is a panel to allow selected spike parameters to be set. For example, to set spikes as good/bad, user type, and condition.
- **Full Recording** - Toggle the display of the [full recording](#raw-data-plots).
- **Derivative** - Toggle the display of the membrane potential [derivative](#raw-data-plots) (dV/dt). This is useful for detecting spikes with dV/dt.
- **DAC** - Toggle the display of the [current clamp stimulus](#raw-data-plots).
- **Plugins** - Toggle the display of a plugins dock. A right-click in the plugins dock will insert a plugin. Plugins can also be opened as separate windows with the main [Plugins menu](#plugins-menu).
- **Dark Theme** - Toggle dark and light themes. If checked, SanPy will use a dark theme, otherwise it will use a light theme. Please note that switching themes while SanPy is running will give sub-optimal results. To fully switch themes, select a theme then save preferences with the main `File - Save Preferences` menu, and restart SanPy.
<!-- move beyond the previous image. My generation X comes through !!! -->
<p style="clear: both;">
</p>
### Plugins menu
<IMG = SRC="../img/desktop-main-window/plugins-menu.png" width=175 align="left">
A menu to open a SanPy [plugin](../plugins). Plugins opened with this menu will be displayed in their own window.
To open a plugin within the main SanPy window, use the `View - Plugins` menu to show the plugins dock and then right-click to select a plugin to display.
All open plugins can be saved and re-opened with the next run of SanPy by saving the SanPy preferences with the `File - Save Preferences` menu.
<!-- move beyond the previous image. My generation X comes through !!! -->
<p style="clear: both;">
</p>
## Plugins
There is a dedicated [plugin](../plugins) documentation page. Here we want to highlight a few key plugins.
### [Plot Scatter](../plugins#plot-scatter)
The `plot scatter` plugin is designed to plot any [analysis results](../methods#analysis-results-full). Spike selections are bi-directional between the plot scatter widget and the main interface. The marker symbols and colors can be used to show detailed results per spike, for example coloring by time or sweep, whether the spike is marked bad, or whether the spike has a specified user type. These can be set in the main interface under `Detection Panel - Set Spikes`.
<img src="../img/plugins/scatter-plot.png" width="600" align="right">
<!-- move beyond the previous image. My generation X comes through !!! -->
<p style="clear: both;">
</p>
### [Plot FI](../plugins#plot-fi)
The `plot fi` plugin is designed to visualize the raw data and analysis of a current-clamp experiment where a range of hyperpolarizing and depolarizing current steps are delivered.
<img src="../img/plugins/plot-fi.png" width="600" align="right">
<!-- move beyond the previous image. My generation X comes through !!! -->
<p style="clear: both;">
</p>
### [Summarize Results](../plugins/#summarize-results)
The `summarize results` plugin shows a number of different tables to review the analysis results. Here, we focus on errors that occurred during spike detection. Each row represents an error in an individual spike. Selecting the error will select the spike in the main interface. This should be used in a curation feedback loop: once spikes are detected, check for errors and adjust the detection parameters until the errors are acceptable. Alternatively, you can set a tag on individual spikes to 'reject' them.
<img src="../img/plugins/detection-errors.png" width="600" align="right">
<!-- move beyond the previous image. My generation X comes through !!! -->
<p style="clear: both;">
</p>
## User Files
When the SanPy desktop application is first run, it creates a folder to contain user files in `<username>/Documents/SanPy-User-Files`. This is where you drop in your custom code to extend the capabilities of SanPy. This includes:
- [Writing a file loader](../api/writing-a-file-loader)
- [Writing new analysis](../api/writing-new-analysis)
- [Writing a plugin](../api/writing-a-plugin)
|
/sanpy-ephys-0.1.25.tar.gz/sanpy-ephys-0.1.25/docs/docs/desktop-application.md
| 0.880618 | 0.930711 |
desktop-application.md
|
pypi
|
from matplotlib.backends import backend_qt5agg
import matplotlib as mpl
import matplotlib.pyplot as plt
from sanpy.sanpyLogger import get_logger
logger = get_logger(__name__)
import sanpy
from sanpy.interface.plugins import sanpyPlugin
class exampleUserPlugin1(sanpyPlugin):
"""
Plot x/y statistics as a scatter
Get stat names and variables from sanpy.bAnalysisUtil.getStatList()
"""
myHumanName = "Example User Plugin 1"
def __init__(self, **kwargs):
"""
Args:
ba (bAnalysis): Not required
"""
super().__init__(**kwargs)
self.plot()
self.replot()
def plot(self):
"""Create the plot in the widget (called once)."""
self.mplWindow2() # assigns (self.fig, self.axs)
# white line with raw data
(self.line,) = self.axs.plot([], [], "-w", linewidth=0.5)
# red circles with spike threshold
(self.lineDetection,) = self.axs.plot([], [], "ro")
def replot(self):
"""Replot the widget. Usually when the file is switched"""
logger.info("")
if self.ba is None:
return
# get the x/y values from the recording
sweepX = self.getSweep("x") # self.ba.sweepX(sweepNumber=self.sweepNumber)
sweepY = self.getSweep("y") # self.ba.sweepY(sweepNumber=self.sweepNumber)
# update plot of raw data
self.line.set_data(sweepX, sweepY)
# update plot of spike threshold
thresholdSec = self.getStat("thresholdSec")
thresholdVal = self.getStat("thresholdVal")
self.lineDetection.set_data(thresholdSec, thresholdVal)
# make sure the matplotlib axis auto scale
self.axs.relim()
self.axs.autoscale_view(True, True, True)
# plt.draw()
self.static_canvas.draw()
if __name__ == "__main__":
# load an example file
path = "/Users/cudmore/Sites/SanPy/data/19114001.abf"
ba = sanpy.bAnalysis(path)
# set detection parameters
detectionPreset = sanpy.bDetection.detectionPresets.default
detectionClass = sanpy.bDetection(detectionPreset=detectionPreset)
# detectionClass['preSpikeClipWidth_ms'] = 100
# detectionClass['postSpikeClipWidth_ms'] = 400
# spike detect
ba.spikeDetect(detectionClass=detectionClass)
print(ba)
# run the plugin
import sys
from PyQt5 import QtCore, QtWidgets # , QtGui
app = QtWidgets.QApplication([])
sc = exampleUserPlugin1(ba=ba)
sc.show()
sc.replot()
sys.exit(app.exec_())
|
/sanpy-ephys-0.1.25.tar.gz/sanpy-ephys-0.1.25/sanpy/_userFiles/SanPy-User-Files/plugins/exampleUserPlugin1.py
| 0.667364 | 0.321274 |
exampleUserPlugin1.py
|
pypi
|
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtCore import QModelIndex
from PyQt5.QtGui import QStandardItemModel
from PyQt5.QtWidgets import QApplication, QTableView
class CheckBoxDelegate(QtWidgets.QItemDelegate):
"""
    A delegate that places a fully functioning QCheckBox in every cell of the column to which it is applied.
"""
def __init__(self, parent):
QtWidgets.QItemDelegate.__init__(self, parent)
def createEditor(self, parent, option, index):
"""
Important, otherwise an editor is created if the user clicks in this cell.
"""
return None
def paint(self, painter, option, index):
"""
Paint a checkbox without the label.
"""
self.drawCheck(painter, option, option.rect, QtCore.Qt.Unchecked if int(index.data()) == 0 else QtCore.Qt.Checked)
def editorEvent(self, event, model, option, index):
'''
Change the data in the model and the state of the checkbox
        if the user presses the left mouse button and this cell is editable. Otherwise do nothing.
'''
if not int(index.flags() & QtCore.Qt.ItemIsEditable) > 0:
return False
if event.type() == QtCore.QEvent.MouseButtonRelease and event.button() == QtCore.Qt.LeftButton:
# Change the checkbox-state
self.setModelData(None, model, index)
return True
return False
def setModelData (self, editor, model, index):
'''
        The user wanted to change the old state to the opposite.
'''
model.setData(index, 1 if int(index.data()) == 0 else 0, QtCore.Qt.EditRole)
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
model = QStandardItemModel(4, 3)
tableView = QTableView()
tableView.setModel(model)
delegate = CheckBoxDelegate(None)
tableView.setItemDelegateForColumn(1, delegate)
for row in range(4):
for column in range(3):
index = model.index(row, column, QModelIndex())
model.setData(index, 1)
tableView.setWindowTitle("Check Box Delegate")
tableView.show()
sys.exit(app.exec_())
|
/sanpy-ephys-0.1.25.tar.gz/sanpy-ephys-0.1.25/sandbox/myCheckboxInTable2.py
| 0.528777 | 0.205894 |
myCheckboxInTable2.py
|
pypi
|
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtCore import QModelIndex
from PyQt5.QtGui import QStandardItemModel
from PyQt5.QtWidgets import QApplication, QTableView
class CheckBoxDelegate(QtWidgets.QItemDelegate):
"""
    A delegate that places a fully functioning QCheckBox in every cell of the column to which it is applied.
"""
def __init__(self, parent):
QtWidgets.QItemDelegate.__init__(self, parent)
def createEditor(self, parent, option, index):
"""
Important, otherwise an editor is created if the user clicks in this cell.
"""
return None
def paint(self, painter, option, index):
"""
Paint a checkbox without the label.
"""
self.drawCheck(painter, option, option.rect, QtCore.Qt.Unchecked if int(index.data()) == 0 else QtCore.Qt.Checked)
def editorEvent(self, event, model, option, index):
'''
Change the data in the model and the state of the checkbox
        if the user presses the left mouse button and this cell is editable. Otherwise do nothing.
'''
if not int(index.flags() & QtCore.Qt.ItemIsEditable) > 0:
return False
if event.type() == QtCore.QEvent.MouseButtonRelease and event.button() == QtCore.Qt.LeftButton:
# Change the checkbox-state
self.setModelData(None, model, index)
return True
return False
def setModelData (self, editor, model, index):
'''
        The user wanted to change the old state to the opposite.
'''
model.setData(index, 1 if int(index.data()) == 0 else 0, QtCore.Qt.EditRole)
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
model = QStandardItemModel(4, 3)
tableView = QTableView()
tableView.setModel(model)
delegate = CheckBoxDelegate(None)
tableView.setItemDelegateForColumn(1, delegate)
for row in range(4):
for column in range(3):
index = model.index(row, column, QModelIndex())
model.setData(index, 1)
tableView.setWindowTitle("Check Box Delegate")
tableView.show()
sys.exit(app.exec_())
|
/sanpy-ephys-0.1.25.tar.gz/sanpy-ephys-0.1.25/sandbox/myCheckboxInTable.py
| 0.533641 | 0.206814 |
myCheckboxInTable.py
|
pypi
|
import san.pandas_utils
import san.sanbase_graphql_helper as sgh
from san.batch import Batch
from san.error import SanError
# to be removed
def burn_rate(idx, slug, **kwargs):
query_str = sgh.create_query_str('burn_rate', idx, slug, **kwargs)
return query_str
def token_age_consumed(idx, slug, **kwargs):
query_str = sgh.create_query_str('token_age_consumed', idx, slug, **kwargs)
return query_str
def average_token_age_consumed_in_days(idx, slug, **kwargs):
query_str = sgh.create_query_str(
'average_token_age_consumed_in_days', idx, slug, **kwargs)
return query_str
def prices(idx, slug, **kwargs):
query_str = sgh.create_query_str('prices', idx, slug, **kwargs)
return query_str
def token_velocity(idx, slug, **kwargs):
query_str = sgh.create_query_str('token_velocity', idx, slug, **kwargs)
return query_str
def token_circulation(idx, slug, **kwargs):
query_str = sgh.create_query_str('token_circulation', idx, slug, **kwargs)
return query_str
def realized_value(idx, slug, **kwargs):
query_str = sgh.create_query_str('realized_value', idx, slug, **kwargs)
return query_str
def mvrv_ratio(idx, slug, **kwargs):
query_str = sgh.create_query_str('mvrv_ratio', idx, slug, **kwargs)
return query_str
def nvt_ratio(idx, slug, **kwargs):
query_str = sgh.create_query_str('nvt_ratio', idx, slug, **kwargs)
return query_str
def daily_active_deposits(idx, slug, **kwargs):
query_str = sgh.create_query_str('daily_active_deposits', idx, slug, **kwargs)
return query_str
def ohlc(idx, slug, **kwargs):
query_str = sgh.create_query_str('ohlc', idx, slug, **kwargs)
return query_str
def gas_used(idx, slug, **kwargs):
query_str = sgh.create_query_str('gas_used', idx, slug, **kwargs)
return query_str
def miners_balance(idx, slug, **kwargs):
query_str = sgh.create_query_str('miners_balance', idx, slug, **kwargs)
return query_str
def mining_pools_distribution(idx, slug, **kwargs):
query_str = sgh.create_query_str(
'mining_pools_distribution', idx, slug, **kwargs)
return query_str
def historical_balance(idx, slug, **kwargs):
kwargs = sgh.transform_query_args('historical_balance', **kwargs)
query_str = ("""
query_{idx}: historicalBalance (
address: \"{address}\",
slug: \"{slug}\",
from: \"{from_date}\",
to: \"{to_date}\",
interval: \"{interval}\"
){{
""" + ' '.join(kwargs['return_fields']) + '}}').format(
idx=idx,
slug=slug,
**kwargs
)
return query_str
def social_dominance(idx, slug, **kwargs):
kwargs = sgh.transform_query_args('social_dominance', **kwargs)
query_str = ("""
query_{idx}: socialDominance (
slug: \"{slug}\",
from: \"{from_date}\",
to: \"{to_date}\",
interval: \"{interval}\",
source: {source}
){{
""" + ' '.join(kwargs['return_fields']) + '}}').format(
idx=idx,
slug=slug,
**kwargs
)
return query_str
def top_holders_percent_of_total_supply(idx, slug, **kwargs):
kwargs = sgh.transform_query_args('top_holders_percent_of_total_supply', **kwargs)
query_str = ("""
query_{idx}: topHoldersPercentOfTotalSupply(
slug: \"{slug}\",
numberOfHolders: {number_of_holders},
from: \"{from_date}\",
to: \"{to_date}\"
){{
""" + ' '.join(kwargs['return_fields']) + '}}').format(
idx=idx,
slug=slug,
**kwargs
)
return query_str
def history_twitter_data(idx, slug, **kwargs):
query_str = sgh.create_query_str('history_twitter_data', idx, slug, **kwargs)
return query_str
def price_volume_difference(idx, slug, **kwargs):
kwargs = sgh.transform_query_args('price_volume_difference', **kwargs)
query_str = ("""
query_{idx}: priceVolumeDiff (
slug: \"{slug}\",
from: \"{from_date}\",
to: \"{to_date}\",
interval: \"{interval}\",
currency: \"{currency}\"
){{
""" + ' '.join(kwargs['return_fields']) + '}}').format(
idx=idx,
slug=slug,
**kwargs
)
return query_str
def top_transfers(idx, slug, **kwargs):
kwargs = sgh.transform_query_args('top_transfers', **kwargs)
query_str = ("""
query_{idx}: topTransfers(
{address_selector}
slug: \"{slug}\",
from: \"{from_date}\",
to: \"{to_date}\"
){{
""" + ' '.join(kwargs['return_fields']) + '}}').format(idx=idx, slug=slug, **kwargs)
return query_str
def eth_top_transactions(idx, slug, **kwargs):
kwargs = sgh.transform_query_args('eth_top_transactions', **kwargs)
query_str = ("""
query_{idx}: projectBySlug (slug: \"{slug}\"){{
ethTopTransactions (
from: \"{from_date}\",
to: \"{to_date}\",
limit: {limit},
transactionType: {transaction_type}
){{
""" + ' '.join(kwargs['return_fields']) + '}}}}').format(
idx=idx,
slug=slug,
**kwargs
)
return query_str
def news(idx, tag, **kwargs):
print('WARNING! This metric is going to be removed in version 0.8.0')
kwargs = sgh.transform_query_args('news', **kwargs)
query_str = ("""
query_{idx}: news(
tag: \"{tag}\",
from: \"{from_date}\",
to: \"{to_date}\",
size: {size}
){{
""" + ' '.join(kwargs['return_fields']) + '}}').format(
idx=idx,
tag=tag,
**kwargs
)
return query_str
def eth_spent_over_time(idx, slug, **kwargs):
kwargs = sgh.transform_query_args('eth_spent_over_time', **kwargs)
query_str = """
query_{idx}: projectBySlug (slug: \"{slug}\"){{
ethSpentOverTime(
from: \"{from_date}\",
to: \"{to_date}\",
interval: \"{interval}\"
){{
datetime,
ethSpent
}}
}}
""".format(
idx=idx,
slug=slug,
**kwargs
)
return query_str
def token_top_transactions(idx, slug, **kwargs):
kwargs = sgh.transform_query_args('token_top_transactions', **kwargs)
query_str = """
query_{idx}: projectBySlug (slug: \"{slug}\"){{
tokenTopTransactions (
from: \"{from_date}\",
to: \"{to_date}\",
limit: {limit}
){{
datetime,
fromAddress{{
address,
isExchange
}},
toAddress{{
address,
isExchange
}},
trxHash,
trxValue
}}
}}
""".format(
idx=idx,
slug=slug,
**kwargs
)
return query_str
def emerging_trends(idx, **kwargs):
kwargs = sgh.transform_query_args('emerging_trends', **kwargs)
query_str = ("""
query_{idx}: getTrendingWords (
from: \"{from_date}\",
to: \"{to_date}\",
size: {size},
interval: \"{interval}\"
){{""" + ' '.join(kwargs['return_fields']) + """
}}
""").format(
idx=idx,
**kwargs
)
return query_str
def top_social_gainers_losers(idx, **kwargs):
kwargs = sgh.transform_query_args('top_social_gainers_losers', **kwargs)
query_str = ("""
query_{idx}: topSocialGainersLosers(
from: \"{from_date}\",
to: \"{to_date}\",
status: {status},
size: {size},
timeWindow: \"{time_window}\"
){{
""" + ' '.join(kwargs['return_fields']) + """
}}
""").format(
idx=idx,
**kwargs
)
return query_str
def ohlcv(idx, slug, **kwargs):
return_fields = [
'openPriceUsd',
'closePriceUsd',
'highPriceUsd',
'lowPriceUsd',
'volume',
'marketcap']
batch = Batch()
batch.get(
"prices/{slug}".format(slug=slug),
**kwargs
)
batch.get(
"ohlc/{slug}".format(slug=slug),
**kwargs
)
[price_df, ohlc_df] = batch.execute()
merged = san.pandas_utils.merge(price_df, ohlc_df)
if merged.size != 0:
return merged[return_fields]
return merged
def __choose_selector_or_slugs(slugs, **kwargs):
if slugs:
# The interpolation strings will be surrounded by single quotes
# but the GraphQL spec requires double quotes.
selector_or_slugs = f'selector: {{slugs: {slugs}}}'.replace("'", '"')
else:
if 'slugs' in kwargs:
selector_or_slugs = kwargs['slugs']
elif 'selector' in kwargs:
selector_or_slugs = kwargs['selector']
else:
raise SanError('"slugs" or "selector" must be provided as an argument!')
return selector_or_slugs
def __choose_selector_or_slug(slug, **kwargs):
if slug:
selector_or_slug = f'slug:"{slug}"'
else:
if 'slug' in kwargs:
selector_or_slug = kwargs['slug']
elif 'selector' in kwargs:
selector_or_slug = kwargs['selector']
else:
raise SanError('"slug" or "selector" must be provided as an argument!')
return selector_or_slug
def get_metric_timeseries_data(idx, metric, slug=None, **kwargs):
kwargs = sgh.transform_query_args('get_metric', **kwargs)
selector_or_slug = __choose_selector_or_slug(slug, **kwargs)
transform_arg = _transform_arg_helper(kwargs)
query_str = ("""
query_{idx}: getMetric(metric: \"{metric}\"){{
timeseriesData(
{selector_or_slug}
{transform_arg}
from: \"{from_date}\"
to: \"{to_date}\"
interval: \"{interval}\"
aggregation: {aggregation}
includeIncompleteData: {include_incomplete_data}
){{
""" + ' '.join(kwargs['return_fields']) + """
}}
}}
""").format(
idx=idx,
metric=metric,
selector_or_slug=selector_or_slug,
transform_arg=transform_arg,
**kwargs
)
return query_str
def get_metric_timeseries_data_per_slug(idx, metric, slugs=None, **kwargs):
kwargs = sgh.transform_query_args('get_metric', **kwargs)
selector_or_slugs = __choose_selector_or_slugs(slugs, **kwargs)
transform_arg = _transform_arg_helper(kwargs)
query_str = ("""
query_{idx}: getMetric(metric: \"{metric}\"){{
timeseriesDataPerSlug(
{selector_or_slugs}
{transform_arg}
from: \"{from_date}\"
to: \"{to_date}\"
interval: \"{interval}\"
aggregation: {aggregation}
includeIncompleteData: {include_incomplete_data}
){{
datetime
data {{
slug
value
}}
}}
}}
""").format(
idx=idx,
metric=metric,
selector_or_slugs=selector_or_slugs,
transform_arg=transform_arg,
**kwargs
)
return query_str
def _transform_arg_helper(kwargs):
transform_arg_str = ''
if 'transform' in kwargs and isinstance(kwargs['transform'], dict):
transform_arg_str += 'transform:{'
for k,v in kwargs['transform'].items():
if isinstance(v, int):
transform_arg_str += f'{k}: {v}\n'
elif isinstance(v, str):
transform_arg_str += f'{k}: \"{v}\"\n'
else:
raise SanError(f'\"transform\" argument incorrect: {kwargs["transform"]}')
transform_arg_str += '}'
return transform_arg_str
def projects(idx, slug, **kwargs):
if (slug == "erc20"):
return sgh.erc20_projects(idx, **kwargs)
elif (slug == "all"):
return sgh.all_projects(idx, **kwargs)
raise SanError("Unknown project group: {}".format(slug))
def exchange_funds_flow(idx, slug, **kwargs):
query_str = sgh.create_query_str('exchange_funds_flow', idx, slug, **kwargs)
return query_str
def social_volume_projects(idx, **kwargs):
query_str = """
query_{idx}: socialVolumeProjects
""".format(idx=idx)
return query_str
def social_volume(idx, slug, **kwargs):
kwargs = sgh.transform_query_args('social_volume', **kwargs)
query_str = ("""
query_{idx}: socialVolume (
slug: \"{slug}\",
from: \"{from_date}\",
to: \"{to_date}\",
interval: \"{interval}\",
socialVolumeType: {social_volume_type}
){{
""" + ' '.join(kwargs['return_fields']) + '}}').format(
idx=idx,
slug=slug,
**kwargs
)
return query_str
def topic_search(idx, **kwargs):
kwargs = sgh.transform_query_args('topic_search', **kwargs)
query_str = ("""
query_{idx}: topicSearch (
source: {source},
searchText: \"{search_text}\",
from: \"{from_date}\",
to: \"{to_date}\",
interval: \"{interval}\"
){{
""" + ' '.join(kwargs['return_fields']) + """
}}
""").format(
idx=idx,
**kwargs
)
return query_str
def get_api_calls_made():
return """{
currentUser {
apiCallsHistory(from: "utc_now-30d", to: "utc_now", interval: "1d", authMethod: APIKEY) {
apiCallsCount, datetime
}
}}"""
|
/sanpy-0.11.6-py3-none-any.whl/san/sanbase_graphql.py
| 0.625781 | 0.210036 |
sanbase_graphql.py
|
pypi
|
import san.sanbase_graphql
from san.query_constants import DEPRECATED_QUERIES, CUSTOM_QUERIES, NO_SLUG_QUERIES
from san.sanbase_graphql_helper import QUERY_MAPPING
from san.graphql import execute_gql, get_response_headers
from san.query import get_gql_query, parse_dataset
from san.transform import transform_timeseries_data_query_result
from san.error import SanError
def get(dataset, **kwargs):
"""
    The old way of using the `get` function is to provide the metric and slug
as a single string. This requires string interpolation.
Example:
san.get(
"daily_active_addresses/bitcoin"
from_date="2020-01-01"
to_date="2020-01-10")
The new and preferred way is to provide the slug as a separate parameter.
    This allows more flexible selectors to be used instead of a single string.
Examples:
san.get(
"daily_active_addresses",
slug="bitcoin",
from_date="2020-01-01"
to_date="2020-01-10")
san.get(
"dev_activity",
selector={"organization": "ethereum"},
from_date="utc_now-60d",
to_date="utc_now-40d")
"""
query, slug = parse_dataset(dataset)
if slug or query in NO_SLUG_QUERIES:
return __get_metric_slug_string_selector(query, slug, dataset, **kwargs)
elif query and not slug:
return __get(query, **kwargs)
def __get_metric_slug_string_selector(query, slug, dataset, **kwargs):
idx = kwargs.pop('idx', 0)
if query in DEPRECATED_QUERIES:
print(
'**NOTICE**\n{} will be deprecated in version 0.9.0, please use {} instead'.format(
query, DEPRECATED_QUERIES[query]))
if query in CUSTOM_QUERIES:
return getattr(san.sanbase_graphql, query)(idx, slug, **kwargs)
if query in QUERY_MAPPING.keys():
gql_query = '{' + get_gql_query(idx, dataset, **kwargs) + '}'
else:
if slug != '':
gql_query = '{' + \
san.sanbase_graphql.get_metric_timeseries_data(idx, query, slug, **kwargs) + '}'
else:
raise SanError('Invalid metric!')
res = execute_gql(gql_query)
return transform_timeseries_data_query_result(idx, query, res)
def __get(query, **kwargs):
if not ('selector' in kwargs or 'slug' in kwargs):
raise SanError('''
Invalid call of the get function,you need to either
give <metric>/<slug> as a first argument or give a slug
or selector as a key-word argument!''')
idx = kwargs.pop('idx', 0)
if query in QUERY_MAPPING.keys():
gql_query = '{' + get_gql_query(idx, query, **kwargs) + '}'
else:
gql_query = '{' + san.sanbase_graphql.get_metric_timeseries_data(idx, query, **kwargs) + '}'
res = execute_gql(gql_query)
return transform_timeseries_data_query_result(idx, query, res)
|
/sanpy-0.11.6-py3-none-any.whl/san/get.py
| 0.797596 | 0.217628 |
get.py
|
pypi
|
import iso8601
import datetime
_DEFAULT_INTERVAL = '1d'
_DEFAULT_SOCIAL_VOLUME_TYPE = 'TELEGRAM_CHATS_OVERVIEW'
_DEFAULT_SOURCE = 'TELEGRAM'
_DEFAULT_SEARCH_TEXT = ''
QUERY_MAPPING = {
'burn_rate': { # to be removed
'query': 'burnRate',
'return_fields': ['datetime', 'burnRate']
},
'token_age_consumed': {
'query': 'tokenAgeConsumed',
'return_fields': ['datetime', 'tokenAgeConsumed']
},
'average_token_age_consumed_in_days': {
'query': 'averageTokenAgeConsumedInDays',
'return_fields': ['datetime', 'tokenAge']
},
'prices': {
'query': 'historyPrice',
'return_fields': ['datetime', 'priceUsd', 'priceBtc', 'marketcap', 'volume']
},
'ohlc': {
'query': 'ohlc',
'return_fields': ['datetime', 'openPriceUsd', 'closePriceUsd', 'highPriceUsd', 'lowPriceUsd']
},
'exchange_funds_flow': {
'query': 'exchangeFundsFlow',
'return_fields': ['datetime', 'inOutDifference']
},
# OLD
'token_velocity': {
'query': 'tokenVelocity',
'return_fields': ['datetime', 'tokenVelocity']
},
# OLD
'token_circulation': {
'query': 'tokenCirculation',
'return_fields': ['datetime', 'tokenCirculation']
},
# OLD
'realized_value': {
'query': 'realizedValue',
'return_fields': ['datetime', 'realizedValue']
},
# OLD
'mvrv_ratio': {
'query': 'mvrvRatio',
'return_fields': ['datetime', 'ratio']
},
# OLD
'nvt_ratio': {
'query': 'nvtRatio',
'return_fields': ['datetime', 'nvtRatioCirculation', 'nvtRatioTxVolume']
},
# OLD
'daily_active_deposits': {
'query': 'dailyActiveDeposits',
'return_fields': ['datetime', 'activeDeposits']
},
'gas_used': {
'query': 'gasUsed',
'return_fields': ['datetime', 'gasUsed']
},
'miners_balance': {
'query': 'minersBalance',
'return_fields': ['balance', 'datetime']
},
'mining_pools_distribution': {
'query': 'miningPoolsDistribution',
'return_fields': ['datetime', 'other', 'top10', 'top3']
},
'history_twitter_data': {
'query': 'historyTwitterData',
'return_fields': ['datetime', 'followers_count']
},
'historical_balance': {
'query': 'historicalBalance',
'return_fields': ['datetime', 'balance']
},
# OLD
'social_dominance': {
'query': 'socialDominance',
'return_fields': ['datetime', 'dominance']
},
'top_holders_percent_of_total_supply': {
'query': 'topHoldersPercentOfTotalSupply',
'return_fields': ['datetime', 'inExchanges', 'outsideExchanges', 'inTopHoldersTotal']
},
'projects': {
'query': 'allProjects',
'return_fields': ['name', 'slug', 'ticker', 'totalSupply', 'marketSegment']
},
'get_metric': {
'query': 'getMetric',
'return_fields': [
'datetime',
'value'
]
},
'topic_search': {
'query': 'topicSearch',
'return_fields': [
            ('chartData', ['datetime', 'mentionsCount'])
]
},
'top_transfers': {
'query': 'topTransfers',
'return_fields': [
'datetime',
('fromAddress', ['address']),
('toAddress', ['address']),
'trxValue',
'trxHash'
]
},
'eth_top_transactions': {
'query': 'ethTopTransactions',
'return_fields': [
'datetime',
('fromAddress', ['address', 'isExchange']),
('toAddress', ['address', 'isExchange']),
'trxHash',
'trxValue'
]
},
'token_top_transactions': {
'query': 'tokenTopTransactions',
'return_fields': [
'datetime',
('fromAddress', ['address', 'isExchange']),
('toAddress', ['address', 'isExchange']),
'trxHash',
'trxValue'
]
},
'eth_spent_over_time': {
'query': 'ethSpentOverTime',
'return_fields': [
'datetime',
'ethSpent'
]
},
'news': {
'query': 'news',
'return_fields': [
'datetime',
'title',
'sourceName',
'url',
'description'
]
},
'price_volume_difference': {
'query': 'priceVolumeDiff',
'return_fields': [
'datetime',
'priceChange',
'priceVolumeDiff',
'volumeChange'
]
},
# OLD
'social_volume': {
'query': 'socialVolume',
'return_fields': [
'datetime',
'mentionsCount'
]
},
'top_social_gainers_losers': {
'query': 'topSocialGainersLosers',
'return_fields': [
'datetime',
('projects', ['change', 'slug', 'status'])
]
},
'emerging_trends': {
'query': 'getTrendingWords',
'return_fields': [
'datetime',
('topWords', ['score', 'word'])
]
},
'social_volume_projects': {}
}
def all_projects(idx, **kwargs):
kwargs = transform_query_args('projects', **kwargs)
query_str = ("""
query_{idx}: allProjects
{{
""" + ' '.join(kwargs['return_fields']) + '}}').format(idx=idx)
return query_str
def erc20_projects(idx, **kwargs):
kwargs = transform_query_args('projects', **kwargs)
query_str = ("""
query_{idx}: allErc20Projects
{{
""" + ' '.join(kwargs['return_fields']) + '}}').format(idx=idx)
return query_str
def create_query_str(query, idx, slug, **kwargs):
kwargs = transform_query_args(query, **kwargs)
query_str = ("""
query_{idx}: {query}(
slug: \"{slug}\",
from: \"{from_date}\",
to: \"{to_date}\",
interval: \"{interval}\"
){{
""" + ' '.join(kwargs['return_fields']) + '}}'
).format(
query=QUERY_MAPPING[query]['query'],
idx=idx,
slug=slug,
**kwargs
)
return query_str
def transform_selector(selector):
temp_selector = ''
for key, value in selector.items():
if (isinstance(value, str) and value.isdigit()) or isinstance(value, int):
temp_selector += f'{key}: {value}\n'
elif isinstance(value, str):
temp_selector += f'{key}: \"{value}\"\n'
elif isinstance(value, dict):
temp_selector += f'{key}:{{{transform_selector(value)}}}\n'
elif isinstance(value, bool):
temp_selector += (f'{key}: true\n' if value else f'{key}: false\n')
elif isinstance(value, list):
temp_value = map(lambda x: f'"{x}"', value)
temp_selector += f'{key}: [{",".join(temp_value)}]\n'
return temp_selector
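# Illustrative example (not called anywhere): transform_selector renders a Python dict
# as GraphQL selector fields, e.g.
#   transform_selector({'slug': 'bitcoin', 'holders': 10})
# returns 'slug: "bitcoin"\nholders: 10\n'.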
def transform_query_args(query, **kwargs):
kwargs['from_date'] = kwargs['from_date'] if 'from_date' in kwargs else _default_from_date()
kwargs['to_date'] = kwargs['to_date'] if 'to_date' in kwargs else _default_to_date()
kwargs['interval'] = kwargs['interval'] if 'interval' in kwargs else _DEFAULT_INTERVAL
kwargs['social_volume_type'] = kwargs['social_volume_type'] if 'social_volume_type' in kwargs else _DEFAULT_SOCIAL_VOLUME_TYPE
kwargs['source'] = kwargs['source'] if 'source' in kwargs else _DEFAULT_SOURCE
kwargs['search_text'] = kwargs['search_text'] if 'search_text' in kwargs else _DEFAULT_SEARCH_TEXT
kwargs['aggregation'] = kwargs['aggregation'] if 'aggregation' in kwargs else 'null'
kwargs['include_incomplete_data'] = kwargs['include_incomplete_data'] if 'include_incomplete_data' in kwargs else False
# transform python booleans to strings so it's properly interpolated in the query string
kwargs['include_incomplete_data'] = 'true' if kwargs['include_incomplete_data'] else 'false'
if 'selector' in kwargs:
kwargs['selector'] = f'selector:{{{transform_selector(kwargs["selector"])}}}'
kwargs['address'] = kwargs['address'] if 'address' in kwargs else ''
kwargs['transaction_type'] = kwargs['transaction_type'] if 'transaction_type' in kwargs else 'ALL'
if kwargs['address'] != '':
if kwargs['transaction_type'] != '':
kwargs['address_selector'] = f'addressSelector:{{address:\"{kwargs["address"]}\", transactionType: {kwargs["transaction_type"]}}},'
else:
kwargs['address_selector'] = f'addressSelector:{{address:\"{kwargs["address"]}\"}},'
else:
kwargs['address_selector'] = ''
kwargs['from_date'] = _format_from_date(kwargs['from_date'])
kwargs['to_date'] = _format_to_date(kwargs['to_date'])
if 'return_fields' in kwargs:
kwargs['return_fields'] = _format_all_return_fields(kwargs['return_fields'])
else:
kwargs['return_fields'] = _format_all_return_fields(QUERY_MAPPING[query]['return_fields'])
return kwargs
def _default_to_date():
return datetime.datetime.utcnow()
def _default_from_date():
return datetime.datetime.utcnow() - datetime.timedelta(days=365)
def _format_from_date(datetime_obj_or_str):
if isinstance(datetime_obj_or_str, str) and 'utc_now' in datetime_obj_or_str:
return datetime_obj_or_str
if isinstance(datetime_obj_or_str, datetime.datetime):
datetime_obj_or_str = datetime_obj_or_str.isoformat()
return iso8601.parse_date(datetime_obj_or_str).isoformat()
def _format_to_date(datetime_obj_or_str):
if isinstance(datetime_obj_or_str, str) and 'utc_now' in datetime_obj_or_str:
return datetime_obj_or_str
if isinstance(datetime_obj_or_str, datetime.datetime):
return iso8601.parse_date(datetime_obj_or_str.isoformat())
try:
# Throw if the string is not date-formated, parse as date otherwise
datetime.datetime.strptime(datetime_obj_or_str, '%Y-%m-%d')
dt = iso8601.parse_date(datetime_obj_or_str) + \
datetime.timedelta(hours=23, minutes=59, seconds=59)
except:
dt = iso8601.parse_date(datetime_obj_or_str)
return dt.isoformat()
def _format_all_return_fields(fields):
while any(isinstance(x, tuple) for x in fields):
fields = _format_return_fields(fields)
return fields
def _format_return_fields(fields):
return list(map(
lambda el: el[0] + '{{' + ' '.join(el[1]) + '}}' if isinstance(el, tuple) else el
, fields))
|
/sanpy-0.11.6-py3-none-any.whl/san/sanbase_graphql_helper.py
| 0.408041 | 0.325119 |
sanbase_graphql_helper.py
|
pypi
|
import operator
import pandas as pd
from san.pandas_utils import convert_to_datetime_idx_df
from functools import reduce
from collections import OrderedDict
from san.graphql import execute_gql
from san.error import SanError
from san.sanbase_graphql_helper import QUERY_MAPPING
QUERY_PATH_MAP = {
'eth_top_transactions': ['ethTopTransactions'],
'eth_spent_over_time': ['ethSpentOverTime'],
'token_top_transactions': ['tokenTopTransactions'],
'get_metric': ['timeseriesData'],
'get_metric_many': ['timeseriesDataPerSlug'],
'topic_search': ['chartData']
}
def path_to_data(idx, query, data):
"""
    With this function we jump straight to the key in the response that we want
    and start from there. The starting points are taken from QUERY_PATH_MAP.
"""
return reduce(
operator.getitem, [
'query_' + str(idx), ] + QUERY_PATH_MAP[query], data)
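# Illustrative example (not called anywhere): for idx=0 and query='get_metric',
#   path_to_data(0, 'get_metric', data)
# walks data['query_0']['timeseriesData'] and returns that inner list of data points.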
def transform_timeseries_data_query_result(idx, query, data):
"""
If there is a transforming function for this query, then the result is
passed for it for another transformation
"""
if query in QUERY_PATH_MAP:
result = path_to_data(idx, query, data)
elif query in QUERY_MAPPING:
result = data['query_' + str(idx)]
else:
result = path_to_data(idx, 'get_metric', data)
if query + '_transform' in globals():
result = globals()[query + '_transform'](result)
return convert_to_datetime_idx_df(result)
def transform_timeseries_data_per_slug_query_result(idx, query, data):
    if query in QUERY_MAPPING:
raise SanError(f'The get_many call is available only for get_metric. Called with {query}')
result = path_to_data(idx, 'get_metric_many', data)
rows = []
for datetime_point in result:
row = {'datetime': datetime_point['datetime']}
for slug_data in datetime_point['data']:
row[slug_data['slug']] = slug_data['value']
rows.append(row)
return convert_to_datetime_idx_df(rows)
def eth_top_transactions_transform(data):
return list(map(lambda column: {
'datetime': column['datetime'],
'fromAddress': column['fromAddress']['address'],
'fromAddressIsExchange': column['fromAddress']['isExchange'],
'toAddress': column['toAddress']['address'],
'toAddressIsExchange': column['toAddress']['isExchange'],
'trxHash': column['trxHash'],
'trxValue': column['trxValue']
}, data))
def top_transfers_transform(data):
return list(map(lambda column: {
'datetime': column['datetime'],
'fromAddress': column['fromAddress']['address'],
'toAddress': column['toAddress']['address'],
'trxHash': column['trxHash'],
'trxValue': column['trxValue']
}, data))
def news_transform(data):
result = list(map(lambda column: OrderedDict({
'datetime': column['datetime'],
'title': column['title'],
'description': column['description'],
'sourceName': column['sourceName'],
'url': column['url']
}), data))
return result
def token_top_transactions_transform(data):
return list(map(lambda column: {
'datetime': column['datetime'],
'fromAddress': column['fromAddress']['address'],
'fromAddressIsExchange': column['fromAddress']['isExchange'],
'toAddress': column['toAddress']['address'],
'toAddressIsExchange': column['toAddress']['isExchange'],
'trxHash': column['trxHash'],
'trxValue': column['trxValue']
}, data))
def emerging_trends_transform(data):
result = []
for column in data:
for i in range(0, len(column['topWords'])):
result.append({
'datetime': column['datetime'],
'score': column['topWords'][i]['score'],
'word': column['topWords'][i]['word']
})
result.sort(key=lambda elem: elem['datetime'])
return result
def top_social_gainers_losers_transform(data):
result = []
for column in data:
for i in range(0, len(column['projects'])):
result.append({
'datetime': column['datetime'],
'slug': column['projects'][i]['slug'],
'change': column['projects'][i]['change'],
'status': column['projects'][i]['status'],
})
result = list(map(lambda column: OrderedDict({
'datetime': column['datetime'],
'slug': column['slug'],
'change': column['change'],
'status': column['status']
}), result))
return result
|
/sanpy-0.11.6-py3-none-any.whl/san/transform.py
| 0.61231 | 0.416114 |
transform.py
|
pypi
|
import re
import datetime
import pandas as pd
def convert_dt(timestamp_string, postfix=' 00:00:00'):
if type(timestamp_string) == datetime.date:
timestamp_string = timestamp_string.strftime('%Y-%m-%d')
if type(timestamp_string) == datetime.datetime:
        timestamp_string = timestamp_string.strftime('%Y-%m-%d %H:%M:%S')
timestamp_string = timestamp_string.replace('Z', '').replace('T', ' ')
timestamp_string = timestamp_string[:19]
if re.match(r'\d\d\d\d-\d\d-\d\d.\d\d:\d\d:\d\d', timestamp_string):
return timestamp_string[:10] + ' ' + timestamp_string[11:]
elif re.match(r'\d\d\d\d-\d\d-\d\d', timestamp_string):
return timestamp_string + postfix
else:
raise Exception(f"Unknown format: {timestamp_string} !")
def str_to_ts(x):
if isinstance(x, datetime.datetime):
return x
return datetime.datetime.strptime(convert_dt(x), '%Y-%m-%d %H:%M:%S')
def parse_str_to_timedelta(time_str):
regex = re.compile(r'((?P<days>\d+?)d)?((?P<hours>\d+?)h)?((?P<minutes>\d+?)m)?((?P<seconds>\d+?)s)?')
parts = regex.match(time_str.lower())
if not parts:
return
parts = parts.groupdict()
time_params = {}
for name, param in parts.items():
if param:
time_params[name] = int(param)
return datetime.timedelta(**time_params)
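# Illustration (comment added, not original code): the parser accepts compact
# interval strings built from days/hours/minutes/seconds, e.g.
#   parse_str_to_timedelta('1d6h') == datetime.timedelta(days=1, hours=6)
#   parse_str_to_timedelta('30m')  == datetime.timedelta(minutes=30)
# Note that a string with no recognizable parts still matches the (all-optional)
# regex and therefore yields timedelta(0) rather than None.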
def resample_dataframe(source_df: pd.DataFrame,
resample_interval: str or datetime.timedelta,
values_column_name: str,
grouping_column_name: str or None = None,
resample_function: str = 'pad'
):
if isinstance(resample_interval, str):
resample_interval = parse_str_to_timedelta(resample_interval)
if not isinstance(resample_interval, datetime.timedelta):
return
df = source_df.copy()
if grouping_column_name:
df = df.groupby(grouping_column_name)
df = pd.DataFrame(getattr(df[values_column_name].resample(resample_interval), resample_function)())
if grouping_column_name:
df = df.reset_index(grouping_column_name)
return df
|
/sanpy-0.11.6-py3-none-any.whl/san/extras/utils.py
| 0.46393 | 0.308425 |
utils.py
|
pypi
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as pyplot
from datetime import timedelta
from scipy import stats
from IPython.display import display
"""
Event study to evaluate events or signals.
The main parameters the event study function accepts are a pandas DataFrame containing the price data
of the observed projects and the benchmark (data), and a DataFrame containing the events (ev_data)
that holds the date of occurrence in its index and the name of the project for every date.
"""
FIGURE_WIDTH = 20
FIGURE_HEIGHT = 7
FIGURE_SIZE = [FIGURE_WIDTH, FIGURE_HEIGHT]
COLOR_1 = '#14c393' # used as a main color, jungle-green
COLOR_2 = '#ffad4d' # used as a second color for 2-line charts, texas-rose
COLOR_3 = '#5275ff' # benchmark, dodger-blue
COLOR_4 = '#ff5b5b' # custom red, persimmon
FONT_SIZE_LEGEND = 18
FONT_SIZE_AXES = 14
def get_close_price(data, sid, current_date, day_number, interval):
# If we're looking at day 0 just return the indexed date
if day_number == 0:
return data.loc[current_date][sid]
# Find the close price day_number away from the current_date
else:
# If the close price is too far ahead, just get the last available
total_date_index_length = len(data.index)
# Find the closest date to the target date
date_index = data.index.searchsorted(current_date + interval*day_number)
# If the closest date is too far ahead, reset to the latest date possible
date_index = total_date_index_length - 1 if date_index >= total_date_index_length else date_index
# Use the index to return a close price that matches
return data.iloc[date_index][sid]
def get_first_price(data, starting_point, sid, date, interval):
starting_day = date - interval*starting_point
date_index = data.index.searchsorted(starting_day)
return data.iloc[date_index][sid]
def remove_outliers(returns, num_std_devs):
return returns[~((returns-returns.mean()).abs() > num_std_devs*returns.std())]
def get_returns(data, starting_point, sid, date, day_num, interval):
first_price = get_first_price(data, starting_point, sid, date, interval)
close_price = get_close_price(data, sid, date, day_num, interval)
if first_price == 0:
return 0
return (close_price - first_price) / (first_price + 0.0)
def calc_beta(stock, benchmark, price_history):
"""
Calculate beta amounts for each security
"""
stock_prices = price_history[stock].pct_change().dropna()
bench_prices = price_history[benchmark].pct_change().dropna()
aligned_prices = bench_prices.align(stock_prices, join='inner')
bench_prices = aligned_prices[0]
stock_prices = aligned_prices[1]
bench_prices = np.array(bench_prices.values)
stock_prices = np.array(stock_prices.values)
bench_prices = np.reshape(bench_prices, len(bench_prices))
stock_prices = np.reshape(stock_prices, len(stock_prices))
if len(stock_prices) == 0:
return None
# market_beta, benchmark_beta = np.polyfit(bench_prices, stock_prices, 1)
slope, intercept, r_value, p_value, stderr = stats.linregress(bench_prices, stock_prices)
return slope
def timedelta_format(seconds):
numbers = [3600*24, 3600, 60, 1]
words = [' day', ' hour', ' minute', ' second']
values = [0, 0, 0, 0]
value = int(seconds)
text = ''
ind = 0
while value > 0:
units = value // numbers[ind]
rem = value % numbers[ind]
values[ind] = units
value = rem
if units > 0:
if len(text) > 0:
text += ', '
text = text + str(units) + words[ind]
if units > 1:
text += 's'
ind += 1
return text
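# Worked example (comment added, not original code): 90061 seconds is one of each
# unit, so timedelta_format(90061) returns '1 day, 1 hour, 1 minute, 1 second',
# while exact multiples drop the smaller units, e.g. timedelta_format(3600) -> '1 hour'.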
def neg(n):
return -1*n
def build_x_ticks(day_numbers, number_of_ticks):
max_value = day_numbers[len(day_numbers)-1]
if max_value < 30:
return [d for d in day_numbers if d % 2 == 0]
else:
n1 = round(max_value / number_of_ticks)
if n1 % 2 > 0:
n1 += 1
base = [i for i in range(n1, max_value + 1, n1)]
base_neg = base.copy()
base_neg.reverse()
base_neg = [i * (-1) for i in base_neg]
return base_neg + [0] + base
def plot_cumulative_returns(returns, x_ticks, events, interval_text):
pyplot.figure(figsize=FIGURE_SIZE)
returns.plot(xticks=x_ticks, label="events=%s" % events, color=COLOR_1)
pyplot.axvline(x=0, color='black', alpha=.3)
pyplot.title("Cumulative Return from Events")
pyplot.xlabel("Time Window (t), 1t="+interval_text)
pyplot.ylabel("Cumulative Return (r)")
pyplot.grid(b=None, which=u'major', axis=u'y')
pyplot.legend()
def plot_average_returns(returns, benchmark_returns, x_ticks, interval_text):
pyplot.figure(figsize=FIGURE_SIZE)
returns.plot(xticks=x_ticks, label="Cumulative Return from Events", color=COLOR_1)
benchmark_returns.plot(xticks=x_ticks, label='Benchmark', color=COLOR_3)
pyplot.axvline(x=0, color='black', alpha=.3)
pyplot.title("Benchmark's average returns around that time to Signals_Events")
pyplot.ylabel("% Cumulative Return")
pyplot.xlabel("Time Window (t), 1t="+interval_text)
pyplot.grid(b=None, which=u'major', axis=u'y')
pyplot.legend()
def plot_cumulative_abnormal_returns(returns, abnormal_returns, x_ticks, interval_text):
pyplot.figure(figsize=FIGURE_SIZE)
returns.plot(xticks=x_ticks, label="Average Cumulative Returns", color=COLOR_1)
abnormal_returns.plot(xticks=x_ticks, label="Abnormal Average Cumulative Returns", color=COLOR_2)
pyplot.axhline(
y=abnormal_returns.loc[0],
linestyle='--',
color='black',
alpha=.3,
label='Drift'
)
pyplot.axhline(
y=abnormal_returns.max(),
linestyle='--',
color='black',
alpha=.3
)
pyplot.axvline(x=0, color='black', alpha=.3)
pyplot.title("Cumulative Abnormal Returns versus Cumulative Returns")
pyplot.ylabel("% Cumulative Return")
pyplot.xlabel("Time Window (t), 1t="+interval_text)
pyplot.grid(b=None, which=u'major', axis=u'y')
pyplot.legend()
def plot_cumulative_return_with_errors(returns, std_devs, events):
"""
Plotting the same graph but with error bars
"""
pyplot.figure(figsize=FIGURE_SIZE)
pyplot.axvline(x=0, color='black', alpha=.3)
pyplot.errorbar(returns.index,
returns,
xerr=0,
yerr=std_devs,
label="events=%s" % events,
color=COLOR_1)
pyplot.grid(b=None, which=u'major', axis=u'y')
pyplot.title("Cumulative Return from Events with error")
pyplot.xlabel("Window Length (t)")
pyplot.ylabel("Cumulative Return (r)")
pyplot.legend()
pyplot.show()
def plot_abnormal_cumulative_return_with_errors(abnormal_volatility, abnormal_returns, events):
"""
Capturing volatility of abnormal returns
"""
pyplot.figure(figsize=FIGURE_SIZE)
pyplot.errorbar(
abnormal_returns.index,
abnormal_returns,
xerr=0,
yerr=abnormal_volatility,
label="events=%s" % events,
color=COLOR_1
)
pyplot.axvline(x=0, color='black', alpha=.3)
pyplot.grid(b=None, which=u'major', axis=u'y')
pyplot.title("Abnormal Cumulative Return from Events with error")
pyplot.xlabel("Window Length (t)")
pyplot.ylabel("Cumulative Return (r)")
pyplot.legend()
pyplot.show()
def build_day_numbers(starting_point):
"""
    Create the range of day_numbers that will be used to calculate returns,
    looking from -starting_point to +starting_point to form the timeframe band.
"""
return [i for i in range(-starting_point, starting_point+1)]
def get_price_history(data, date, beta_window, sid, benchmark):
"""
Create a DataFrame containing the data for the necessary sids within that time frame
"""
if not beta_window:
history_index = data.index.searchsorted(date)
history_index_start = data.index.searchsorted(data[data[sid] != 0].index[0])
        historical_prices = data.iloc[history_index_start:history_index][[sid, benchmark]]
else:
history_index = data.index.searchsorted(date)
history_index_start = max([history_index - beta_window, 0])
        historical_prices = data.iloc[history_index_start:history_index][[sid, benchmark]]
        historical_prices = historical_prices[historical_prices[sid] != 0]
    return historical_prices[historical_prices != 0].dropna()
def compute_return_matrix(ev_data, data, sample_size, starting_point,
day_num, benchmark, returns, benchmark_returns, abnormal_returns, beta_window, interval):
"""
Computes the returns for the project, benchmark and abnormal
"""
for date, row in ev_data.iterrows():
sid = row.symbol
if date not in data.index or sid not in data.columns:
continue
if sid == 'ethereum' and benchmark == 'ethereum':
benchmark = 'bitcoin'
elif sid == 'bitcoin' and benchmark == 'bitcoin':
benchmark = 'ethereum'
project_return = get_returns(data, starting_point, sid, date, day_num, interval)
benchmark_return = get_returns(data, starting_point, benchmark, date, day_num, interval)
returns.append(project_return)
benchmark_returns.append(benchmark_return)
sample_size += 1
beta = calc_beta(sid, benchmark, get_price_history(data, date, beta_window, sid, benchmark))
if beta is None:
continue
abnormal_return = project_return - (beta * benchmark_return)
abnormal_returns.append(abnormal_return)
return sample_size
def compute_averages(ev_data, data, starting_point, day_numbers,
benchmark, all_returns, all_std_devs,
total_sample_size, all_benchmark_returns,
abnormal_volatility, all_abnormal_returns, beta_window, interval):
"""
    Computes the average returns and standard deviations of the events
"""
for day_num in day_numbers:
returns = []
benchmark_returns = []
abnormal_returns = []
sample_size = 0
sample_size = compute_return_matrix(ev_data, data, sample_size, starting_point,
day_num, benchmark, returns, benchmark_returns,
abnormal_returns, beta_window, interval)
returns = pd.Series(returns).dropna()
returns = remove_outliers(returns, 2)
abnormal_returns = pd.Series(abnormal_returns).dropna()
abnormal_returns = remove_outliers(abnormal_returns, 2)
all_returns[day_num] = np.average(returns)
all_std_devs[day_num] = np.std(returns)
total_sample_size[day_num] = sample_size
all_benchmark_returns[day_num] = np.average(pd.Series(benchmark_returns).dropna())
abnormal_volatility[day_num] = np.std(abnormal_returns)
all_abnormal_returns[day_num] = np.average(abnormal_returns)
def clean_data(data, events, starting_point):
"""
    Cleans out signals that do not have enough pricing data
"""
events_df = events.copy(deep=True)
events_df['in_pricesdf'] = 0
id = 0
for date, row in events_df.iterrows():
sid = row.symbol
if date not in data.index or sid not in data.columns:
events_df.iloc[id, -1] = 1
id = id+1
continue
event_day = data.index.searchsorted(date)
hist_index_start = event_day - starting_point
hist_index_end = event_day + starting_point
event_window = data.iloc[hist_index_start:hist_index_end][[sid]]
if event_window.min()[0] == 0 or len(event_window) == 0 or True in pd.isnull(list(event_window[sid])):
events_df.iloc[id, -1] = 1
id = id+1
return events_df[events_df['in_pricesdf'] == 0]
def event_study(data,
events,
starting_point=30,
benchmark='bitcoin',
origin_zero=True,
beta_window=None,
interval=timedelta(days=1),
x_ticks_amount=12):
ev_data = clean_data(data, events, starting_point)
all_returns = {}
all_std_devs = {}
all_benchmark_returns = {}
all_abnormal_returns = {}
abnormal_volatility = {}
total_sample_size = {}
day_numbers = build_day_numbers(starting_point)
compute_averages(ev_data, data, starting_point, day_numbers,
benchmark, all_returns, all_std_devs,
total_sample_size, all_benchmark_returns,
abnormal_volatility, all_abnormal_returns, beta_window, interval)
plotting_events(day_numbers, all_returns, all_benchmark_returns, all_abnormal_returns,
all_std_devs, abnormal_volatility,
total_sample_size, origin_zero, x_ticks_amount, interval)
def signals_format(signals, project):
"""
Returns signals in the needed format.
Accepts a column with the signals as boolean values and the projects name as a string
"""
sign = pd.DataFrame(signals)
sign.columns = ['symbol']
sign = sign.replace(True, project)
events_ = sign[sign["symbol"] == project]
return events_
def plotting_events(day_numbers, all_returns, all_benchmark_returns, all_abnormal_returns, all_std_devs,
abnormal_volatility, total_sample_size, origin_zero, x_ticks_amount, interval):
all_returns = pd.Series(all_returns)
all_std_devs = pd.Series(all_std_devs)
all_benchmark_returns = pd.Series(all_benchmark_returns)
all_abnormal_returns = pd.Series(all_abnormal_returns)
abnormal_volatility = pd.Series(abnormal_volatility)
events = np.average(pd.Series(total_sample_size))
if origin_zero:
all_returns = all_returns - all_returns.loc[0]
all_benchmark_returns = all_benchmark_returns - all_benchmark_returns.loc[0]
all_abnormal_returns = all_abnormal_returns - all_abnormal_returns.loc[0]
all_std_devs = all_std_devs - all_std_devs.loc[0]
abnormal_volatility = abnormal_volatility - abnormal_volatility.loc[0]
all_std_devs.loc[:-1] = 0
abnormal_volatility.loc[:-1] = 0
x_ticks = build_x_ticks(day_numbers, x_ticks_amount)
plot_cumulative_returns(
returns=all_returns,
events=events,
x_ticks=x_ticks,
interval_text=timedelta_format(interval.total_seconds())
)
plot_average_returns(
returns=all_returns,
benchmark_returns=all_benchmark_returns,
x_ticks=x_ticks,
interval_text=timedelta_format(interval.total_seconds())
)
plot_cumulative_abnormal_returns(
returns=all_returns,
abnormal_returns=all_abnormal_returns,
x_ticks=x_ticks,
interval_text=timedelta_format(interval.total_seconds())
)
plot_cumulative_return_with_errors(
returns=all_returns,
std_devs=all_std_devs,
events=events
)
plot_abnormal_cumulative_return_with_errors(
abnormal_volatility=abnormal_volatility,
abnormal_returns=all_abnormal_returns,
events=events
)
def calc_beta_testing(stock, benchmark, price_history):
"""
Calculate beta and alpha amounts for each security
"""
stock_prices = np.log(1+price_history[stock].pct_change().dropna())
bench_prices = np.log(1+price_history[benchmark].pct_change().dropna())
aligned_prices = bench_prices.align(stock_prices, join='inner')
bench_prices = aligned_prices[0]
stock_prices = aligned_prices[1]
bench_prices = np.array(bench_prices.values)
stock_prices = np.array(stock_prices.values)
bench_prices = np.reshape(bench_prices, len(bench_prices))
stock_prices = np.reshape(stock_prices, len(stock_prices))
if len(stock_prices) == 0:
return None
# market_beta, benchmark_beta = np.polyfit(bench_prices, stock_prices, 1)
slope, intercept, r_value, p_value, stderr = stats.linregress(bench_prices, stock_prices)
return slope, intercept
def compute_beta_alpha(data, ev_data, starting_point, benchmark):
"""
Includes beta and alpha in the event dataframe
"""
betas_df = ev_data.copy(deep=True)
betas_df['beta'] = 0
betas_df['alpha'] = 0
id = 0
for date, row in betas_df.iterrows():
sid = row.symbol
if date not in data.index or sid not in data.columns:
continue
if sid == 'ethereum' and benchmark == 'ethereum':
benchmark = 'bitcoin'
elif sid == 'bitcoin' and benchmark == 'bitcoin':
benchmark = 'ethereum'
coeff = calc_beta_testing(sid, benchmark, get_price_history(data, date, starting_point, sid, benchmark))
if coeff:
beta, alpha = coeff
betas_df.iloc[id, -2] = beta
betas_df.iloc[id, -1] = alpha
id = id+1
return betas_df.reset_index()
def calculate_ab_returns(returns_df, betas_df, intercept, benchmark):
"""
Calculate abnormal returns for every event case
"""
ab_returns = pd.DataFrame()
for number, dta in betas_df.iterrows():
sid = dta.symbol
ind = number
alpha = dta.alpha
beta = dta.beta
if not intercept:
ab_returns[ind] = returns_df[sid]-beta*returns_df[benchmark]
else:
ab_returns[ind] = returns_df[sid]-(alpha+beta*returns_df[benchmark])
return ab_returns.dropna()
def ab_returns_matrix(ab_returns, betas_df, starting_point):
"""
Maps the abnormal returns for every event
"""
abnormal_returns_df = ab_returns.reset_index()
new_sample = {}
for number, dta in betas_df.iterrows():
eventdate = dta.datetime
sid = number
# find specific event row, look where Date is equal to event_date
col_name = abnormal_returns_df.columns[0]
row = abnormal_returns_df.loc[abnormal_returns_df[col_name] == eventdate]
# get index of row
index = row.index[0]
# select starting_point plus and starting_point minus around that row
my_sample = abnormal_returns_df.loc[(index - starting_point):(index + starting_point), sid].reset_index(drop=True)
# add to new set
new_sample[number] = my_sample
return new_sample
def calculate_stats(new_sample, starting_point):
"""
Calculates t statistics for AARs and CAARs
"""
ev = pd.DataFrame(new_sample)
ev.index = ev.index - starting_point
    # Calculate CARs
ev_cumulative = ev.cumsum()
# Calculate t statistics for AARs
mean_AR = ev.mean(axis=1)
std_AR = ev.std(axis=1)
results = pd.DataFrame(mean_AR, columns=['AAR'])
# results['STD AR'] = std_AR
results['t-AAR'] = mean_AR / std_AR
results['P-Value t-AAR'] = stats.norm.cdf(results['t-AAR'])
# Calculate t statistics for CAARs
mean_CAR = ev_cumulative.mean(axis=1)
std_CAR = ev_cumulative.std(axis=1)
results['CAAR'] = mean_CAR
# results['STD CAR'] = std_CAR
results['t-CAAR'] = mean_CAR / std_CAR
results['P-Value t-CAAR'] = stats.norm.cdf(results['t-CAAR'])
return results
def plot_ARR_CAAR(results):
fig, ax = pyplot.subplots(figsize=FIGURE_SIZE)
ax.set_title('ARS vs CARS', fontsize=20)
ax.plot(results.index, results['AAR'], color=COLOR_4, marker="o")
ax.set_xlabel("Days", fontsize=14)
ax.set_ylabel("AAR", color=COLOR_4, fontsize=14)
ax2 = ax.twinx()
ax2.plot(results.index, results['CAAR'], color=COLOR_1, marker="o")
ax2.set_ylabel("CAAR", color=COLOR_1, fontsize=14)
pyplot.show()
def plot_CI(tstats, pvalues, CI):
c = stats.norm().isf((1-CI)/2)
fig, ax = pyplot.subplots(nrows=2, figsize=FIGURE_SIZE)
ax[0].set_title(tstats.name+' Statistic', fontsize=20)
ax[1].set_title('P-Values ', fontsize=20)
tstats.plot(ax=ax[0], label=tstats.name, color=COLOR_1)
ax[0].axhline(y=c, linestyle='--', color=COLOR_4, alpha=.9, label='Significance Line (' + str(round(c, 2)) + ')')
ax[0].axhline(y=-c, linestyle='--', color=COLOR_4, alpha=.9)
ax[0].legend()
ax[1].bar(pvalues.index, pvalues, label=pvalues.name, color=COLOR_1)
ax[1].axhline(y=(1-CI)/2, linestyle='--', color=COLOR_4, alpha=.9, label='Significance Line('+str(round((1-CI)/2, 2))+')')
ax[1].legend()
def get_log_returns(data):
# Get arithmetic returns
arithmetic_returns = data.pct_change()
# Transform to log returns
arithmetic_returns = 1+arithmetic_returns
returns_array = np.log(arithmetic_returns, out=np.zeros_like(arithmetic_returns), where=(arithmetic_returns != 0))
return pd.DataFrame(returns_array, index=data.index, columns=data.columns).fillna(0)
def hypothesis_test(data, ev_data, starting_point, benchmark='ethereum', intercept=True, CI=.95, interval=timedelta(days=1)):
# Drops events with no pricing data
cleaned_events = clean_data(data, ev_data, starting_point)
# Call function to calculate betas for events
betas_df = compute_beta_alpha(data, cleaned_events, starting_point, benchmark)
# Get log returns
returns_df = get_log_returns(data)
# Calculate abnormal returns
ab_returns = calculate_ab_returns(returns_df, betas_df, intercept, benchmark)
    # Maps the abnormal returns for every event
new_sample = ab_returns_matrix(ab_returns, betas_df, starting_point)
# Calculate Statistics
results = calculate_stats(new_sample, starting_point)
display(results)
# Plotting Functions
plot_ARR_CAAR(results)
plot_CI(results['t-AAR'], results['P-Value t-AAR'], CI)
plot_CI(results['t-CAAR'], results['P-Value t-CAAR'], CI)
|
/sanpy-0.11.6-py3-none-any.whl/san/extras/event_study.py
| 0.759136 | 0.592283 |
event_study.py
|
pypi
|
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu=0, sigma=1):
Distribution.__init__(self, mu, sigma)
def calculate_mean(self):
"""Function to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
avg = 1.0 * sum(self.data) / len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Function to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.calculate_mean()
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
def plot_histogram(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.title('Histogram of Data')
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Function to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        axes[1].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
def __repr__(self):
"""Function to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return "mean {}, standard deviation {}".format(self.mean, self.stdev)
|
/sans_distributions-0.1.tar.gz/sans_distributions-0.1/sans_distributions/Gaussiandistribution.py
| 0.688364 | 0.853058 |
Gaussiandistribution.py
|
pypi
|
from __future__ import annotations
from dataclasses import dataclass
import typing
from .types import JsonDict, JsonList, JsonPrimitive
@dataclass
class JsonRpcError:
""" Represents an error in the JSON RPC protocol. """
code: int
message: str
data: typing.Optional[JsonDict] = None
def to_json_dict(self) -> JsonDict:
""" Convert to a JSON dictionary. """
dict_ = typing.cast(JsonDict, {"code": self.code, "message": self.message})
if self.data is not None:
dict_["data"] = self.data
return dict_
@classmethod
def from_json_dict(cls, json_dict: JsonDict) -> JsonRpcError:
""" Return a new response from a JSON dictionary. """
return cls(
code=typing.cast(int, json_dict["code"]),
message=typing.cast(str, json_dict["message"]),
data=typing.cast(typing.Optional[JsonDict], json_dict.get("data")),
)
def __repr__(self) -> str:
""" Return string representation. """
data = repr(self.data)
message = repr(self.message)
return f"JsonRpcError(code={self.code}, message={message}, data={data})"
class JsonRpcException(Exception):
"""
A base class for JSON-RPC exceptions.
    This exception is never thrown by ``sansio-jsonrpc``, but since it is an ancestor
    of all JSON-RPC exceptions, it is useful in `try/except` blocks as a catch-all. You
    generally should not instantiate this class yourself. Instead, use
    :class:`JsonRpcApplicationError`, either directly or by creating your own
    subclass of it.
"""
def __init__(self, error: JsonRpcError):
""" Constructor. """
self._error: JsonRpcError = error
@property
def code(self):
""" The JSON-RPC error code. """
return self._error.code
@property
def message(self):
""" A JSON-RPC error message. """
return self._error.message
@property
def data(self) -> typing.Optional[JsonDict]:
""" Arbitrary data attached to the error. """
return self._error.data
def get_error(self) -> JsonRpcError:
""" Return the error underlying this exception. """
return self._error
def __repr__(self):
cls = self.__class__.__name__
code = self._error.code
data = repr(self._error.data)
message = repr(self._error.message)
return f"{cls}<code={code}, message={message}, data={data}>"
@staticmethod
def exc_from_error(error: JsonRpcError) -> JsonRpcException:
"""
Create a new exception derived from the given error.
When you receive an error response, you may want to raise a corresponding
exception. This method finds an appropriate exception subclass that matches the
error's numeric code and instantiates it. It uses some metaclass black magic so
that it can even return custom subclasses you define in your own code!
"""
exc: JsonRpcException
if -32768 <= error.code <= -32000:
exc = JsonRpcReservedError.exc_from_error(error)
else:
exc = JsonRpcApplicationError.exc_from_error(error)
return exc
class JsonRpcReservedErrorMeta(type):
"""
This metaclass builds a map of error codes to classes.
This is used to find the right class to instantiate when an error response is
received. It also enforces the requirement that reserved error codes must be in the
range [-32768, -32000].
"""
_error_classes: typing.Dict[int, type] = dict()
def __init__(cls, name, bases, attrs):
if cls.ERROR_CODE is not None and (
cls.ERROR_CODE < -32768 or cls.ERROR_CODE > -32000
):
raise RuntimeError(
"Subclasses of JsonRpcReservedError must set ERROR_CODE in range "
"[-32768, -32000]."
)
JsonRpcReservedErrorMeta._error_classes[cls.ERROR_CODE] = cls
class JsonRpcReservedError(JsonRpcException, metaclass=JsonRpcReservedErrorMeta):
"""
An exception corresponding to the range of codes reserved by the spec.
The error code must be in the range [-32768, -32000].
"""
ERROR_CODE: int = -32000
ERROR_MESSAGE: str = "JSON-RPC reserved error"
def __init__(
self,
message: typing.Optional[str] = None,
data: typing.Optional[JsonDict] = None,
):
error = JsonRpcError(self.ERROR_CODE, message or self.ERROR_MESSAGE, data)
super().__init__(error)
@staticmethod
def exc_from_error(error: JsonRpcError) -> JsonRpcReservedError:
"""
Create a new reserved exception that corresponds to the error code.
This first searches for a subclass that is registered with the given error code.
If it does not find such a subclass, then it returns a ``JsonRpcReservedError``
instead.
"""
cls = JsonRpcReservedError._error_classes.get(error.code, JsonRpcReservedError)
return cls(error.message, error.data)
class JsonRpcApplicationErrorMeta(type):
"""
This metaclass builds a map of error codes to classes.
This is used to find the right class to instantiate when an error response is
received. It also enforces the requirement that application error codes must **not**
be in the range [-32768, -32000].
"""
_error_classes: typing.Dict[int, type] = dict()
def __init__(cls, name, bases, attrs):
if (
cls.ERROR_CODE is not None
and cls.ERROR_CODE >= -32768
and cls.ERROR_CODE <= -32000
):
raise RuntimeError(
"Subclasses of JsonRpcReservedError must set ERROR_CODE outside the "
" range [-32768, -32000]."
)
JsonRpcApplicationErrorMeta._error_classes[cls.ERROR_CODE] = cls
class JsonRpcApplicationError(JsonRpcException, metaclass=JsonRpcApplicationErrorMeta):
"""
An exception corresponding to the unreserved range of error codes.
The error code must **not** be in the range [-32768, -32000].
"""
ERROR_CODE: int = -1
ERROR_MESSAGE: str = "JSON-RPC"
def __init__(
self,
message: typing.Optional[str] = None,
*,
data: typing.Optional[JsonDict] = None,
code: typing.Optional[int] = None,
):
error = JsonRpcError(
code or self.ERROR_CODE, message or self.ERROR_MESSAGE, data
)
super().__init__(error)
@staticmethod
def exc_from_error(error: JsonRpcError) -> JsonRpcApplicationError:
cls = JsonRpcApplicationError._error_classes.get(
error.code, JsonRpcApplicationError
)
return cls(error.message, data=error.data)
class JsonRpcParseError(JsonRpcReservedError):
""" Invalid JSON was received by the server. """
ERROR_CODE = -32700
ERROR_MESSAGE = "Invalid JSON was received by the server."
class JsonRpcInvalidRequestError(JsonRpcReservedError):
""" The JSON sent is not a valid Request object. """
ERROR_CODE = -32600
ERROR_MESSAGE = "The JSON sent is not a valid Request object."
class JsonRpcMethodNotFoundError(JsonRpcReservedError):
"""
The method does not exist / is not available.
This exception is never thrown by ``sansio-jsonrpc``. It should be thrown in
downstream libraries.
"""
ERROR_CODE = -32601
ERROR_MESSAGE = "The method does not exist / is not available."
class JsonRpcInvalidParamsError(JsonRpcReservedError):
"""
Invalid method parameter(s).
This exception is never thrown by ``sansio-jsonrpc``. It should be thrown in
downstream libraries.
"""
ERROR_CODE = -32602
ERROR_MESSAGE = "Invalid method parameter(s)."
class JsonRpcInternalError(JsonRpcReservedError):
"""
Internal JSON-RPC error.
"""
    ERROR_CODE = -32603
ERROR_MESSAGE = "Internal JSON-RPC error."
|
/sansio_jsonrpc-0.2.0-py3-none-any.whl/sansio_jsonrpc/exc.py
| 0.92738 | 0.208421 |
exc.py
|
pypi
|
import typing as t
from pydantic import BaseModel, PrivateAttr
if t.TYPE_CHECKING: # avoid import cycle at runtime
from .client import Client
from .structs import (
JSONDict,
Diagnostic,
MessageType,
MessageActionItem,
CompletionList,
TextEdit,
MarkupContent,
Range,
Location,
MarkedString,
SignatureInformation,
LocationLink,
CallHierarchyItem,
SymbolInformation,
Registration,
DocumentSymbol,
WorkspaceFolder,
ProgressToken,
ProgressValue,
WorkDoneProgressBeginValue,
WorkDoneProgressReportValue,
WorkDoneProgressEndValue,
ConfigurationItem,
)
Id = t.Union[int, str]
class Event(BaseModel):
pass
class ResponseError(Event):
message_id: t.Optional[Id]
code: int
message: str
data: t.Optional[t.Union[str, int, float, bool, t.List[t.Any], JSONDict, None]]
class ServerRequest(Event):
_client: "Client" = PrivateAttr()
_id: Id = PrivateAttr()
class ServerNotification(Event):
pass
class Initialized(Event):
capabilities: JSONDict
class Shutdown(Event):
pass
class ShowMessage(ServerNotification):
type: MessageType
message: str
class ShowMessageRequest(ServerRequest):
type: MessageType
message: str
actions: t.Optional[t.List[MessageActionItem]]
def reply(self, action: t.Optional[MessageActionItem] = None) -> None:
"""
Reply to the ShowMessageRequest with the user's selection.
No bytes are actually returned from this method, the reply's bytes
are added to the client's internal send buffer.
"""
self._client._send_response(
id=self._id, result=action.dict() if action is not None else None
)
class LogMessage(ServerNotification):
type: MessageType
message: str
class WorkDoneProgressCreate(ServerRequest):
token: ProgressToken
def reply(self) -> None:
self._client._send_response(id=self._id, result=None)
class Progress(ServerNotification):
token: ProgressToken
value: ProgressValue
class WorkDoneProgress(Progress):
pass
class WorkDoneProgressBegin(WorkDoneProgress):
value: WorkDoneProgressBeginValue
class WorkDoneProgressReport(WorkDoneProgress):
value: WorkDoneProgressReportValue
class WorkDoneProgressEnd(WorkDoneProgress):
value: WorkDoneProgressEndValue
# XXX: should these two be just Events or?
class Completion(Event):
message_id: Id
completion_list: t.Optional[CompletionList]
# XXX: not sure how to name this event.
class WillSaveWaitUntilEdits(Event):
edits: t.Optional[t.List[TextEdit]]
class PublishDiagnostics(ServerNotification):
uri: str
diagnostics: t.List[Diagnostic]
class Hover(Event):
message_id: t.Optional[Id] # custom...
contents: t.Union[
t.List[t.Union[MarkedString, str]], MarkedString, MarkupContent, str
]
range: t.Optional[Range]
class SignatureHelp(Event):
message_id: t.Optional[Id] # custom...
signatures: t.List[SignatureInformation]
activeSignature: t.Optional[int]
activeParameter: t.Optional[int]
def get_hint_str(self) -> t.Optional[str]:
if len(self.signatures) == 0:
return None
active_sig = self.activeSignature or 0
sig = self.signatures[active_sig]
return sig.label
class Definition(Event):
message_id: t.Optional[Id]
result: t.Union[Location, t.List[t.Union[Location, LocationLink]], None]
# result is a list, so putting in a custom class
class References(Event):
result: t.Union[t.List[Location], None]
class MCallHierarchItems(Event):
result: t.Union[t.List[CallHierarchyItem], None]
class Implementation(Event):
result: t.Union[Location, t.List[t.Union[Location, LocationLink]], None]
class MWorkspaceSymbols(Event):
result: t.Union[t.List[SymbolInformation], None]
class MDocumentSymbols(Event):
message_id: t.Optional[Id]
result: t.Union[t.List[SymbolInformation], t.List[DocumentSymbol], None]
class Declaration(Event):
result: t.Union[Location, t.List[t.Union[Location, LocationLink]], None]
class TypeDefinition(Event):
result: t.Union[Location, t.List[t.Union[Location, LocationLink]], None]
class RegisterCapabilityRequest(ServerRequest):
registrations: t.List[Registration]
def reply(self) -> None:
self._client._send_response(id=self._id, result={})
class DocumentFormatting(Event):
message_id: t.Optional[Id]
result: t.Union[t.List[TextEdit], None]
class WorkspaceFolders(ServerRequest):
result: t.Optional[t.List[WorkspaceFolder]]
def reply(self, folders: t.Optional[t.List[WorkspaceFolder]] = None) -> None:
"""
Reply to the WorkspaceFolder with workspace folders.
No bytes are actually returned from this method, the reply's bytes
are added to the client's internal send buffer.
"""
self._client._send_response(
id=self._id,
result=[f.dict() for f in folders] if folders is not None else None,
)
class ConfigurationRequest(ServerRequest):
items: t.List[ConfigurationItem]
def reply(self, result: t.List[t.Any] = []) -> None:
self._client._send_response(id=self._id, result=result)
|
/sansio_lsp_client-0.10.0-py3-none-any.whl/sansio_lsp_client/events.py
| 0.483161 | 0.183868 |
events.py
|
pypi
|
__all__ = ["parse_form_data"]
from io import BytesIO
from urllib.parse import parse_qs
from .parser import MultipartParser
from .utils import MultiDict, parse_options_header
from .errors import MultipartError
def parse_form_data(environ, charset="utf8", strict=False, **kwargs):
""" Parse form data from an environ dict and return a (forms, files) tuple.
Both tuple values are dictionaries with the form-field name as a key
(unicode) and lists as values (multiple values per key are possible).
The forms-dictionary contains form-field values as unicode strings.
The files-dictionary contains :class:`Part` instances, either
because the form-field was a file-upload or the value is too big to fit
into memory limits.
    :param environ: A WSGI environment dict.
:param charset: The charset to use if unsure. (default: utf8)
:param strict: If True, raise :exc:`MultipartError` on any parsing
errors. These are silently ignored by default.
"""
forms, files = MultiDict(), MultiDict()
try:
if environ.get("REQUEST_METHOD", "GET").upper() not in ("POST", "PUT"):
raise MultipartError("Request method other than POST or PUT.")
content_length = int(environ.get("CONTENT_LENGTH", "-1"))
content_type = environ.get("CONTENT_TYPE", "")
if not content_type:
raise MultipartError("Missing Content-Type header.")
content_type, options = parse_options_header(content_type)
stream = environ.get("wsgi.input") or BytesIO()
kwargs["charset"] = charset = options.get("charset", charset)
if content_type == "multipart/form-data":
boundary = options.get("boundary", "")
if not boundary:
raise MultipartError("No boundary for multipart/form-data.")
for part in MultipartParser(stream, boundary, content_length, **kwargs):
if part.filename or not part.is_buffered():
files[part.name] = part
else: # TODO: Big form-fields are in the files dict. really?
forms[part.name] = part.value
elif content_type in (
"application/x-www-form-urlencoded",
"application/x-url-encoded",
):
mem_limit = kwargs.get("mem_limit", 2 ** 20)
if content_length > mem_limit:
raise MultipartError("Request too big. Increase MAXMEM.")
data = stream.read(mem_limit).decode(charset)
            if stream.read(1): # There is more data than fits into mem_limit
raise MultipartError("Request too big. Increase MAXMEM.")
data = parse_qs(data, keep_blank_values=True)
for key, values in data.items():
for value in values:
forms[key] = value
else:
raise MultipartError("Unsupported content type.")
except MultipartError:
if strict:
raise
return forms, files
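# A minimal usage sketch (added for illustration, not part of the original module):
# parsing a urlencoded POST body from a hand-built WSGI environ. BytesIO is already
# imported at the top of this module.
if __name__ == "__main__":
    body = b"name=alice&tag=a&tag=b"
    environ = {
        "REQUEST_METHOD": "POST",
        "CONTENT_TYPE": "application/x-www-form-urlencoded",
        "CONTENT_LENGTH": str(len(body)),
        "wsgi.input": BytesIO(body),
    }
    forms, files = parse_form_data(environ)
    print(forms.get("name"))    # 'alice'
    print(forms.getall("tag"))  # ['a', 'b']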
|
/sansio_multipart-0.3.tar.gz/sansio_multipart-0.3/sansio_multipart/wsgi_form_parser.py
| 0.569494 | 0.263469 |
wsgi_form_parser.py
|
pypi
|
__all__ = [
"header_quote",
"header_unquote",
"parse_options_header",
"to_bytes",
"MultiDict",
]
import re
from collections.abc import MutableMapping as DictMixin
_special = re.escape('()<>@,;:"\\/[]?={} \t')
_re_special = re.compile(r"[%s]" % _special)
_quoted_string = r'"(?:\\.|[^"])*"' # Quoted string
_value = r"(?:[^%s]+|%s)" % (_special, _quoted_string) # Safe or quoted string
_option = r"(?:;|^)\s*([^%s]+)\s*=\s*(%s)" % (_special, _value)
_re_option = re.compile(_option) # key=value part of an Content-Type like header
def header_quote(val):
if not _re_special.search(val):
return val
return '"' + val.replace("\\", "\\\\").replace('"', '\\"') + '"'
def header_unquote(val, filename=False):
if val[0] == val[-1] == '"':
val = val[1:-1]
if val[1:3] == ":\\" or val[:2] == "\\\\":
val = val.split("\\")[-1] # fix ie6 bug: full path --> filename
return val.replace("\\\\", "\\").replace('\\"', '"')
return val
def parse_options_header(header, options=None):
if ";" not in header:
return header.lower().strip(), {}
content_type, tail = header.split(";", 1)
options = options or {}
for match in _re_option.finditer(tail):
key = match.group(1).lower()
value = header_unquote(match.group(2), key == "filename")
options[key] = value
return content_type, options
def to_bytes(data, encoding="utf8"):
if isinstance(data, str):
data = data.encode(encoding)
return data
class MultiDict(DictMixin):
"""
A dict that remembers old values for each key.
HTTP headers may repeat with differing values,
such as Set-Cookie. We need to remember all
values.
"""
def __init__(self, *args, **kwargs):
self.dict = dict()
for k, v in dict(*args, **kwargs).items():
self[k] = v
def __len__(self):
return len(self.dict)
def __iter__(self):
return iter(self.dict)
def __contains__(self, key):
return key in self.dict
def __delitem__(self, key):
del self.dict[key]
def keys(self):
return self.dict.keys()
def __getitem__(self, key):
return self.get(key, KeyError, -1)
def __setitem__(self, key, value):
self.append(key, value)
def append(self, key, value):
self.dict.setdefault(key, []).append(value)
def replace(self, key, value):
self.dict[key] = [value]
def getall(self, key):
return self.dict.get(key) or []
def get(self, key, default=None, index=-1):
if key not in self.dict and default != KeyError:
return [default][index]
return self.dict[key][index]
def iterallitems(self):
for key, values in self.dict.items():
for value in values:
yield key, value
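# Illustration (comment added, not original code) of the two helpers above:
#   parse_options_header('multipart/form-data; boundary="abc"; charset=utf-8')
#   # -> ('multipart/form-data', {'boundary': 'abc', 'charset': 'utf-8'})
#
#   md = MultiDict()
#   md['a'] = 1
#   md['a'] = 2        # __setitem__ appends instead of overwriting
#   md['a']            # 2, the most recent value
#   md.getall('a')     # [1, 2]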
|
/sansio_multipart-0.3.tar.gz/sansio_multipart-0.3/sansio_multipart/utils.py
| 0.541894 | 0.280129 |
utils.py
|
pypi
|
from copy import deepcopy
def remove_none_keys(dict_x):
dict_y = {}
for key, value in iter(dict_x.items()):
if isinstance(value, dict):
value = remove_none_keys(value)
if key is not None:
dict_y[key] = value
return dict_y
def remove_dict_none_values(value, from_dictified_objects_only=False):
"""
  Recursively remove all None values from dictionaries and lists, and return
  the result as a new dictionary or list.
"""
def get_non_none_valued_dict(value):
return {
key: remove_dict_none_values(value=val, from_dictified_objects_only=from_dictified_objects_only)
for key, val in value.items()
if val is not None
}
if isinstance(value, list):
return [remove_dict_none_values(value=x, from_dictified_objects_only=from_dictified_objects_only) for x in value]
elif isinstance(value, dict):
if not from_dictified_objects_only:
return get_non_none_valued_dict(value)
else:
from sanskrit_data.schema import common
      if common.JSONPICKLE_TYPE_FIELD in value:
return get_non_none_valued_dict(value)
else:
return value
else:
return value
def stringify_keys(x):
if isinstance(x, dict):
dict_y = {}
for key, value in iter(x.items()):
dict_y[str(key)] = stringify_keys(value)
return dict_y
elif isinstance(x, (tuple, list)):
return [stringify_keys(y) for y in x]
else:
return x
def dictify(x, included_protected_attributes=None, omit_none_values=True):
if included_protected_attributes is None:
included_protected_attributes = ["_id"]
from sanskrit_data.schema.common import JsonObject
if isinstance(x, dict):
dict_y = {}
for key, value in iter(x.items()):
if key is None:
continue
if not key.startswith("_") or key in included_protected_attributes:
if omit_none_values != True or value is not None:
dict_y[key] = dictify(value, included_protected_attributes=included_protected_attributes, omit_none_values=omit_none_values)
return dict_y
elif isinstance(x, (tuple, list)):
return [dictify(y, included_protected_attributes=included_protected_attributes, omit_none_values=omit_none_values) for y in x]
elif isinstance(x, JsonObject):
dict_x = dictify(x.__dict__, included_protected_attributes=included_protected_attributes, omit_none_values=omit_none_values)
from sanskrit_data.schema.common import TYPE_FIELD
dict_x[TYPE_FIELD] = x.get_wire_typeid()
return dict_x
else:
return x
def assert_approx_equals(x, y, floating_point_precision=None, key_trace=None):
if key_trace is None:
key_trace = []
x = sets_to_lists(round_floats(dictify(x), floating_point_precision=floating_point_precision))
y = sets_to_lists(round_floats(dictify(y), floating_point_precision=floating_point_precision))
if isinstance(x, dict):
assert x.keys() == y.keys(), (key_trace, sorted(x.keys()), sorted(y.keys()))
for key, value in iter(x.items()):
other_value = y.get(key, None)
assert_approx_equals(value, other_value, key_trace=key_trace + [key])
elif isinstance(x, (list, tuple)):
assert len(x) == len(y), (key_trace, len(x), len(y))
for index, item in enumerate(x):
assert_approx_equals(item, y[index], key_trace=key_trace + [index])
else:
assert x == y, (key_trace, x, y, type(x))
def round_floats(o, floating_point_precision):
from sanskrit_data.schema.common import JsonObject
if floating_point_precision is None:
return o
elif isinstance(o, float): return round(o, floating_point_precision)
elif isinstance(o, dict): return {k: round_floats(v, floating_point_precision=floating_point_precision) for k, v in iter(o.items())}
elif isinstance(o, (list, tuple)): return [round_floats(x, floating_point_precision=floating_point_precision) for x in o]
elif isinstance(o, JsonObject):
o = deepcopy(o)
for k, v in iter(o.__dict__.items()):
setattr(o, k, round_floats(v, floating_point_precision=floating_point_precision))
return o
def tuples_to_lists(o):
if isinstance(o, dict): return {k: tuples_to_lists(v) for k, v in iter(o.items())}
if isinstance(o, (list, tuple)): return [tuples_to_lists(x) for x in o]
return o
def sets_to_lists(o):
if isinstance(o, dict): return {k: sets_to_lists(v) for k, v in iter(o.items())}
if isinstance(o, set):
l = list(o)
l.sort()
return l
return o
def lists_to_sets(o):
if isinstance(o, dict): return {k: lists_to_sets(v) for k, v in iter(o.items())}
if isinstance(o, list): return set(o)
return o
def flatten_dict(o):
if isinstance(o, dict):
flattened_dict = {}
for key, value in iter(o.items()):
if isinstance(value, dict):
inner = flatten_dict(value)
for key_inner, value_inner in iter(inner.items()):
flattened_dict[".".join([key, key_inner])] = value_inner
else:
flattened_dict[str(key)] = flatten_dict(value)
return flattened_dict
if isinstance(o, (list, tuple)): return [flatten_dict(x) for x in o]
return o
def update_with_lists_as_sets(o1, o2):
# Does not handle lists of lists, lists of dicts etc.
o1 = deepcopy(o1)
for k, v in o2.items():
if v is None:
continue
if k in o1 and o1[k] is not None:
if isinstance(o1[k], (list, tuple)):
# A dict keys are a set which preserve order.
o1[k] = list(dict.fromkeys(o1[k] + o2[k]))
elif isinstance(v, dict): o1[k] = update_with_lists_as_sets(o1[k], o2[k])
else:
o1[k] = o2[k]
else:
o1[k] = o2[k]
return o1
LEAVES_KEY = "_LEAVES"
def insert_to_tree(tree, path, leaf):
segments = [x for x in path.split("/") if x != ""]
if LEAVES_KEY not in tree:
tree[LEAVES_KEY] = []
node = tree
for segment in segments:
parent = node
node = node.get(segment, {LEAVES_KEY: []})
parent[segment] = node
if len(segments) > 0:
parent[segment][LEAVES_KEY].append(leaf)
def tree_maker(leaves, path_fn):
tree = {}
for leaf in leaves:
path = path_fn(leaf)
insert_to_tree(tree=tree, leaf=leaf, path=path)
return tree
def _set_json_object_type(obj):
from sanskrit_data.schema.common import JsonObject
if isinstance(obj, JsonObject):
obj.set_type()
for key, value in iter(obj.__dict__.items()):
_set_json_object_type(value)
elif isinstance(obj, (list, tuple)):
for item in obj:
_set_json_object_type(item)
elif isinstance(obj, dict):
for key_inner, value_inner in obj.items():
_set_json_object_type(value_inner)
def delete_attribute_recursively(obj, attr):
if hasattr(obj, attr):
delattr(obj, attr)
for key, value in iter(obj.__dict__.items()):
delete_attribute_recursively(value, attr)
if isinstance(obj, (list, tuple)):
for item in obj:
delete_attribute_recursively(item, attr)
elif isinstance(obj, dict):
for key_inner, value_inner in obj.items():
delete_attribute_recursively(value_inner, attr)
def _set_jsonpickle_type_recursively(obj, json_class_index):
"""Translates jsonClass fields to py/object"""
if isinstance(obj, dict):
from sanskrit_data.schema.common import TYPE_FIELD
wire_type = obj.pop(TYPE_FIELD, None)
if wire_type:
from sanskrit_data.schema.common import JSONPICKLE_TYPE_FIELD
obj[JSONPICKLE_TYPE_FIELD] = json_class_index[wire_type].__module__ + "." + wire_type
for key, value in iter(obj.items()):
_set_jsonpickle_type_recursively(obj=value, json_class_index=json_class_index)
elif isinstance(obj, (list, tuple)):
for item in obj:
_set_jsonpickle_type_recursively(obj=item, json_class_index=json_class_index)
def filter_for_keys(dict, keys):
out_dict = {}
for key, value in dict.items():
if key in keys:
out_dict[key] = value
return out_dict
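# Small worked examples (comments added for illustration, not original code):
#   flatten_dict({'a': {'b': 1, 'c': {'d': 2}}, 'e': 3})
#   # -> {'a.b': 1, 'a.c.d': 2, 'e': 3}
#
#   update_with_lists_as_sets({'tags': ['x', 'y'], 'n': 1}, {'tags': ['y', 'z'], 'n': 2})
#   # -> {'tags': ['x', 'y', 'z'], 'n': 2}  (lists are merged preserving order, duplicates dropped)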
|
/sanskrit_data-0.8.13-py3-none-any.whl/sanskrit_data/collection_helper.py
| 0.525612 | 0.326916 |
collection_helper.py
|
pypi
|
import logging
import string
logging.basicConfig(
level=logging.DEBUG,
format="%(levelname)s: %(asctime)s {%(filename)s:%(lineno)d}: %(message)s "
)
class ClientInterface(object):
"""A common interface to a database server or system.
Accessing databases through implementations of this interface enables one to switch databases more easily down the line.
"""
def get_database(self, db_name):
"""Create or get a database, with which one can instantiate a suitable DbInterface subclass.
While it is better to use :meth:`get_database_interface` generally, we expose this in order to support :class:`DbInterface` subclasses which may be defined outside this module.
:param str db_name: Name of the database which needs to be accessed (The database is created if it does not already exist).
:returns DbInterface db: A database interface implementation for accessing this database.
"""
pass
def get_database_interface(self, db_name_backend, db_name_frontend=None, external_file_store=None, db_type=None):
"""Create or get a suitable :class:`DbInterface` subclass.
:param db_name_frontend: An ID for use with the schema.users module, to verify user access to the database as needed.
:param str db_name_backend: Name of the database which needs to be accessed (The database is created if it does not already exist).
:param external_file_store:
:param db_type:
:returns DbInterface db: A database interface implementation for accessing this database.
"""
pass
def delete_database(self, db_name):
"""Delete a database, with which one can instantiate a suitable DbInterface subclass.
:param str db_name: Name of the database which needs to be deleted.
"""
pass
class DbInterface(object):
"""A common interface to a database.
Accessing databases through implementations of this interface enables one to switch databases more easily down the line.
"""
def __init__(self, db_name_frontend, external_file_store):
self.db_name_frontend = db_name_frontend
self.external_file_store = external_file_store
self.init_external_file_store()
def init_external_file_store(self):
# Add filestores for use with the DB.
if self.external_file_store is not None:
logging.info("Initializing work directory ...")
import os
# noinspection PyArgumentList
os.makedirs(name=self.external_file_store, exist_ok=True)
def update_doc(self, doc):
""" Update or insert a json object, represented as a dict.
Where possible, use wrapper methods like :py:meth:`~sanskrit_data.schema.common.JsonObject.update_collection` since they do validation and other setup to ensure data consistency.
:param dict doc: _id parameter determines the key. One will be created if it does not exist. This argument could be modified.
:return: updated dict with _id set.
"""
assert isinstance(doc, dict)
pass
def delete_doc(self, doc_id):
"""
Where possible, use wrapper methods like :py:meth:`~sanskrit_data.schema.common.JsonObject.delete_in_collection` since they do validation.
:param doc_id:
:return: Not used.
"""
pass
# noinspection PyShadowingBuiltins
def find_by_id(self, id):
"""
:param id:
:return: Returns None if nothing is found. Else a python dict representing a JSON object.
"""
pass
def find(self, find_filter):
""" Find matching objects from the database.
    Should be a generator and return an iterator, i.e. it should use the yield keyword.
:param dict find_filter: A mango or mongo query.
:return: Returns None if nothing is found. Else a python dict representing a JSON object.
"""
pass
def find_one(self, find_filter):
""" Fine one matching object from the database.
:param find_filter: A mango or mongo query.
:return: Returns None if nothing is found. Else a python dict representing a JSON object.
"""
return next(self.find(find_filter=find_filter), None)
def update_index(self, name, fields, upsert=False):
"""Create or update (if upsert=True) an index over certain fields, with a given name."""
pass
def add_index(self, keys_json, index_name):
"""Index the database using certain fields.
:param index_name:
:param keys_json: A document that contains the field and value pairs where the field is the index key and the value describes the type of index for that field. For an ascending index on a field, specify a value of 1; for descending index, specify a value of -1.
"""
pass
def get_random_string(length):
letters = string.ascii_lowercase
import random
result_str = ''.join(random.choice(letters) for i in range(length))
return result_str
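# A minimal in-memory sketch of the interface above (illustrative only, not part
# of the package): real implementations back these methods with MongoDB, CouchDB
# or plain files. Only the behaviour documented in the docstrings is assumed.
class InMemoryDb(DbInterface):
  def __init__(self, db_name_frontend="test_db", external_file_store=None):
    super(InMemoryDb, self).__init__(db_name_frontend=db_name_frontend, external_file_store=external_file_store)
    self.docs = {}

  def update_doc(self, doc):
    assert isinstance(doc, dict)
    doc.setdefault("_id", get_random_string(10))
    self.docs[doc["_id"]] = doc
    return doc

  def delete_doc(self, doc_id):
    self.docs.pop(doc_id, None)

  # noinspection PyShadowingBuiltins
  def find_by_id(self, id):
    return self.docs.get(id)

  def find(self, find_filter):
    for doc in self.docs.values():
      if all(doc.get(key) == value for key, value in find_filter.items()):
        yield doc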
|
/sanskrit_data-0.8.13-py3-none-any.whl/sanskrit_data/db/interfaces/__init__.py
| 0.794465 | 0.34834 |
__init__.py
|
pypi
|
from __future__ import absolute_import
import json
import logging
import sys
from copy import deepcopy
import jsonpickle
import jsonschema
import toml
from jsonschema import SchemaError
from jsonschema import ValidationError
from jsonschema.exceptions import best_match
from six import string_types
from toml.decoder import TomlDecodeError
from sanskrit_data import collection_helper, file_helper
from sanskrit_data.collection_helper import round_floats, tuples_to_lists, _set_jsonpickle_type_recursively
from sanskrit_data.toml_helper import MultilinePreferringTomlEncoder
logging.basicConfig(
level=logging.DEBUG,
format="%(levelname)s: %(asctime)s {%(filename)s:%(lineno)d}: %(message)s "
)
JSONPICKLE_TYPE_FIELD = "py/object"
TYPE_FIELD = "jsonClass"
#: Maps jsonClass values to the containing Python module object. Useful for (de)serialization. Updated using :func:`update_json_class_index` calls at the end of each module file (such as this one) whose classes may be serialized.
json_class_index = {}
def update_json_class_index(module_in, json_class_index_in=None):
"""Call this function to enable (de)serialization.
Usage example: common.update_json_class_index(sys.modules[__name__]).
"""
if json_class_index_in is None:
json_class_index_in = json_class_index
import inspect
for name, obj in inspect.getmembers(module_in):
if inspect.isclass(obj):
json_class_index_in[name] = obj
def check_class(obj, allowed_types):
results = [isinstance(obj, some_type) for some_type in allowed_types]
# logging.debug(results)
return True in results
def check_list_item_types(some_list, allowed_types):
check_class_results = [check_class(item, allowed_types=allowed_types) for item in some_list]
# logging.debug(check_class_results)
return not (False in check_class_results)
def recursively_merge_json_schemas(a, b, json_path=""):
assert a.__class__ == b.__class__, str(a.__class__) + " vs " + str(b.__class__)
if isinstance(b, dict) and isinstance(a, dict):
a_and_b = set(a.keys()) & set(b.keys())
every_key = set(a.keys()) | set(b.keys())
merged_dict = {}
for k in every_key:
if k in a_and_b:
merged_dict[k] = recursively_merge_json_schemas(a[k], b[k], json_path=json_path + "/" + k)
else:
merged_dict[k] = deepcopy(a[k] if k in a else b[k])
return merged_dict
elif isinstance(b, list) and isinstance(a, list) and not json_path.endswith(TYPE_FIELD + "/enum"):
# TODO: What if we have a list of dicts?
return list(set(a + b))
else:
return deepcopy(b)
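# Worked example (comment added for illustration): merging two schema fragments
# unions dict keys and list entries; the order of a merged list comes from a set
# union and is therefore not guaranteed.
#   recursively_merge_json_schemas(
#       {"properties": {"a": {"type": "string"}}, "required": ["a"]},
#       {"properties": {"b": {"type": "number"}}, "required": ["b"]})
#   # -> {"properties": {"a": {"type": "string"}, "b": {"type": "number"}},
#   #     "required": ["a", "b"]}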
class JsonObject(object):
"""The base class of all Json-serializable data container classes, with many utility methods."""
schema = {
"type": "object",
"$schema": "https://json-schema.org/draft/2019-09/schema",
"properties": {
TYPE_FIELD: {
"type": "string",
"description": "A hint used by json libraries to deserialize json data to an object of the appropriate type."
" This is necessary for sub-objects to have as well (to ensure that the deserialization functions as expected)."
},
},
"required": [TYPE_FIELD]
}
DEFAULT_TO_NONE__DEFAULT = True
def __getattr__(self, name):
if name == "_default_to_none":
return JsonObject.DEFAULT_TO_NONE__DEFAULT
if self._default_to_none:
return None
else:
# Default behaviour
raise AttributeError
# We override this because messing with __getattr__ has resulted in deepcopy breakage.
def __deepcopy__(self, memo):
return self.make_from_dict(self.to_json_map())
def __init__(self):
# Don't do: self._id = None. You'll get "_id": null when the object is serialized.
# We won't do self.set_type() as it is only useful during serialization. We don't want it to unwittingly affect comparison; and we want to avoid unnecessary assignments.
self._default_to_none = JsonObject.DEFAULT_TO_NONE__DEFAULT
def __hash__(self):
return hash(self.__str__())
@classmethod
def make_from_dict(cls, input_dict, **kwargs):
"""Defines *our* canonical way of constructing a JSON object from a dict.
All other deserialization methods should use this.
Note that this assumes that json_class_index is populated properly!
Note that constructor is NOT called and variable initializations therein won't take effect.
- ``from sanskrit_data.schema import *`` before using this should take care of it.
:param input_dict:
:return: A subclass of JsonObject
"""
if input_dict is None:
return None
if TYPE_FIELD not in input_dict:
logging.error("no type field: " + str(input_dict))
raise ValueError(str(input_dict))
dict_without_id = deepcopy(input_dict)
_id = dict_without_id.pop("_id", None)
_set_jsonpickle_type_recursively(obj=dict_without_id, json_class_index=json_class_index)
new_obj = jsonpickle.decode(json.dumps(dict_without_id))
for key, value in kwargs.items():
setattr(new_obj, key, value)
# logging.debug(new_obj.__class__)
if _id:
new_obj._id = str(_id)
return new_obj
@classmethod
def make_from_dict_list(cls, input_dict_list):
assert isinstance(input_dict_list, list)
return [cls.make_from_dict(input_dict=input_dict) for input_dict in input_dict_list]
@classmethod
def make_from_pickledstring(cls, pickle):
input_str = pickle
if not isinstance(pickle, str):
input_str = str(pickle,'utf-8')
if input_str.strip().startswith("["):
return cls.make_from_dict_list(jsonpickle.decode(pickle))
else:
obj = cls.make_from_dict(jsonpickle.decode(pickle))
return obj
def post_load_ops(self):
""" A method which is called everytime an object is loaded via :meth:`JsonObject.read_from_file`.
This may be necessary for deduplication or filling redundant values which were removed during serialization.
:return:
"""
pass
@classmethod
def read_from_file(cls, filename, name_to_json_class_index_extra=None, **kwargs):
"""
:param filename: the file which should be read.
:param name_to_json_class_index_extra: An optional dictionary mapping names to class objects. For example: {"Panchangam": annual.Panchangam}
:return:
"""
if name_to_json_class_index_extra is not None:
json_class_index.update(name_to_json_class_index_extra)
try:
with open(filename) as fhandle:
format = file_helper.deduce_format_from_filename(filename)
data = fhandle.read()
if "json" in format:
input_dict = jsonpickle.decode(data)
elif "toml" in format:
try:
input_dict = toml.loads(data)
except TomlDecodeError:
# The toml library fails on some valid files; fall back to qtoml.
import qtoml
input_dict = qtoml.loads(data)
obj = cls.make_from_dict(input_dict=input_dict, **kwargs)
obj.post_load_ops()
return obj
except Exception as e:
try:
import traceback
traceback.print_exc()
logging.info("Could not load as a dict. May be a list of dicts. Trying..")
with open(filename) as fhandle:
obj = cls.make_from_dict_list(jsonpickle.decode(fhandle.read()))
return obj
except Exception as e:
logging.error("Error reading " + filename + " : ".format(e))
raise e
def dump_to_file(self, filename: str, floating_point_precision: int = None, sort_keys: bool = True) -> None:
try:
import os
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, "w") as f:
format = file_helper.deduce_format_from_filename(filename)
f.write(self.to_string(format=format, floating_point_precision=floating_point_precision, sort_keys=sort_keys))
except Exception as e:
logging.error("Error writing " + filename + " : ".format(e))
raise e
@classmethod
def get_wire_typeid(cls):
return cls.__name__
@classmethod
def get_jsonpickle_typeid(cls):
return cls.__module__ + "." + cls.__name__
@classmethod
def get_json_map_list(cls, some_list):
return [item.to_json_map() for item in some_list]
def get_external_storage_path(self, db_interface):
"""Get the directory path where files associated with this object are to be stored."""
import os
return os.path.join(db_interface.external_file_store, self._id)
def list_files(self, db_interface, suffix_pattern="*"):
import glob
import os
file_list = glob.glob(pathname=os.path.join(self.get_external_storage_path(db_interface=db_interface), suffix_pattern))
return [os.path.basename(f) for f in file_list]
def set_type(self):
# self.class_type = str(self.__class__.__name__)
setattr(self, TYPE_FIELD, self.__class__.get_wire_typeid())
# setattr(self, TYPE_FIELD, self.__class__.__name__)
def to_string(self, format="json", floating_point_precision=None, sort_keys=True):
json_map = self.to_json_map(floating_point_precision=floating_point_precision)
if format == "json":
return json.dumps(json_map, sort_keys=sort_keys, ensure_ascii=False, indent=2)
else:
return toml.dumps(json_map, encoder=MultilinePreferringTomlEncoder())
def __repr__(self):
# __str__ falls back to this, and this is used in printing lists.
return self.to_string(format="json")
def set_from_dict(self, input_dict):
if input_dict:
for key, value in iter(input_dict.items()):
if isinstance(value, list):
setattr(self, key,
[JsonObject.make_from_dict(item) if isinstance(item, dict) else item for item in value])
elif isinstance(value, dict):
setattr(self, key, JsonObject.make_from_dict(value))
else:
setattr(self, key, value)
# noinspection PyShadowingBuiltins
def set_from_id(self, db_interface, id):
return self.set_from_dict(db_interface.find_by_id(id=id))
def to_json_map(self, floating_point_precision=None):
"""One convenient way of 'serializing' the object.
So, the type must be properly set.
Many functions accept such json maps, just as they accept strings.
"""
json_map = collection_helper.dictify(self, omit_none_values=self._default_to_none)
if self._default_to_none:
json_map = collection_helper.remove_dict_none_values(json_map)
json_map = tuples_to_lists(json_map)
# Sometimes values may be ugly dicts.
json_map = collection_helper.remove_none_keys(json_map)
json_map = collection_helper.stringify_keys(json_map)
if floating_point_precision is not None:
rounded = round_floats(json_map, floating_point_precision=floating_point_precision)
return rounded
else:
return json_map
def __eq__(self, other):
"""Overrides the default implementation"""
return id(self) == id(other) or (isinstance(other, JsonObject) and self.equals_ignore_id(other=other))
def equals_ignore_id(self, other):
# Make a copy with all strings uniformly encoded (utf-8 bytes), so that comparison is stable.
def to_unicode(text):
if isinstance(text, dict):
return {key: to_unicode(value) for key, value in iter(text.items())}
elif isinstance(text, list):
return [to_unicode(element) for element in text]
elif isinstance(text, string_types):
return text.encode('utf-8')
else:
return text
dict1 = to_unicode(self.to_json_map())
dict1.pop("_id", None)
# logging.debug(self.__dict__)
# logging.debug(dict1)
dict2 = to_unicode(other.to_json_map())
dict2.pop("_id", None)
# logging.debug(other.__dict__)
# logging.debug(dict2)
return dict1 == dict2
def match_filter(self, find_filter):
flat_json_map = collection_helper.flatten_dict(self.to_json_map())
for key, value in iter(find_filter.items()):
value_at_key = flat_json_map.get(key, None)
if isinstance(value_at_key, list) and isinstance(value, dict) and value.get("$elemMatch", None) is not None:
jo_values = [JsonObject.make_from_dict(item) for item in value_at_key]
filtered_values = [item for item in jo_values if item.match_filter(value.get("$elemMatch", None))]
if len(filtered_values) == 0:
return False
elif value_at_key != value:
return False
return True
def update_collection(self, db_interface, user=None):
"""Do JSON validation and write to database."""
if getattr(self, "schema", None) is not None:
self.validate(db_interface=db_interface, user=user)
updated_doc = db_interface.update_doc(self.to_json_map())
updated_obj = JsonObject.make_from_dict(updated_doc)
return updated_obj
def validate_deletion(self, db_interface, user=None):
if getattr(self, "_id", None) is None:
raise ValidationError("_id not present!")
def delete_in_collection(self, db_interface, user=None):
"""
To also delete items referring to this one, use the appropriate method in JsonObjectNode.
:param db_interface:
:param user:
:return:
"""
self.validate_deletion(db_interface=db_interface, user=user)
import shutil
db_interface.delete_doc(self._id)
shutil.rmtree(path=self.get_external_storage_path(db_interface=db_interface), ignore_errors=True)
def validate(self, db_interface=None, user=None):
"""Validate the JSON serialization of this object using the schema member. Called before database writes.
:param user:
:param db_interface: Potentially useful in subclasses to perform validations (eg. is the target_id valid).
This value may not be available: for example when called from the from_details methods.
:return: a boolean.
"""
self.validate_schema()
# Override and call this method to add extra validations.
def validate_schema(self):
json_map = self.to_json_map()
json_map.pop("_id", None)
# logging.debug(str(self))
try:
jsonschema.validate(json_map, self.schema)
# Subobjects could have specialized validation rules, specified using validate_schema overrides. Hence we specially call those methods.
for key, value in iter(self.__dict__.items()):
# logging.debug("%s %s", key, value)
if isinstance(value, JsonObject):
value.validate_schema()
elif isinstance(value, list):
for item in value:
    if isinstance(item, JsonObject):
        item.validate_schema()
else:
pass
except SchemaError as e:
logging.error("Exception message: " + e.message)
logging.error("Schema is: " + jsonpickle.dumps(self.schema))
logging.error("Context is: " + str(e.context))
logging.error("Best match is: " + str(best_match(errors=[e])))
raise e
except ValidationError as e:
logging.error("Exception message: " + e.message)
logging.error("self is: " + str(self))
logging.error("Schema is: " + jsonpickle.dumps(self.schema))
logging.error("Context is: " + str(e.context))
logging.error("Best match is: " + str(best_match(errors=[e])))
logging.error("json_map is: " + jsonpickle.dumps(json_map))
raise e
# noinspection PyShadowingBuiltins
@classmethod
def from_id(cls, id, db_interface):
"""Returns None if nothing is found."""
item_dict = db_interface.find_by_id(id=id)
item = None
if item_dict is not None:
item = cls.make_from_dict(item_dict)
return item
@classmethod
def add_indexes(cls, db_interface):
db_interface.add_index(keys_dict={
"jsonClass": 1
}, index_name="jsonClass")
class TargetValidationError(Exception):
def __init__(self, allowed_types, target_obj, targeting_obj):
super(TargetValidationError, self).__init__()
self.allowed_types = allowed_types
self.target_obj = target_obj
self.targeting_obj = targeting_obj
self.message = str(self)
def __str__(self):
return "%s\n targets object \n" \
"%s,\n" \
"which does not belong to \n" \
"%s" % (self.targeting_obj, self.target_obj, str(self.allowed_types))
# noinspection PyProtectedMember,PyUnresolvedReferences
class Target(JsonObject):
schema = recursively_merge_json_schemas(JsonObject.schema, {
"type": "object",
"properties": {
TYPE_FIELD: {
"enum": ["Target"]
},
"container_id": {
"type": "string"
}
},
"required": ["container_id"]
})
def get_target_entity(self, db_interface):
"""Returns null if db_interface doesnt have any such entity."""
return JsonObject.from_id(id=self.container_id, db_interface=db_interface)
def check_target_class(self, db_interface, allowed_types, targeting_obj):
if db_interface is not None:
target_entity = self.get_target_entity(db_interface=db_interface)
if not check_class(obj=target_entity, allowed_types=allowed_types):
raise TargetValidationError(allowed_types=allowed_types, targeting_obj=targeting_obj,
target_obj=target_entity)
@classmethod
def check_target_classes(cls, targets_to_check, db_interface, allowed_types, targeting_obj):
for target in targets_to_check:
target.check_target_class(db_interface=db_interface, allowed_types=allowed_types, targeting_obj=targeting_obj)
@classmethod
def from_details(cls, container_id):
target = Target()
target.container_id = container_id
target.validate()
return target
@classmethod
def from_ids(cls, container_ids):
return [Target.from_details(str(container_id)) for container_id in container_ids]
@classmethod
def from_containers(cls, containers):
return Target.from_ids(container_ids=[container._id for container in containers])
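# Illustrative sketch: constructing Target objects that point at stored documents.
# The _id values used here are hypothetical.
def _demo_targets():
    single = Target.from_details(container_id="book_portion_id_1")
    several = Target.from_ids(container_ids=["book_portion_id_1", "book_portion_id_2"])
    assert single.container_id == "book_portion_id_1"
    assert len(several) == 2
    return several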
class DataSource(JsonObject):
schema = recursively_merge_json_schemas(JsonObject.schema, ({
"type": "object",
"description": "Source of the json-data which contains this node. Eg. Uploader details in case of books, annotator in case of annotations."
" Consider naming the field that contains this object `source` to make querying uniform.",
"properties": {
    TYPE_FIELD: {
        "enum": ["DataSource"]
    },
"source_type": {
"type": "string",
"enum": ["system_inferred", "user_supplied"],
"description": "Does this data come from a machine, or a human? source_ prefix avoids keyword conflicts in some languages.",
"default": "system_inferred"
},
"id": {
"type": "string",
"description": "Something to identify the particular data source.",
},
"by_admin": {
"type": "boolean",
"description": "Was the creator of this data an admin at the time it was created or updated?"
}
},
"required": ["source_type"]
}))
def __init__(self):
"""Set the default properties"""
super().__init__()
# noinspection PyTypeChecker
self.source_type = self.schema["properties"]["source_type"]["default"]
# noinspection PyShadowingBuiltins
@classmethod
def from_details(cls, source_type, id):
source = DataSource()
source.source_type = source_type
source.id = id
source.validate_schema()
return source
def infer_by_admin(self, db_interface=None, user=None):
if getattr(self, "by_admin", None) is None:
# source_type is a compulsory attribute, because that validation is done separately and a suitable error is thrown.
if getattr(self, "source_type", None) is not None and self.source_type == "user_supplied":
if user is not None and db_interface is not None:
if getattr(self, "id", None) is None or self.id in user.get_user_ids():
self.by_admin = user.is_admin(service=db_interface.db_name_frontend)
def setup_source(self, db_interface=None, user=None):
if getattr(self, "source_type", None) is None:
self.source_type = "user_supplied" if (user is not None and user.is_human()) else "system_inferred"
if getattr(self, "id", None) is None and user is not None and user.get_first_user_id_or_none() is not None:
self.id = user.get_first_user_id_or_none()
def is_id_impersonated_by_non_admin(self, db_interface=None, user=None):
"""A None user is assumed to be a valid authorized backend script."""
if getattr(self, "id", None) is not None and user is not None and db_interface is not None:
if self.id not in user.get_user_ids() and not user.is_admin(service=db_interface.db_name_frontend):
return True
return False
def validate(self, db_interface=None, user=None):
if self.is_id_impersonated_by_non_admin(db_interface=db_interface, user=user):
raise ValidationError("Impersonation by %(id_1)s as %(id_2)s not allowed for this user." % dict(id_1=user.get_first_user_id_or_none(), id_2=self.id))
if "user" in self.source_type and getattr(self, "id", None) is None:
raise ValidationError("User id compulsary for user sources.")
if getattr(self, "source_type", None) is not None and self.source_type == "system_inferred":
if user is not None and user.is_human() and not user.is_admin(service=db_interface.db_name_frontend):
raise ValidationError("Impersonation by %(id_1)s as a bot not allowed for this user." % dict(id_1=user.get_first_user_id_or_none()))
super(DataSource, self).validate(db_interface=db_interface, user=user)
# Only if the writer user is an admin or None, allow by_admin to be set to true (even when the admin is impersonating another user).
if getattr(self, "by_admin", None) is not None and self.by_admin:
if user is not None and db_interface is not None and not user.is_admin(service=db_interface.db_name_frontend):
raise ValidationError("Impersonation by %(id_1)s of %(id_2)s not allowed for this user." % dict(id_1=user.get_first_user_id_or_none(), id_2=self.id))
# source_type is a compulsory attribute, because that validation is done separately and a suitable error is thrown.
if getattr(self, "source_type", None) is not None and self.source_type != "user_supplied":
if user is not None and db_interface is not None:
raise ValidationError("non user_supplied source_type cannot be an admin.")
class UllekhanamJsonObject(JsonObject):
"""The archetype JsonObject for use with the Ullekhanam project. See description.schema field"""
schema = recursively_merge_json_schemas(JsonObject.schema, ({
"type": "object",
"description": "Some JsonObject which can be saved as a document in the ullekhanam database.",
"properties": {
"source": DataSource.schema,
"editable_by_others": {
"type": "boolean",
"description": "Can this annotation be taken over by others for wiki-style editing or deleting?",
"default": True
},
"targets": {
"type": "array",
"items": Target.schema,
"description": "This field lets us define a directed graph involving JsonObjects stored in a database."
}
},
"required": [TYPE_FIELD]
}))
target_class = Target
def is_editable_by_others(self):
# noinspection PyTypeChecker
return self.editable_by_others if getattr(self, "editable_by_others", None) is not None else self.schema["properties"]["editable_by_others"]["default"]
def __init__(self):
super(UllekhanamJsonObject, self).__init__()
self.source = DataSource()
def detect_illegal_takeover(self, db_interface=None, user=None):
if getattr(self, "_id", None) is not None and db_interface is not None:
old_obj = JsonObject.from_id(id=self._id, db_interface=db_interface)
if old_obj is not None and not old_obj.is_editable_by_others():
if getattr(self.source, "id", None) is not None and getattr(old_obj.source, "id", None) is not None and self.source.id != old_obj.source.id:
if user is not None and not user.is_admin(service=db_interface.db_name_frontend):
raise ValidationError("{} cannot take over {}'s annotation for editing or deleting under a non-admin user {}'s authority".format(self.source.id, old_obj.source.id, user.get_first_user_id_or_none))
def update_collection(self, db_interface, user=None):
self.source.setup_source(db_interface=db_interface, user=user)
return super(UllekhanamJsonObject, self).update_collection(db_interface=db_interface, user=user)
def validate_deletion_ignoring_targetters(self, db_interface, user=None):
super(UllekhanamJsonObject, self).validate_deletion(db_interface=db_interface, user=user)
if user is not None:
self.source.id = user.get_first_user_id_or_none()
self.detect_illegal_takeover(db_interface=db_interface, user=user)
def validate_deletion(self, db_interface, user=None):
# Not calling: super(UllekhanamJsonObject, self).validate_deletion(db_interface=db_interface, user=user) as it's called inside the below.
self.validate_deletion_ignoring_targetters(db_interface=db_interface, user=user)
targetting_entities = self.get_targetting_entities(db_interface=db_interface)
if len(targetting_entities) > 0:
raise ValidationError("Unsafe deletion of %s: %d entities refer to this entity. Delete them first" % (self._id, len(targetting_entities)))
@classmethod
def get_allowed_target_classes(cls):
return []
def validate_targets(self, db_interface):
allowed_types = self.get_allowed_target_classes()
targets_to_check = self.targets if getattr(self, "targets", None) is not None else []
Target.check_target_classes(targets_to_check=targets_to_check, db_interface=db_interface, allowed_types=allowed_types, targeting_obj=self)
def validate(self, db_interface=None, user=None):
super(UllekhanamJsonObject, self).validate(db_interface=db_interface, user=user)
self.validate_targets(db_interface=db_interface)
self.source.validate(db_interface=db_interface, user=user)
self.detect_illegal_takeover(db_interface=db_interface, user=user)
# noinspection PyTypeHints
def get_targetting_entities(self, db_interface, entity_type=None):
"""
:type entity_type: str
"""
# Alas, the below shows that no index is used:
# curl -sg vedavaapi.org:5984/vedavaapi_ullekhanam_db/_explain -H content-type:application/json -d '{"selector": {"targets": {"$elemMatch": {"container_id": "4b9f454f5aa5414e82506525d015ac68"}}}}'|jq
# TODO: Use index.
find_filter = {
"targets": {
"$elemMatch": {
"container_id": str(self._id)
}
}
}
targetting_objs = [JsonObject.make_from_dict(item) for item in db_interface.find(find_filter)]
if entity_type is not None:
targetting_objs = list(filter(lambda obj: isinstance(obj, json_class_index[entity_type]), targetting_objs))
return targetting_objs
@classmethod
def add_indexes(cls, db_interface):
super(UllekhanamJsonObject, cls).add_indexes(db_interface=db_interface)
db_interface.add_index(keys_dict={
"targets.container_id": 1
}, index_name="targets_container_id")
# noinspection PyProtectedMember,PyAttributeOutsideInit,PyAttributeOutsideInit,PyTypeChecker
class JsonObjectNode(JsonObject):
"""Represents a tree (not a general Directed Acyclic Graph) of UllekhanamJsonObject.
`A video describing its use <https://youtu.be/neVeKcxzeQI>`_.
"""
schema = recursively_merge_json_schemas(
JsonObject.schema, {
"$id": "JsonObjectNode",
"properties": {
TYPE_FIELD: {
"enum": ["JsonObjectNode"]
},
"content": JsonObject.schema,
"children": {
"type": "array",
"items": {
'type': 'object',
'$ref': "JsonObjectNode"
}
}
},
"required": [TYPE_FIELD]
}
)
def setup_source(self, source):
assert self.content is not None
self.content.source = source
for child in self.children:
child.setup_source(source=source)
def validate_children_types(self):
"""Recursively valdiate target-types."""
for child in self.children:
if not check_class(self.content, child.content.get_allowed_target_classes()):
raise TargetValidationError(targeting_obj=child, allowed_types=child.content.get_allowed_target_classes(),
target_obj=self.content)
for child in self.children:
child.validate_children_types()
def validate(self, db_interface=None, user=None):
super(JsonObjectNode, self).validate(db_interface=db_interface, user=user)
self.validate_children_types()
@classmethod
def from_details(cls, content, children=None):
if children is None:
children = []
node = JsonObjectNode()
# logging.debug(content)
# Strangely, without the backend.data_containers, the below test failed on 20170501
node.content = content
# logging.debug(check_list_item_types(children, [JsonObjectNode]))
node.children = children
node.validate(db_interface=None)
return node
def update_collection(self, db_interface, user=None):
"""Special info: Mutates this object."""
# But we don't call self.validate() as child.content.targets (required for Annotations) may not be set yet.
self.validate_children_types()
# The content is validated within the below call.
self.content = self.content.update_collection(db_interface=db_interface, user=user)
for child in self.children:
# Initialize the target array if it does not already exist.
if (getattr(child.content, "targets", None) is None) or child.content.targets is None or len(child.content.targets) == 0:
child.content.targets = [child.content.target_class()]
assert len(child.content.targets) == 1
child.content.targets[0].container_id = str(self.content._id)
child.update_collection(db_interface=db_interface, user=user)
def affected_user_ids(self):
if getattr(self, "content", None) is None:
raise ValidationError("This is a node with no content! Not allowed.")
user_ids = []
if getattr(self.content.source, "id", None) is not None:
user_ids = [self.content.source.id]
for child in self.children:
user_ids = user_ids + child.affected_user_ids()
return user_ids
def validate_deletion(self, db_interface, user=None):
# Deliberately not calling super.validate_deletion - the node does not exist in the database.
if getattr(self, "content", None) is None:
raise ValidationError("This is a node with no content! Not allowed.")
self.content.validate_deletion_ignoring_targetters(db_interface=db_interface, user=user)
for child in self.children:
child.validate_deletion(db_interface=db_interface, user=user)
self.content = JsonObject.from_id(id = self.content._id, db_interface=db_interface)
affected_users = self.affected_user_ids()
# logging.debug(affected_users)
if len(set(affected_users)) > 2 and not user.is_admin(service=db_interface.db_name_frontend):
raise ValidationError("This deletion affects more than 2 other users. Only admins can do that.")
def delete_in_collection(self, db_interface, user=None):
self.validate_deletion(db_interface=db_interface, user=user)
self.fill_descendents(db_interface=db_interface, depth=100)
for child in self.children:
child.delete_in_collection(db_interface=db_interface, user=user)
# Delete or disconnect children before deleting oneself.
self.content.delete_in_collection(db_interface=db_interface, user=user)
def fill_descendents(self, db_interface, depth=10, entity_type=None):
targetting_objs = self.content.get_targetting_entities(db_interface=db_interface, entity_type=entity_type)
self.children = []
if depth > 0:
for targetting_obj in targetting_objs:
child = JsonObjectNode.from_details(content=targetting_obj)
child.fill_descendents(db_interface=db_interface, depth=depth - 1, entity_type=entity_type)
self.children.append(child)
def recursively_delete_attr(self, field_name):
"""Rarely useful method: example when the schema of a Class changes to omit a field.
Limitation: Only useful with direct members.
"""
if getattr(self.content, field_name, None) is not None:
delattr(self.content, field_name)
for child in self.children:
child.recursively_delete_attr(field_name)
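# Illustrative sketch (assumes sanskrit_data.schema.books is importable; any other
# UllekhanamJsonObject subclass would do equally well): building a two-level node tree in
# memory. No database is touched; update_collection(db_interface) would persist the tree
# and point each child's targets at its parent's _id.
def _demo_json_object_node():
    from sanskrit_data.schema.books import BookPortion
    parent = BookPortion.from_details(title="Some book", portion_class="book")
    child = BookPortion.from_details(title="Some page", portion_class="page")
    return JsonObjectNode.from_details(content=parent, children=[JsonObjectNode.from_details(content=child)])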
class ScriptRendering(JsonObject):
schema = recursively_merge_json_schemas(JsonObject.schema, ({
"type": "object",
"properties": {
TYPE_FIELD: {
"enum": ["ScriptRendering"]
},
"text": {
"type": "string",
},
"encoding_scheme": {
"type": "string",
},
},
"required": ["text"]
}))
@classmethod
def from_details(cls, text, encoding_scheme=None):
obj = ScriptRendering()
obj.text = text
if encoding_scheme is not None:
obj.encoding_scheme = encoding_scheme
obj.validate()
return obj
class Text(JsonObject):
schema = recursively_merge_json_schemas(JsonObject.schema, ({
"type": "object",
"properties": {
TYPE_FIELD: {
"enum": ["Text"]
},
"script_renderings": {
"type": "array",
"minItems": 1,
"items": ScriptRendering.schema
},
"language_code": {
"type": "string",
},
"search_strings": {
"type": "array",
"items": {
"type": "string"
},
"description": "Search strings which should match this text. "
"It could be derived from script_renderings - "
"by a simple copy (intended for use with a text index) "
"or some intelligent tokenization thereof."
},
}
}))
@classmethod
def from_details(cls, script_renderings, language_code=None):
obj = Text()
obj.script_renderings = script_renderings
if language_code is not None:
obj.language_code = language_code
return obj
@classmethod
def from_text_string(cls, text_string, language_code=None, encoding_scheme=None):
obj = Text()
obj.script_renderings = [ScriptRendering.from_details(text=text_string, encoding_scheme=encoding_scheme)]
if language_code is not None:
obj.language_code = language_code
return obj
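# Illustrative sketch: wrapping a plain string in the Text container used throughout
# these schemas. The language_code and encoding_scheme values here are hypothetical.
def _demo_text():
    text = Text.from_text_string(text_string="rAmaH", language_code="sa", encoding_scheme="HK")
    assert text.script_renderings[0].text == "rAmaH"
    return text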
class NamedEntity(JsonObject):
"""The same name written in different languages have different spellings - oft due to differing case endings and conventions: kAlidAsaH vs Kalidasa. Hence this container."""
schema = recursively_merge_json_schemas(JsonObject.schema, ({
"type": "object",
"properties": {
TYPE_FIELD: {
"enum": ["NamedEntity"]
},
"names": {
"type": "array",
"items": Text.schema,
"minItems": 1
}
}
}))
@classmethod
def from_details(cls, names):
obj = NamedEntity()
obj.names = names
return obj
@classmethod
def from_name_string(cls, name, language_code=None, encoding_scheme=None):
obj = NamedEntity()
obj.names = [Text.from_text_string(text_string=name, language_code=language_code, encoding_scheme=encoding_scheme)]
return obj
def get_schemas(module_in):
import inspect
schemas = {}
for name, obj in inspect.getmembers(module_in):
if inspect.isclass(obj) and getattr(obj, "schema", None) is not None:
schemas[name] = obj.schema
return schemas
# Essential for depickling to work.
update_json_class_index(sys.modules[__name__])
# logging.debug(json_class_index)
|
/sanskrit_data-0.8.13-py3-none-any.whl/sanskrit_data/schema/common.py
| 0.508788 | 0.159774 |
common.py
|
pypi
|
import logging
import sys
from sanskrit_data.schema import common
from sanskrit_data.schema.common import UllekhanamJsonObject, TYPE_FIELD, JsonObject, Target, DataSource, Text, \
NamedEntity
class BookPositionTarget(Target):
schema = common.recursively_merge_json_schemas(Target.schema, {
"type": "object",
"description": "A BookPortion could represent a Book or a chapter or a verse or a half-verse or a sentence or any such unit.",
"properties": {
TYPE_FIELD: {
"enum": ["BookPositionTarget"]
},
"position": {
"type": "number",
"description": "Any number describing the position of one BookPortion within another."
}
}
})
@classmethod
def from_details(cls, container_id=None, position=None):
target = BookPositionTarget()
if container_id:
target.container_id = container_id
if position:
target.position = position
target.validate(db_interface=None)
return target
class PublicationDetails(JsonObject):
schema = common.recursively_merge_json_schemas(JsonObject.schema, ({
"type": "object",
"description": "Publication details of a BookPortion.",
"properties": {
TYPE_FIELD: {
"enum": ["PublicationDetails"]
},
"release_time": {
"type": "string"
},
"publisher": NamedEntity.schema,
"canonical_source": {
"type": "string",
},
"issue_page": {
"type": "string",
},
}
}))
class CreationDetails(NamedEntity):
"""Many names are possible for the same work (eg. meghasandeshaH vs meghadUtam) - hence we extend the NamedEntity schema."""
schema = common.recursively_merge_json_schemas(NamedEntity.schema, ({
"type": "object",
"properties": {
TYPE_FIELD: {
"enum": ["CreationDetails"]
},
"authors": {
"type": "array",
"items": NamedEntity.schema
}
}
}))
@classmethod
def from_details(cls, names, authors=None):
obj = CreationDetails()
obj.names = names
if authors is not None:
obj.authors = authors
return obj
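# Illustrative sketch (not part of the original module): naming a work and its author.
# The strings here are hypothetical examples; any number of names or authors may be supplied.
def _demo_creation_details():
    names = [Text.from_text_string(text_string="meghasandeshaH"), Text.from_text_string(text_string="meghadUtam")]
    authors = [NamedEntity.from_name_string(name="kAlidAsaH")]
    return CreationDetails.from_details(names=names, authors=authors)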
class BookPortion(UllekhanamJsonObject):
schema = common.recursively_merge_json_schemas(JsonObject.schema, ({
"type": "object",
"description": "A BookPortion could represent a Book or a chapter or a verse or a half-verse or a sentence or any such unit.",
"properties": {
TYPE_FIELD: {
"enum": ["BookPortion"]
},
"creation_details": CreationDetails.schema,
"path": {
"type": "string",
"description": "The path prefix where files are to be stored. "
"If this field is empty, such a path is computed from _id of this object "
"and its ancestors. "
"Ideally, the value stored here should equal the result of this computation "
"- but it may not be the case, especially in the following cases: "
"* Imported books "
"* Moved BookPortions. "
" Once upon a time this field also uniquely identified a BookPortion."
},
"thumbnail_path": {
"type": "string"
},
"base_data": {
"type": "string",
"enum": ["image", "text"]
},
# naming the field that contains this object `source` to make querying uniform (ref Annotation.source).
"source": DataSource.schema,
"publication_details": PublicationDetails.schema,
"portion_class": {
"type": "string",
"description": "book, part, chapter, verse, line etc.."
},
"curated_content": Text.schema,
"editable_by_others": {
"default": False
},
"targets": {
"maxLength": 1,
"items": BookPositionTarget.schema,
"description": (
"Target for BookPortion of which this BookPortion is a part. It is an array only for consistency. "
"For any given BookPortion, one can get the right order of contained BookPortions by seeking all "
"BookPortions referring to it in the targets list, and sorting them by their target.position values.")
}
},
}))
target_class = BookPositionTarget
@classmethod
def get_allowed_target_classes(cls):
return [BookPortion]
@classmethod
def from_details(cls, title, path=None, authors=None, targets=None, base_data=None,
curated_content=None, portion_class=None, publication_details=None, source=None):
if authors is None:
authors = []
book_portion = BookPortion()
book_portion.title = title
book_portion.authors = authors
# logging.debug(str(book_portion))
if path:
book_portion.path = path
targets = targets or []
logging.debug(str(book_portion))
book_portion.targets = targets
if curated_content is not None:
book_portion.curated_content = curated_content
if base_data is not None:
book_portion.base_data = base_data
if portion_class is not None:
book_portion.portion_class = portion_class
if publication_details is not None:
book_portion.publication_details = publication_details
if source is not None:
book_portion.source = source
book_portion.validate()
return book_portion
@classmethod
def from_path(cls, path, db_interface):
book_portion_dict = db_interface.find_one(find_filter={"path": path})
if book_portion_dict is None:
return None
else:
book_portion = JsonObject.make_from_dict(book_portion_dict)
return book_portion
@classmethod
def add_indexes(cls, db_interface):
super(BookPortion, cls).add_indexes(db_interface=db_interface)
db_interface.add_index(keys_dict={
"creation_details.names.script_renderings.text": 1
}, index_name="creation_details_names_script_renderings_text")
db_interface.add_index(keys_dict={
"creation_details.authors.names.script_renderings.text": 1
}, index_name="creation_details_authors_names_script_renderings_text")
db_interface.add_index(keys_dict={
"curated_content.script_renderings.text": 1
}, index_name="curated_content_script_renderings_text")
db_interface.add_index(keys_dict={
"creation_details.names.search_strings": 1
}, index_name="creation_details_names_search_strings")
db_interface.add_index(keys_dict={
"creation_details.authors.names.search_strings": 1
}, index_name="creation_details_authors_names_search_strings")
db_interface.add_index(keys_dict={
"curated_content.search_strings": 1
}, index_name="curated_content_search_strings")
def get_path(self, db_interface):
external_file_store = db_interface.external_file_store
import os
if getattr(self, "path", None) is not None:
return os.path.join(external_file_store, self.path)
elif getattr(self, "targets", None) is not None and len(self.targets) > 0:
container_book = self.targets[0]
return os.path.join(external_file_store, container_book.get_path(db_interface=db_interface), self._id)
def dump_book_portion(self, export_dir, db_interface):
import os
book_node = common.JsonObjectNode.from_details(content=self)
book_node.fill_descendents(db_interface=db_interface, entity_type="BookPortion")
export_dir_destination = os.path.join(export_dir, self._id)
if self.portion_class == "book":
import copy
copied_node = copy.deepcopy(book_node)
copied_node.recursively_delete_attr(field_name="path")
copied_node.dump_to_file(filename=os.path.join(export_dir_destination, "book.json"))
elif self.portion_class == "page":
# Just dump the file.
import shutil
# TODO: Remove this branch once data migration is done.
if getattr(self, "path", None) is not None:
src_file = self.path
# noinspection PyArgumentList
os.makedirs(name=export_dir_destination, exist_ok=True)
shutil.copyfile(os.path.join(db_interface.external_file_store, src_file), os.path.join(export_dir_destination, "content" + os.path.splitext( os.path.basename(src_file))[1]))
else:
for f in self.list_files(db_interface=db_interface):
# noinspection PyArgumentList
os.makedirs(name=export_dir_destination, exist_ok=True)
shutil.copyfile(os.path.join(self.get_external_storage_path(db_interface=db_interface), f), os.path.join(export_dir_destination, os.path.basename(f)))
for sub_portion in book_node.children:
sub_portion.content.dump_book_portion(export_dir=export_dir, db_interface=db_interface)
# Essential for depickling to work.
common.update_json_class_index(sys.modules[__name__])
logging.debug(common.json_class_index)
|
/sanskrit_data-0.8.13-py3-none-any.whl/sanskrit_data/schema/books.py
| 0.625324 | 0.283591 |
books.py
|
pypi
|
import logging
import sys
from sanskrit_data.schema import common
from sanskrit_data.schema.common import JsonObject, recursively_merge_json_schemas, TYPE_FIELD, update_json_class_index
logging.basicConfig(
level=logging.DEBUG,
format="%(levelname)s: %(asctime)s {%(filename)s:%(lineno)d}: %(message)s "
)
class UserPermission(JsonObject):
schema = recursively_merge_json_schemas(
JsonObject.schema, {
"properties": {
TYPE_FIELD: {
"enum": ["UserPermission"]
},
"service": {
"type": "string",
"description": "Allowable values should be predetermined regular expressions."
},
"actions": {
"type": "array",
"items": {
"type": "string",
"enum": ["read", "write", "admin"],
},
"description": "Should be an enum in the future."
},
},
}
)
@classmethod
def from_details(cls, service, actions):
obj = UserPermission()
obj.service = service
obj.actions = actions
return obj
def hash_password(plain_password):
import bcrypt
# (Using bcrypt, the salt is saved into the hash itself)
return bcrypt.hashpw(plain_password.encode(encoding='utf8'), bcrypt.gensalt()).decode(encoding='utf8')
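# Illustrative sketch: the bcrypt hash embeds its own salt, so verifying a password only
# needs the stored hash and the candidate password.
def _demo_password_hashing():
    import bcrypt
    hashed = hash_password(plain_password="some secret")
    assert bcrypt.checkpw("some secret".encode(encoding='utf8'), hashed.encode(encoding='utf8'))
    return hashed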
class AuthenticationInfo(JsonObject):
schema = recursively_merge_json_schemas(
JsonObject.schema, {
"properties": {
TYPE_FIELD: {
"enum": ["AuthenticationInfo"]
},
"auth_user_id": {
"type": "string"
},
"auth_provider": {
"type": "string",
"enum": ["google", "vedavaapi"]
},
"auth_secret_bcrypt": {
"type": "string",
"description": "This should be hashed, and merits being stored in a database."
},
"auth_secret_plain": {
"type": "string",
"description": "This should NEVER be set when stored in a database; but is good for client-server transmission purposes."
}
}
}
)
VEDAVAAPI_AUTH = "vedavaapi"
def __str__(self):
return self.auth_provider + "____" + self.auth_user_id
def check_password(self, plain_password):
# Check the hashed password. With bcrypt, the salt is saved into the hash itself.
import bcrypt
return bcrypt.checkpw(plain_password.encode(encoding='utf8'), self.auth_secret_bcrypt.encode(encoding='utf8'))
@classmethod
def from_details(cls, auth_user_id, auth_provider, auth_secret_hashed=None):
obj = AuthenticationInfo()
obj.auth_user_id = auth_user_id
obj.auth_provider = auth_provider
if auth_secret_hashed:
obj.auth_secret_hashed = auth_secret_hashed
return obj
def set_bcrypt_password(self):
if getattr(self, "auth_secret_plain", None) is not None and self.auth_secret_plain != "":
# noinspection PyAttributeOutsideInit
self.auth_secret_bcrypt = hash_password(plain_password=self.auth_secret_plain)
delattr(self, "auth_secret_plain")
def validate_schema(self):
super(AuthenticationInfo, self).validate_schema()
from jsonschema import ValidationError
self.set_bcrypt_password()
if getattr(self, "auth_secret_hashed", None) is not None and (self.auth_secret_hashed == ""):
raise ValidationError(message="auth_secret_hashed should be non-empty if present.")
class User(JsonObject):
"""Represents a user of our service."""
schema = recursively_merge_json_schemas(
JsonObject.schema, {
"properties": {
TYPE_FIELD: {
"enum": ["User"]
},
"user_type": {
"type": "string",
"enum": ["human", "bot"]
},
"authentication_infos": {
"type": "array",
"items": AuthenticationInfo.schema,
},
"permissions": {
"type": "array",
"items": UserPermission.schema,
},
},
}
)
@classmethod
def from_details(cls, user_type, auth_infos, permissions=None):
obj = User()
obj.authentication_infos = auth_infos
obj.user_type = user_type
if permissions:
obj.permissions = permissions
return obj
def validate_schema(self):
super(User, self).validate_schema()
def check_permission(self, service, action):
def fullmatch(pattern, string, flags=0):
"""Emulate python-3.4 re.fullmatch()."""
import re
return re.match("(?:" + pattern + r")\Z", string, flags=flags)
if getattr(self, "permissions", None) is not None:
for permission in self.permissions:
if fullmatch(pattern=permission.service, string=service):
for permitted_action in permission.actions:
if fullmatch(pattern=permitted_action, string=action):
return True
return False
def is_admin(self, service):
return self.check_permission(service=service, action="admin")
def is_human(self):
return getattr(self, "user_type", None) is not None and self.user_type == "human"
def get_user_ids(self):
return [str(auth_info) for auth_info in self.authentication_infos]
def get_first_user_id_or_none(self):
user_ids = self.get_user_ids()
if len(user_ids) > 0:
return user_ids[0]
else:
return None
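# Illustrative sketch (not part of the original module): permission service names and
# actions are matched as regular expressions against the full strings. The service names
# used here are hypothetical.
def _demo_check_permission():
    user = User.from_details(
        user_type="human",
        auth_infos=[AuthenticationInfo.from_details(auth_user_id="someone", auth_provider="google")],
        permissions=[UserPermission.from_details(service="ullekhanam.*", actions=["read", "write"])])
    assert user.check_permission(service="ullekhanam_db", action="read")
    assert not user.is_admin(service="ullekhanam_db")
    assert user.get_user_ids() == ["google____someone"]
    return user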
# Essential for depickling to work.
update_json_class_index(sys.modules[__name__])
logging.debug(common.json_class_index)
|
/sanskrit_data-0.8.13-py3-none-any.whl/sanskrit_data/schema/users.py
| 0.52342 | 0.226495 |
users.py
|
pypi
|
import logging
logging.basicConfig(
level=logging.DEBUG,
format="%(levelname)s: %(asctime)s {%(filename)s:%(lineno)d}: %(message)s "
)
from sanskrit_data.schema import common
from sanskrit_data.schema.books import BookPortion
from sanskrit_data.schema.common import Text, Target
from sanskrit_data.schema.ullekhanam import Annotation, TextTarget, TextAnnotation
class PadaAnnotation(Annotation):
schema = common.recursively_merge_json_schemas(Annotation.schema, ({
"type": "object",
"description": "A grammatical pada - subanta or tiNanta.",
"properties": {
common.TYPE_FIELD: {
"enum": ["PadaAnnotation"]
},
"targets": {
"type": "array",
"items": TextTarget.schema
},
"word": Text.schema,
"root": Text.schema,
},
}))
target_class = TextTarget
@classmethod
def get_allowed_target_classes(cls):
return [BookPortion, TextAnnotation]
# noinspection PyMethodOverriding
def set_base_details(self, targets, source, word, root):
super(PadaAnnotation, self).set_base_details(targets, source)
# noinspection PyAttributeOutsideInit
self.word = word
# noinspection PyAttributeOutsideInit
self.root = root
@classmethod
def from_details(cls, targets, source, word, root):
annotation = PadaAnnotation()
annotation.set_base_details(targets, source, word, root)
annotation.validate()
return annotation
class SubantaAnnotation(PadaAnnotation):
schema = common.recursively_merge_json_schemas(PadaAnnotation.schema, ({
"type": "object",
"description": "Anything ending with a sup affix. Includes avyaya-s.",
"properties": {
common.TYPE_FIELD: {
"enum": ["SubantaAnnotation"]
},
"linga": {
"type": "string",
"enum": ["strii", "pum", "napum", "avyaya"]
},
"vibhakti": {
"type": "string",
"enum": ["1", "2", "3", "4", "5", "6", "7", "1.sambodhana"]
},
"vachana": {
"type": "integer",
"enum": [1, 2, 3]
}
},
}))
# noinspection PyMethodOverriding
@classmethod
def from_details(cls, targets, source, word, root, linga, vibhakti, vachana):
obj = SubantaAnnotation()
obj.set_base_details(targets, source, word, root)
obj.linga = linga
obj.vibhakti = vibhakti
obj.vachana = vachana
obj.validate()
return obj
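# Illustrative sketch (the _id below is hypothetical; no database is consulted since
# from_details validates with db_interface=None): annotating the word rAmaH as a subanta.
def _demo_subanta_annotation():
    from sanskrit_data.schema.common import DataSource
    targets = [TextTarget.from_details(container_id="some_text_annotation_id")]
    source = DataSource.from_details(source_type="user_supplied", id="google____someone")
    word = Text.from_text_string(text_string="rAmaH")
    root = Text.from_text_string(text_string="rAma")
    return SubantaAnnotation.from_details(targets=targets, source=source, word=word, root=root,
                                          linga="pum", vibhakti="1", vachana=1)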
class TinantaAnnotation(PadaAnnotation):
schema = common.recursively_merge_json_schemas(PadaAnnotation.schema, ({
"type": "object",
"description": "Anything ending with a tiN affix.",
"properties": {
common.TYPE_FIELD: {
"enum": ["TinantaAnnotation"]
},
"lakAra": {
"type": "string",
"enum": ["laT", "laN", "vidhi-liN", "AshIr-liN", "loT", "liT", "luT", "LT", "luN", "LN", "leT"]
},
"puruSha": {
"type": "string",
"enum": ["prathama", "madhyama", "uttama"]
},
"vachana": {
"type": "integer",
"enum": [1, 2, 3]
}
},
}))
# noinspection PyMethodOverriding
@classmethod
def from_details(cls, targets, source, word, root, lakAra, puruSha, vachana):
obj = TinantaAnnotation()
obj.set_base_details(targets, source, word, root)
obj.lakAra = lakAra
obj.puruSha = puruSha
obj.vachana = vachana
obj.validate()
return obj
class TextSambandhaAnnotation(Annotation):
schema = common.recursively_merge_json_schemas(Annotation.schema, ({
"type": "object",
"description": "Describes connection between two text portions. Such connection is directional (ie it connects words in a source sentence to words in a target sentence.)",
"properties": {
common.TYPE_FIELD: {
"enum": ["TextSambandhaAnnotation"]
},
"targets": {
"description": "A pair of texts being connected. First text is the 'source text', second is the 'target text'",
},
"category": {
"type": "string"
},
"source_text_padas": {
"type": "array",
"description": "The entity being annotated.",
"items": Target.schema,
"minItems": 1,
},
"target_text_padas": {
"type": "array",
"description": "The entity being annotated.",
"minItems": 1,
"items": Target.schema
}
},
"required": ["combined_string"]
}))
def validate(self, db_interface=None, user=None):
super(TextSambandhaAnnotation, self).validate(db_interface=db_interface, user=user)
Target.check_target_classes(targets_to_check=self.source_text_padas, allowed_types=[PadaAnnotation], db_interface=db_interface, targeting_obj=self)
Target.check_target_classes(targets_to_check=self.target_text_padas, allowed_types=[PadaAnnotation], db_interface=db_interface, targeting_obj=self)
@classmethod
def get_allowed_target_classes(cls):
return [BookPortion, TextAnnotation]
class SandhiAnnotation(Annotation):
schema = common.recursively_merge_json_schemas(Annotation.schema, ({
"type": "object",
"properties": {
common.TYPE_FIELD: {
"enum": ["SandhiAnnotation"]
},
"combined_string": Text.schema,
"sandhi_type": {
"type": "string"
}
},
"required": ["combined_string"]
}))
@classmethod
def get_allowed_target_classes(cls):
return [PadaAnnotation]
@classmethod
def from_details(cls, targets, source, combined_string, sandhi_type="UNK"):
annotation = SandhiAnnotation()
annotation.set_base_details(targets, source)
annotation.combined_string = combined_string
annotation.sandhi_type = sandhi_type
annotation.validate()
return annotation
class SamaasaAnnotation(Annotation):
schema = common.recursively_merge_json_schemas(Annotation.schema, ({
"type": "object",
"properties": {
common.TYPE_FIELD: {
"enum": ["SamaasaAnnotation"]
},
"component_padas": {
"type": "array",
"description": "Pointers to PadaAnnotation objects corresponding to components of the samasta-pada",
"minItems": 1,
"items": Target.schema
},
"samaasa_type": {
"type": "string"
}
},
}))
@classmethod
def get_allowed_target_classes(cls):
return [PadaAnnotation]
def validate(self, db_interface=None, user=None):
super(SamaasaAnnotation, self).validate(db_interface=db_interface, user=user)
Target.check_target_classes(targets_to_check=self.component_padas, allowed_types=[PadaAnnotation], db_interface=db_interface, targeting_obj=self)
@classmethod
def from_details(cls, targets, source, combined_string, samaasa_type="UNK"):
annotation = SamaasaAnnotation()
annotation.set_base_details(targets, source)
annotation.combined_string = combined_string
annotation.samaasa_type = samaasa_type
annotation.validate()
return annotation
import sys
# Essential for depickling to work.
common.update_json_class_index(sys.modules[__name__])
logging.debug(common.json_class_index)
|
/sanskrit_data-0.8.13-py3-none-any.whl/sanskrit_data/schema/ullekhanam/sanskrit.py
| 0.714329 | 0.300938 |
sanskrit.py
|
pypi
|
import logging
import sys
from sanskrit_data.schema import common
from sanskrit_data.schema.books import BookPortion, CreationDetails
from sanskrit_data.schema.common import JsonObject, UllekhanamJsonObject, Target, DataSource, Text, NamedEntity
logging.basicConfig(
level=logging.DEBUG,
format="%(levelname)s: %(asctime)s {%(filename)s:%(lineno)d}: %(message)s "
)
class Annotation(UllekhanamJsonObject):
schema = common.recursively_merge_json_schemas(UllekhanamJsonObject.schema, ({
"type": "object",
"properties": {
common.TYPE_FIELD: {
"enum": ["Annotation"]
},
"targets": {
"minItems": 1,
},
},
"required": ["targets", "source"]
}))
def __init__(self):
super(Annotation, self).__init__()
@classmethod
def get_allowed_target_classes(cls):
return [BookPortion, Annotation]
def set_base_details(self, targets, source):
# noinspection PyAttributeOutsideInit
self.targets = targets
# noinspection PyAttributeOutsideInit
self.source = source
class Rectangle(JsonObject):
schema = common.recursively_merge_json_schemas(JsonObject.schema, ({
"type": "object",
"description": "A rectangle within an image.",
"properties": {
common.TYPE_FIELD: {
"enum": ["Rectangle"]
},
"x1": {
"type": "integer"
},
"y1": {
"type": "integer"
},
"w": {
"type": "integer"
},
"h": {
"type": "integer"
},
},
"required": ["x1", "y1", "w", "h"]
}))
@classmethod
def from_details(cls, x=-1, y=-1, w=-1, h=-1, score=0.0):
rectangle = Rectangle()
rectangle.x1 = x
rectangle.y1 = y
rectangle.w = w
rectangle.h = h
rectangle.score = score
rectangle.validate()
return rectangle
# Two rectangles are 'equal' if they overlap.
def __eq__(self, other):
xmax = max(self.x1, other.x1)
ymax = max(self.y1, other.y1)
overlap_w = min(self.x1 + self.w, other.x1 + other.w) - xmax
overlap_h = min(self.y1 + self.h, other.y1 + other.h) - ymax
return overlap_w > 0 and overlap_h > 0
def __ne__(self, other):
return not self.__eq__(other)
# noinspection PyTypeChecker
def __cmp__(self, other):
if self == other:
logging.info(str(self) + " overlaps " + str(other))
return 0
elif (self.y1 < other.y1) or ((self.y1 == other.y1) and (self.x1 < other.x1)):
return -1
else:
return 1
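# Illustrative sketch: two rectangles compare as 'equal' when they overlap; otherwise
# __cmp__ orders them top-to-bottom, then left-to-right (reading order).
def _demo_rectangle_overlap():
    a = Rectangle.from_details(x=0, y=0, w=10, h=10)
    b = Rectangle.from_details(x=5, y=5, w=10, h=10)  # overlaps a
    c = Rectangle.from_details(x=20, y=20, w=5, h=5)  # disjoint from a
    assert a == b
    assert a != c
    return [a, b, c]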
# noinspection PyMethodOverriding
class ImageTarget(Target):
schema = common.recursively_merge_json_schemas(Target.schema, ({
"type": "object",
"description": "The rectangle within the image being targetted.",
"properties": {
common.TYPE_FIELD: {
"enum": ["ImageTarget"]
},
"rectangle": Rectangle.schema
},
"required": ["rectangle"]
}))
# TODO use w, h instead.
# noinspection PyMethodOverriding
@classmethod
def from_details(cls, container_id, rectangle):
target = ImageTarget()
target.container_id = container_id
target.rectangle = rectangle
target.validate()
return target
class ValidationAnnotationSource(DataSource):
"""We don't override the schema here as no new fields are added."""
def setup_source(self, db_interface=None, user=None):
self.infer_by_admin(db_interface=db_interface, user=user)
super(ValidationAnnotationSource, self).setup_source(db_interface=db_interface, user=user)
class ValidationAnnotation(Annotation):
schema = common.recursively_merge_json_schemas(Annotation.schema, ({
"type": "object",
"description": "Any user can validate a certain annotation (or other object). But it is up to various systems whether such 'validation' has any effect.",
"properties": {
common.TYPE_FIELD: {
"enum": ["ValidationAnnotation"]
},
"source": ValidationAnnotationSource.schema
},
}))
def __init__(self):
super(ValidationAnnotation, self).__init__()
self.source = ValidationAnnotationSource()
class ImageAnnotation(Annotation):
""" Mark a certain fragment of an image.
`An introductory video <https://www.youtube.com/watch?v=SHzD3f5nPt0&t=29s>`_
"""
schema = common.recursively_merge_json_schemas(Annotation.schema, ({
"type": "object",
"description": "A rectangle within an image, picked by a particular annotation source.",
"properties": {
common.TYPE_FIELD: {
"enum": ["ImageAnnotation"]
},
"targets": {
"type": "array",
"items": ImageTarget.schema
}
},
}))
target_class = ImageTarget
@classmethod
def get_allowed_target_classes(cls):
return [BookPortion, ImageAnnotation]
@classmethod
def from_details(cls, targets, source):
annotation = ImageAnnotation()
annotation.set_base_details(targets, source)
annotation.validate()
return annotation
# Targets: ImageAnnotation(s) or TextAnnotation or BookPortion
class TextAnnotation(Annotation):
schema = common.recursively_merge_json_schemas(Annotation.schema, ({
"type": "object",
"description": "Annotation of some (sub)text from within the object (image or another text) being annotated. Tells: 'what is written in this image? or text portion?",
"properties": {
common.TYPE_FIELD: {
"enum": ["TextAnnotation"]
},
"content": Text.schema,
},
"required": ["content"]
}))
@classmethod
def get_allowed_target_classes(cls):
return [BookPortion, ImageAnnotation]
@classmethod
def from_details(cls, targets, source, content):
annotation = TextAnnotation()
annotation.set_base_details(targets, source)
annotation.content = content
annotation.validate()
return annotation
@classmethod
def add_indexes(cls, db_interface):
super(TextAnnotation, cls).add_indexes(db_interface=db_interface)
db_interface.add_index(keys_dict={
"content.search_strings": 1
}, index_name="content_search_strings")
class CommentAnnotation(TextAnnotation):
schema = common.recursively_merge_json_schemas(TextAnnotation.schema, ({
"description": "A comment that can be associated with nearly any Annotation or BookPortion.",
"properties": {
common.TYPE_FIELD: {
"enum": ["CommentAnnotation"]
},
}
}))
@classmethod
def get_allowed_target_classes(cls):
return [BookPortion, Annotation]
class TranslationAnnotation(TextAnnotation):
schema = common.recursively_merge_json_schemas(TextAnnotation.schema, ({
"description": "A comment that can be associated with nearly any Annotation or BookPortion.",
"properties": {
common.TYPE_FIELD: {
"enum": ["TranslationAnnotation"]
},
}
}))
@classmethod
def get_allowed_target_classes(cls):
return [BookPortion, Annotation]
class QuoteAnnotation(TextAnnotation):
schema = common.recursively_merge_json_schemas(TextAnnotation.schema, ({
"description": "A quote, a memorable text fragment.",
"properties": {
common.TYPE_FIELD: {
"enum": ["QuoteAnnotation"]
},
"editable_by_others": {
"default": False
},
}
}))
@classmethod
def get_allowed_target_classes(cls):
return [BookPortion, Annotation]
class Metre(NamedEntity):
schema = common.recursively_merge_json_schemas(NamedEntity.schema, ({
"type": "object",
"properties": {
common.TYPE_FIELD: {
"enum": ["Metre"]
}
}
}))
class MetreAnnotation(Annotation):
schema = common.recursively_merge_json_schemas(Annotation.schema, ({
"description": "A metre, which may be ",
"properties": {
common.TYPE_FIELD: {
"enum": ["MetreAnnotation"]
},
"metre": Metre.schema
}
}))
class TextOffsetAddress(JsonObject):
schema = common.recursively_merge_json_schemas(JsonObject.schema, {
"type": "object",
"description": "A way to specify a substring.",
"properties": {
common.TYPE_FIELD: {
"enum": ["TextOffsetAddress"]
},
"start": {
"type": "integer"
},
"end": {
"type": "integer"
}
}})
@classmethod
def from_details(cls, start, end):
obj = TextOffsetAddress()
obj.start = start
obj.end = end
obj.validate()
return obj
class TextTarget(Target):
schema = common.recursively_merge_json_schemas(Target.schema, ({
"type": "object",
"description": "A way to specify a particular substring within a string.",
"properties": {
common.TYPE_FIELD: {
"enum": ["TextTarget"]
},
"shabda_id": {
"type": "string",
"description": "Format: pada_index.shabda_index or just pada_index."
"Suppose that some shabda in 'rāgādirogān satatānuṣaktān' is being targetted. "
"This has the following pada-vigraha: rāga [comp.]-ādi [comp.]-roga [ac.p.m.] satata [comp.]-anuṣañj [ac.p.m.]."
"Then, rāga has the id 1.1. roga has id 1.3. satata has the id 2.1."
},
"offset_address": TextOffsetAddress.schema
},
}))
@classmethod
def from_details(cls, container_id, shabda_id=None, offset_address=None):
target = TextTarget()
target.container_id = container_id
if shabda_id is not None:
target.shabda_id = shabda_id
if offset_address is not None:
target.offset_address = offset_address
target.validate()
return target
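# Illustrative sketch: targetting characters 0..4 of the text in some TextAnnotation
# whose _id is hypothetical.
def _demo_text_target():
    address = TextOffsetAddress.from_details(start=0, end=4)
    target = TextTarget.from_details(container_id="some_text_annotation_id", offset_address=address)
    assert target.offset_address.end == 4
    return target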
# noinspection PyMethodOverriding
# Targets: TextTarget pointing to TextAnnotation
# noinspection PyMethodOverriding
# noinspection PyMethodOverriding,PyPep8Naming
# Targets: a pair of textAnnotation or BookPortion objects
# Targets: two or more PadaAnnotations
# Targets: one PadaAnnotation (the samasta-pada)
class OriginAnnotation(Annotation):
"""See schema.description."""
schema = common.recursively_merge_json_schemas(Annotation.schema, ({
"type": "object",
"description": "A given text may be quoted from some other book. This annotation helps specify such origin.",
"properties": {
common.TYPE_FIELD: {
"enum": ["OriginAnnotation"]
},
"originDetails": CreationDetails.schema,
},
}))
class Topic(NamedEntity):
schema = common.recursively_merge_json_schemas(NamedEntity.schema, ({
"type": "object",
"properties": {
common.TYPE_FIELD: {
"enum": ["Topic"]
}
}
}))
class TopicAnnotation(Annotation):
"""See schema.description."""
schema = common.recursively_merge_json_schemas(Annotation.schema, ({
"type": "object",
"description": "A given text may be quoted from some other book. This annotation helps specify such origin.",
"properties": {
common.TYPE_FIELD: {
"enum": ["TopicAnnotation"]
},
"topic": Topic.schema,
},
}))
class RatingAnnotation(Annotation):
"""See schema.description."""
schema = common.recursively_merge_json_schemas(Annotation.schema, ({
"type": "object",
"description": "A given text may be quoted from some other book. This annotation helps specify such origin.",
"properties": {
common.TYPE_FIELD: {
"enum": ["RatingAnnotation"]
},
"rating": {
"type": "number"
},
"editable_by_others": {
"type": "boolean",
"description": "Can this annotation be taken over by others for wiki-style editing or deleting?",
"default": False
}
},
}))
# Essential for depickling to work.
common.update_json_class_index(sys.modules[__name__])
logging.debug(common.json_class_index)
|
/sanskrit_data-0.8.13-py3-none-any.whl/sanskrit_data/schema/ullekhanam/__init__.py
| 0.632162 | 0.266027 |
__init__.py
|
pypi
|
from __future__ import print_function
from indic_transliteration import sanscript
from indic_transliteration import detect
from sanskrit_parser.util import normalization
from contextlib import contextmanager
import logging
import six
logger = logging.getLogger(__name__)
denormalize = False
class SanskritString(object):
""" Sanskrit String Class: Base of the class hierarchy
Attributes:
thing(str) : thing to be represented
encoding(str): SanskritBase encoding of thing as passed (eg: sanscript.HK, sanscript.DEVANAGARI)
Args:
thing(str): As above
encoding(str): As above
"""
def __init__(self, thing, encoding=None, unicode_encoding='utf-8'):
assert isinstance(thing, six.string_types)
# Encode early, unicode everywhere, decode late is the philosophy
# However, we need to accept both unicode and non unicode strings
# We are udAramatiH
if isinstance(thing, six.text_type):
self.thing = thing
else:
self.thing = six.text_type(thing, unicode_encoding)
if encoding is None:
# Autodetect Encoding
encoding = detect.detect(self.thing)
if encoding != sanscript.SLP1:
# Convert to SLP1
self.thing = sanscript.transliterate(self.thing, encoding, sanscript.SLP1)
# At this point, we are guaranteed that internal
# representation is in SLP1
self._canonical = None
def transcoded(self, encoding=None, strict_io=True):
""" Return a transcoded version of self
Args:
encoding(SanskritObject.Scheme):
Returns:
str: transcoded version
"""
s = self.thing
if not strict_io:
s = normalization.denormalize(s)
return sanscript.transliterate(s, sanscript.SLP1, encoding)
def canonical(self, strict_io=True):
""" Return canonical transcoding (SLP1) of self
"""
if self._canonical is None:
self._canonical = self.transcoded(sanscript.SLP1, strict_io)
return self._canonical
def devanagari(self, strict_io=True):
""" Return devanagari transcoding of self
"""
return self.transcoded(sanscript.DEVANAGARI, strict_io)
# Updates internal string, leaves everything else alone
# Not to be used in all cases, as this is very limited
def update(self, s, encoding=None):
self.thing = s
if encoding is not None:
self.encoding = encoding
self._canonical = None
def __str__(self):
if self._canonical is not None:
return self._canonical
else:
global denormalize
s = self.transcoded(sanscript.SLP1)
if denormalize:
s = normalization.denormalize(s)
self._canonical = s
return s
def __repr__(self):
return str(self)
def __getitem__(self, i):
return self.canonical()[i]
def __len__(self):
return len(self.canonical())
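# Illustrative usage sketch (not part of the original module): SanskritString
# autodetects or accepts an input encoding, stores the text internally in SLP1,
# and transcodes on demand. For this input the HK and SLP1 spellings coincide.
# Kept inside a helper so nothing runs at import time.
def _example_sanskrit_string():
    s = SanskritString("rAmaH", encoding=sanscript.HK)
    slp1 = s.canonical()      # canonical SLP1 form, here "rAmaH"
    deva = s.devanagari()     # same text transcoded to Devanagari
    return slp1, deva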
class SanskritImmutableString(SanskritString):
""" Immutable version of SanskritString
"""
def __init__(self, thing=None, encoding=None, unicode_encoding='utf-8'):
super().__init__(thing, encoding, unicode_encoding)
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return str(self) == str(other)
def __ne__(self, other):
return str(self) != str(other)
class SanskritNormalizedString(SanskritString):
""" SanskritString plus Normalization of input
"""
def __init__(self, thing=None, encoding=None, unicode_encoding='utf-8',
strict_io=True, replace_ending_visarga='s'):
super().__init__(thing, encoding, unicode_encoding)
if not strict_io:
# Normalize
logger.debug("Before normalization: %s", self.thing)
tmp = normalization.normalize(self.thing)
if replace_ending_visarga == 's':
self.thing = normalization.replace_ending_visarga_s(tmp)
elif replace_ending_visarga == 'r':
self.thing = normalization.replace_ending_visarga_r(tmp)
else:
self.thing = tmp
# Lazy Anusvaras (see issue #103)
try:
self.thing = sanscript.SCHEMES[sanscript.SLP1].fix_lazy_anusvaara(self.thing)
except (NameError, AttributeError):
print("Not fixing lazy anusvaras, you probably have an older version of indic_transliteration")
logger.debug("After normalization: %s", self.thing)
class SanskritObject(SanskritNormalizedString):
""" Sanskrit Object Class: Derived From SanskritString
Attributes:
"""
def __init__(self, thing=None, encoding=None, unicode_encoding='utf-8',
strict_io=True, replace_ending_visarga='s'):
super().__init__(thing, encoding, unicode_encoding, strict_io, replace_ending_visarga)
# Tags will go here as
self.tags = []
def setMorphologicalTags(self, t):
""" Set Morphological Tags on Sanskrit Object
Params:
t (list): List of morphological tags
"""
self.tags.extend(t)
return self.tags
def getMorphologicalTags(self):
""" Morphological Tags on object """
return self.tags
@contextmanager
def outputctx(strict_io):
global denormalize
save_denormalize = denormalize
denormalize = not strict_io
yield
denormalize = save_denormalize
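# Illustrative usage sketch (not part of the original module): outputctx
# temporarily flips the module-level denormalize flag and restores it on exit.
def _example_outputctx():
    with outputctx(strict_io=False):
        # Inside the context, __str__ applies denormalization; note that the
        # result is cached on the object by __str__.
        return str(SanskritString("gacCati", encoding=sanscript.SLP1))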
if __name__ == "__main__":
import argparse
def getArgs():
"""
Argparse routine.
Returns args variable
"""
# Parser Setup
parser = argparse.ArgumentParser(description='SanskritObject')
# String to encode
parser.add_argument('data', nargs="?", type=str, default="idam adbhutam")
# Input Encoding (autodetect by default)
parser.add_argument('--input-encoding', type=str, default=None)
# Output Encoding (Devanagari by default)
parser.add_argument('--output-encoding', type=str, default="Devanagari")
return parser.parse_args()
def main():
args = getArgs()
print(args.data)
if args.input_encoding is None:
ie = None
else:
ie = args.input_encoding
oe = args.output_encoding
s = SanskritObject(args.data, ie)
print(s.transcoded(oe))
main()
|
/sanskrit_parser-0.2.6.tar.gz/sanskrit_parser-0.2.6/sanskrit_parser/base/sanskrit_base.py
| 0.772917 | 0.214455 |
sanskrit_base.py
|
pypi
|
from __future__ import print_function
from indic_transliteration import sanscript
from . import sanskrit_base
import re
import six
class MaheshvaraSutras(object):
"""
Singleton MaheshvaraSutras class
Attributes:
MS(SanskritImmutableString) : Internal representation of mAheshvara sutras
MSS(str) : Canonical (SLP1) representation
"""
def __init__(self):
"""
Initialize Maheshvara Sutras object
"""
# Note that a space is deliberately left after each it to help in
# demarcating them.
self.MS = sanskrit_base.SanskritImmutableString(
u'अइउण् ऋऌक् एओङ् ऐऔच् हयवरट् लण् ञमङणनम् झभञ् घढधष् जबगडदश् खफछठथचटतव् कपय् शषसर् हल् ',
sanscript.DEVANAGARI)
# SLP1 version for internal operations
self.MSS = self.MS.canonical()
def __str__(self):
# Use SLP1 for default string output
return self.MSS
def getPratyahara(self, p, longp=True, remove_a=False, dirghas=False):
"""
Return list of varnas covered by a pratyahara
Args:
p(:class:SanskritImmutableString): Pratyahara
longp(boolean :optional:): When True (default), uses long pratyaharas
            remove_a(boolean :optional:): When True, removes intermediate 'a'. This is better for computational use
            dirghas(boolean :optional:): When True (default=False), adds dirgha vowels to the returned varnas
        Returns:
            (SanskritImmutableString): varnas covered by the pratyahara, in SLP1 encoding
"""
# SLP1 encoded pratyahara string
ps = p.canonical()
# it - halantyam
pit = ps[-1]
# Non it - all except it
pnit = ps[:-1]
# Non it position
pnpos = self.MSS.find(pnit)
# It position - space added to match it marker in internal
# representation
        if longp:  # Find last occurrence of it
            pitpos = self.MSS.rfind(pit + ' ', pnpos)
        else:  # Find first occurrence of it
pitpos = self.MSS.find(pit + ' ', pnpos)
if pitpos == -1:
raise ValueError(f'pratyaahaara {ps} not found in "{self.MSS}". Please recheck input')
# Substring. This includes intermediate its and spaces
ts = self.MSS[pnpos:pitpos]
# Replace its and spaces
ts = re.sub('. ', '', ts)
# Remove अकारः मुखसुखार्थः
if remove_a:
ts = ts[0] + ts[1:].replace('a', '')
# Add dIrgha vowels if requested
if dirghas:
ts = ts.replace('a', 'aA').replace('i', 'iI').replace('u', 'uU').replace('f', 'fF').replace('x', 'xX')
return sanskrit_base.SanskritImmutableString(ts, sanscript.SLP1)
def isInPratyahara(self, p, v, longp=True):
"""
Checks whether a given varna is in a pratyahara
Args:
p(SanskritImmutableString): Pratyahara
v(SanskritImmutableString): Varna
longp(boolean :optional:): When True (default), uses long pratyaharas
Returns
boolean: Is v in p?
"""
vs = v.canonical()
# १ . १ . ६९ अणुदित् सवर्णस्य चाप्रत्ययः
# So, we change long and pluta vowels to short ones in the input string
# Replace long vowels with short ones (note SLP1 encoding)
vs = re.sub('[AIUFX]+', lambda m: m.group(0).lower(), vs)
# Remove pluta
vs = vs.replace('3', '')
# Convert Pratyahara into String
# the 'a' varna needs special treatment - we remove the
# अकारः मुखसुखार्थः before searching!
pos = self.getPratyahara(p, longp, remove_a=vs[0] == 'a').canonical()
# Check if varna String is in Pratyahara String
return (pos.find(vs) != -1)
def isSavarna(self, v, a):
"""
Checks whether a given varna "a" is savarna to another "v"
Args:
v(SanskritImmutableString): Varna Indicator
a(SanskritImmutableString): Varna
v can be a svara (in which case we return True irrespective of length
of a)
        v can be an udit, in which case we return True for anything in the group
        v can be tapara, in which case we return True only for the right length
        Returns
            boolean: Is a savarna to v?
"""
ac = a.canonical()
vc = v.canonical()
# Single
if len(vc) == 1:
# १ . १ . ६९ अणुदित् सवर्णस्य चाप्रत्ययः
# So, we change long and pluta vowels to short ones in the input string
# Replace long vowels with short ones (note SLP1 encoding)
ac = re.sub('[AIUFX]+', lambda m: m.group(0).lower(), ac)
# Remove pluta
ac = ac.replace('3', '')
vc = re.sub('[AIUFX]+', lambda m: m.group(0).lower(), vc)
# Remove pluta
vc = vc.replace('3', '')
return ac == vc
elif vc[-1] == "t":
# taparastatkAlasya
return ac == vc[:-1]
        # FIXME implement tkArsya para interpretation
elif vc[-1] == "u":
# १ . १ . ६९ अणुदित् सवर्णस्य चाप्रत्ययः
if vc[0] == "k":
vc = "kKgGN"
elif vc[0] == "c":
vc = "cCjJY"
elif vc[0] == "w":
vc = "wWqQR"
elif vc[0] == "t":
vc = "tTdDn"
elif vc[0] == "p":
vc = "pPbBm"
return ac in vc
else:
return ac in vc
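# Illustrative usage sketch (not part of the original module): querying the
# mAheshvara sutras. "ik" expands to the vowels i, u, f, x (SLP1), and "a" is
# a member of the pratyahara "ac".
def _example_maheshvara_sutras():
    m = MaheshvaraSutras()
    ik = sanskrit_base.SanskritImmutableString("ik", sanscript.SLP1)
    ac = sanskrit_base.SanskritImmutableString("ac", sanscript.SLP1)
    a = sanskrit_base.SanskritImmutableString("a", sanscript.SLP1)
    print(m.getPratyahara(ik).canonical())   # expected: "iufx"
    print(m.isInPratyahara(ac, a))           # expected: True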
if __name__ == "__main__":
import argparse
def getArgs():
"""
Argparse routine.
Returns args variable
"""
# Parser Setup
parser = argparse.ArgumentParser(description='SanskritImmutableString')
# Pratyahara - print out the list of varnas in this
parser.add_argument('--pratyahara', type=str, default="ik")
# Varna. Optional. Check if this varna is in pratyahara above
parser.add_argument('--varna', type=str, default=None)
# Encoding Optional
parser.add_argument('--encoding', type=str, default=None)
# Short pratyaharas
parser.add_argument('--short', action='store_true')
# Remove intermediate as
parser.add_argument('--remove-a', action='store_true')
# Include dIrghas when returning the pratyAhAra
parser.add_argument('--dirghas', action='store_true', default=False)
parser.add_argument('--output-slp1', action='store_true')
return parser.parse_args()
def main():
args = getArgs()
m = MaheshvaraSutras()
print(m)
if args.encoding is not None:
e = args.encoding
else:
e = None
p = sanskrit_base.SanskritImmutableString(args.pratyahara, e)
longp = not args.short
if args.output_slp1 is False:
print(six.text_type(p.devanagari()))
print(six.text_type(m.getPratyahara(p, longp, args.remove_a, args.dirghas).devanagari()))
else:
print(six.text_type(p.canonical()))
print(six.text_type(m.getPratyahara(p, longp, args.remove_a, args.dirghas).canonical()))
if args.varna is not None:
v = sanskrit_base.SanskritImmutableString(args.varna, e)
print(u"Is {} in {}?".format(v.devanagari(),
p.devanagari()))
print(m.isInPratyahara(p, v, longp))
main()
|
/sanskrit_parser-0.2.6.tar.gz/sanskrit_parser-0.2.6/sanskrit_parser/base/maheshvara_sutra.py
| 0.641984 | 0.228307 |
maheshvara_sutra.py
|
pypi
|
from indic_transliteration import sanscript
from sanskrit_parser.base.sanskrit_base import SanskritImmutableString
from decimal import Decimal
from copy import deepcopy
from sanskrit_parser.generator.paninian_object import PaninianObject
import logging
logger = logging.getLogger(__name__)
# Global Domains
class GlobalDomains(object):
def __init__(self):
self.domains = {
"saMjYA": True,
"upadeSa": False,
"prakfti": False,
"pratyaya": False,
"aNga": False,
"pada": False,
"saMhitA": False,
"standard": False
}
def isdomain(self, d):
return self.domains[d]
def set_domain(self, d):
for k in self.domains:
if k == d:
self.domains[k] = True
else:
self.domains[k] = False
def active_domain(self):
r = []
for k in self.domains:
if self.domains[k]:
r.append(k)
return r
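# Illustrative usage sketch (not part of the original module): the domains act
# as a one-hot switch; set_domain activates exactly one domain at a time.
def _example_global_domains():
    d = GlobalDomains()
    d.set_domain("aNga")
    assert d.isdomain("aNga")
    assert d.active_domain() == ["aNga"]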
# Base class
class Sutra(object):
def __init__(self, name, aps, optional=False, overrides=None):
if isinstance(name, str):
self.name = SanskritImmutableString(name)
else:
self.name = name
if isinstance(aps, str):
self.aps = aps # Adhaya.pada.sutra
aps_l = aps.split(".")
aps_t = [int(_x) for _x in aps_l]
if len(aps_l) > 3: # Subsutra/Vartikam
aps_sub = Decimal("0."+str(aps_t[-1]))
else:
aps_sub = 0
self._aps_tuple = aps_t
        elif isinstance(aps, tuple):
            aps_t = aps
            self._aps_tuple = aps_t
            self.aps = '.'.join([str(x) for x in list(aps_t)])
            if len(aps_t) > 3:  # Subsutra/Vartikam
                aps_sub = Decimal("0." + str(aps_t[-1]))
            else:
                aps_sub = 0
self._aps_num = aps_t[2]+aps_t[1]*1000+aps_t[0]*10000 + aps_sub
self.overrides = overrides
self.optional = optional
logger.info(f"Initialized {self}: {self._aps_num} Optional:{self.optional}")
def __str__(self):
if self.optional:
_o = "*"
else:
_o = ""
return f"{self.aps:7}: {str(self.name)} {_o}"
class LRSutra(Sutra):
def __init__(self, name, aps, cond, xform, insert=None, domain=None,
update=None, optional=False, bahiranga=1, overrides=None):
'''
Sutra Class that expects a left and right input
'''
super().__init__(name, aps, optional, overrides)
self.domain = domain
self.cond = cond
self.xform = xform
self.update_f = update
self.insertx = insert
self.bahiranga = bahiranga # Bahiranga score. Smaller wins
def inAdhikara(self, context):
return self.adhikara(context)
def isTriggered(self, s1, s2, domains):
logger.debug(f"Checking {self} View: {s1} {s2}")
env = _env(s1, s2)
if self.domain is not None:
t = self.domain(domains)
else:
t = domains.isdomain("standard")
if self.cond is not None:
c = self.cond(env)
else:
c = True
logger.debug(f"Check Result {c and t} for {self}")
return c and t
def update(self, s1, s2, o1, o2, domains):
env = _env(s1, s2)
env["olp"] = o1
env["orp"] = o2
if self.update_f is not None:
self.update_f(env, domains)
return env["olp"], env["orp"]
def operate(self, s1, s2):
# We take the string tuple returned, and update s1, s2
rs1 = deepcopy(s1)
rs2 = deepcopy(s2)
if self.xform is not None:
env = _env(s1, s2)
ret = self.xform(env)
rs1.update(ret[0], sanscript.SLP1)
rs2.update(ret[1], sanscript.SLP1)
return rs1, rs2
def insert(self, s1, s2):
if self.insertx is not None:
env = _env(s1, s2)
itx = self.insertx(env)
r = [s1, s2]
for i in itx:
if not isinstance(itx[i], PaninianObject):
assert isinstance(itx[i], str)
itx[i] = PaninianObject(itx[i])
r.insert(i, itx[i])
logger.debug(f"After insertion {r}")
return r
else:
return(s1, s2)
def _env(s1, s2):
# Helper function to define execution environment
env = {}
env["lp"] = s1
env["rp"] = s2
if s1.canonical() == "":
env["l"] = SanskritImmutableString("")
else:
env["l"] = SanskritImmutableString(s1.canonical()[-1], sanscript.SLP1)
if s2.canonical() == "":
env["r"] = SanskritImmutableString("")
else:
env["r"] = SanskritImmutableString(s2.canonical()[0], sanscript.SLP1)
if len(s1.canonical()) > 1:
env["ll"] = SanskritImmutableString(s1.canonical()[-2], sanscript.SLP1)
env["lc"] = SanskritImmutableString(s1.canonical()[:-1], sanscript.SLP1)
else:
env["ll"] = SanskritImmutableString("")
env["lc"] = SanskritImmutableString("")
if len(s2.canonical()) > 1:
env["rr"] = SanskritImmutableString(s2.canonical()[1], sanscript.SLP1)
env["rc"] = SanskritImmutableString(s2.canonical()[1:], sanscript.SLP1)
else:
env["rr"] = SanskritImmutableString("", sanscript.SLP1)
env["rc"] = SanskritImmutableString("", sanscript.SLP1)
return env
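# Illustrative sketch (not part of the original module): the execution
# environment exposes the last/first varnas at the sandhi boundary plus the
# remaining context, which sutra conditions and transforms operate on. The two
# padas below are placeholders chosen only for illustration.
def _example_env():
    left = PaninianObject("rAma", encoding=sanscript.SLP1)
    right = PaninianObject("iti", encoding=sanscript.SLP1)
    env = _env(left, right)
    # env["l"] is "a" (last varna of the left pada), env["r"] is "i"
    # (first varna of the right pada); "lc"/"rc" hold the remaining context.
    return env["l"].canonical(), env["r"].canonical()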
|
/sanskrit_parser-0.2.6.tar.gz/sanskrit_parser-0.2.6/sanskrit_parser/generator/sutra.py
| 0.618665 | 0.178383 |
sutra.py
|
pypi
|
from indic_transliteration import sanscript
from sanskrit_parser.base.sanskrit_base import SanskritObject
import logging
logger = logging.getLogger(__name__)
class PaninianObject(SanskritObject):
""" Paninian Object Class: Derived From SanskritObject
Attributes:
"""
def __init__(self, thing=None, encoding=None, unicode_encoding='utf-8',
strict_io=True, replace_ending_visarga='s'):
super().__init__(thing, encoding, unicode_encoding, strict_io,
replace_ending_visarga)
self.inPrakriya = True
# FIXME: I don't like this being here
self.disabled_sutras = []
# Prakriya Related Tags are ephemeral
def hasTag(self, t):
return t in self.tags
def deleteTag(self, t):
return self.tags.remove(t)
def setTag(self, t):
if t not in self.tags:
self.tags.append(t)
return t
def fix(self):
self.inPrakriya = False
def isPada(self):
return self.hasTag("pada")
@classmethod
def join_objects(cls, objects):
logger.debug(f"Joining Objects {objects} {type(objects)}")
for o in objects[0]:
logger.debug(f"{o} type {type(o)}")
assert isinstance(o, SanskritObject), f"{o} type {type(o)}"
s = "".join([o.canonical() for o in objects[0]])
so = PaninianObject(s, encoding=sanscript.SLP1)
# Tag rules
# 1.4.14 suptiNantaM padam
if objects[0][-1].hasTag("sup") or objects[0][-1].hasTag("tiN"):
so.setTag("pada")
# 1.4.13 yasmAtpratyayaviDistadAdipratyayeNgam
elif objects[0][0].hasTag("aNga"):
so.setTag("aNga")
# 3.1.32 sannAdyantA dhAtavaH
if objects[0][-1].hasTag("sannAdi"):
so.setTag("DAtu")
# 1.2.46 krttaDitasamAsAsca
if objects[0][-1].hasTag("krt") or objects[0][-1].hasTag("tadDita"):
so.setTag("prAtipadika")
# Custom tag propagation for rule implementation
for t in ["eti", "eDati", "UW", "sTA", "sTamB"]:
if objects[0][0].hasTag(t) and objects[0][0].hasTag("DAtu"):
so.setTag(t)
for t in ["AN"]:
if objects[0][0].hasTag(t) and objects[0][0].hasTag("upasarga"):
so.setTag(t)
for t in ["trc", "trn"]:
if objects[0][-1].hasTag(t) and objects[0][0].hasTag("aNga"):
so.setTag(t)
for t in ["NI", "Ap", 'strI_abs']:
if objects[0][-1].hasTag(t):
so.setTag("strI")
so.setTag(t)
for tt in objects[0][0].tags:
so.setTag(tt)
if so.hasTag("pum"):
so.deleteTag("pum")
if so.hasTag("napum"):
so.deleteTag("napum")
return so
|
/sanskrit_parser-0.2.6.tar.gz/sanskrit_parser-0.2.6/sanskrit_parser/generator/paninian_object.py
| 0.403214 | 0.23444 |
paninian_object.py
|
pypi
|
from sanskrit_parser.generator.sutra import GlobalDomains
from sanskrit_parser.generator.paninian_object import PaninianObject
from copy import deepcopy, copy
import logging
logger = logging.getLogger(__name__)
class PrakriyaVakya(object):
"""
Prakriya Vakya class
Start with associated prakriti + pratyayas
Assemble into padas
Handle Pratyaya/String Agama / Lopa
Inputs:
v = list.
Elements of v can be PaninianObjects or
lists thereof
Internal storage:
- List of lists of PaninianObject objects
"""
def __init__(self, v):
# Deepcopy is required because we add tags to objects
# during prakriya, and we do not want predefined objects getting
# tags.
self.v = deepcopy(list(v))
def need_hierarchy_at(self, ix):
return not _isScalar(self.v[ix])
def copy_replace_at(self, ix, r):
vc = PrakriyaVakya(self.v)
# As above, deepcopy to prevent predefined objects getting tags
vc.v[ix] = deepcopy(r)
return vc
def copy_insert_at(self, ix, r):
vc = PrakriyaVakya(self.v)
# As above, deepcopy to prevent predefined objects getting tags
vc.v.insert(ix, deepcopy(r))
return vc
def replace_at(self, ix, r):
# As above, deepcopy to prevent predefined objects getting tags
self.v[ix] = deepcopy(r)
return self
def insert_at(self, ix, r):
# As above, deepcopy to prevent predefined objects getting tags
self.v.insert(ix, deepcopy(r))
return self
def __getitem__(self, ix):
return self.v[ix]
def __len__(self):
return len(self.v)
def __print__(self):
return [str(x) for x in self.v]
def __repr__(self):
return str([str(x) for x in self.v])
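# Illustrative sketch (not part of the original module): a PrakriyaVakya element
# that is itself a list marks a spot where a hierarchical (inner) prakriya is
# needed before the outer one can proceed. The strings below are placeholder
# prakriti/pratyaya labels, not a worked derivation.
def _example_prakriya_vakya():
    from indic_transliteration import sanscript
    v = PrakriyaVakya([PaninianObject("rAma", encoding=sanscript.SLP1),
                       [PaninianObject("gam", encoding=sanscript.SLP1),
                        PaninianObject("ti", encoding=sanscript.SLP1)]])
    assert not v.need_hierarchy_at(0)   # scalar element: no inner prakriya
    assert v.need_hierarchy_at(1)       # list element: inner prakriya needed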
class Prakriya(object):
"""
Prakriya Class
Inputs:
sutra_list: list of Sutra objects
inputs : PrakriyaVakya object
"""
def __init__(self, sutra_list, inputs):
self.sutra_list = sutra_list
self.pre_inputs = deepcopy(inputs)
self.inputs = copy(inputs)
self.hier_prakriyas = []
self.need_hier = False
# List of alternatives
        # Used only if hierarchy is needed
        self.hier_inputs = [self.inputs]
        # Assemble hierarchical prakriya outputs into a single list
self.hier_outputs = [[] for x in self.inputs]
# Scan inputs for hierarchical prakriya needs
for ix in range(len(self.inputs)):
if self.inputs.need_hierarchy_at(ix):
self.need_hier = True
# hierarchy needed here
hp = Prakriya(sutra_list,
PrakriyaVakya(self.inputs[ix]))
self.hier_prakriyas.append(hp)
# This will execute hierarchically as needed
hp.execute()
hpo = hp.output()
self.hier_outputs[ix] = hpo # accumulate hierarchical outputs
self.tree = PrakriyaTree()
if self.need_hier:
for ix, ol in enumerate(self.hier_outputs):
if ol != []: # Hierarchy exists here
tmpl = []
# For each alternate output
for o in ol:
hobj = PaninianObject.join_objects([o])
# Assemble exploded list with each input
# replaced by multiple alternates at this
# index
for i in self.hier_inputs:
tmpl.append(i.copy_replace_at(ix, hobj))
# Replace input list with exploded list
# Explosion at position ix is now dealt with
self.hier_inputs = tmpl
logger.debug(f"Hier inputs after expl {ix} {self.hier_inputs}")
# At the end of the loop above self.hier_inputs has been fully exploded
for i in self.hier_inputs:
_n = PrakriyaNode(i, i, "Prakriya Hierarchical Start")
self.tree.add_node(_n, root=True)
else:
_n = PrakriyaNode(self.inputs, self.inputs, "Prakriya Start")
self.tree.add_node(_n, root=True)
self.outputs = []
self.domains = GlobalDomains()
self.disabled_sutras = []
# Sliding window counter
self.windowIdx = 0
# pUrvaparanityAntaraNgApavAdAnamuttarottaraM balIyaH
def sutra_priority(self, sutras: list):
def _winner(s1, s2):
logger.debug(f"{s1} overrides {s1.overrides}")
logger.debug(f"{s2} overrides {s2.overrides}")
# Apavada
if (s2.overrides is not None) and (s1.aps in s2.overrides):
logger.debug(f"{s2} overrides {s1}")
return s2
elif (s1.overrides is not None) and (s2.aps in s1.overrides):
logger.debug(f"{s1} overrides {s2}")
return s1
# Nitya
# Antaranga
elif (s1.bahiranga < s2.bahiranga):
logger.debug(f"{s1} antaranga {s2}")
return s1
elif (s2.bahiranga < s1.bahiranga):
logger.debug(f"{s2} antaranga {s1}")
return s2
# samjYA before 1.4.2 vipratizeDe param kAryam
elif (s1._aps_num < 14000) or (s2._aps_num < 14000):
logger.debug(f"SaMjYA, lower of {s1} {s2}")
if s1._aps_num < s2._aps_num:
return s1
else:
return s2
            # Also handles the case where one sutra is in the sapAdasaptapadI and the other in the tripAdI
elif (s1._aps_num > 82000) or (s2._aps_num > 82000):
logger.debug(f"Tripadi, lower of {s1} {s2}")
if s1._aps_num < s2._aps_num:
return s1
else:
return s2
else:
# Para > purva
logger.debug(f"Sapadasaptapadi, higher of {s1} {s2}")
if s1._aps_num > s2._aps_num:
return s1
else:
return s2
_s = sutras
w = _s[0]
for s in _s[1:]:
w = _winner(w, s)
return w
def view(self, s, node, ix=0):
"""
Current view as seen by sutra s
"""
# Wrapper for special "siddha" situations
def _special_siddha(a1, a2):
# zqutva is siddha for q lopa
if (a1 == 84041) and (a2 == 83013):
return True
# q, r lopa siddha for purvadirgha
elif ((a1 == 83013) or (a1 == 83014)) and (a2 == 63111):
return True
else:
return False
if s is not None:
aps_num = s._aps_num
else:
aps_num = 0
# Default view
_l = self.inputs
if node is None:
# logger.debug(f"View {l} {node}")
return _l
if aps_num < 82000:
# FIXME: Only Sapadasaptapadi implemented.
# Need to implement asiddhavat, zutvatokorasiddhaH
# Can see the entire sapadasaptapadi
_n = node
while (self.tree.parent[_n] is not None) and \
((_n.sutra._aps_num > 82000)
and not _special_siddha(_n.sutra._aps_num, aps_num)):
_n = self.tree.parent[_n]
_l = _n.outputs
else:
# Asiddha
# Can see all outputs of sutras less than oneself
_n = node
while (self.tree.parent[_n] is not None) and \
((_n.sutra._aps_num > aps_num)
and not _special_siddha(_n.sutra._aps_num, aps_num)):
_n = self.tree.parent[_n]
_l = _n.outputs
if ix > (len(_l)-2):
# Someone has inserted something this sutra can't see
logger.debug(f"Unseen insertion? {s} {_l} {ix}")
ix = len(_l) - 2
return _l[ix:ix+2]
def _exec_single(self, node):
l = self.sutra_list # noqa: E741
# Sliding window, check from left
for ix in range(len(node.outputs)-1):
logger.debug(f"Disabled Sutras at window {ix} {[s for s in node.outputs[ix].disabled_sutras]}")
triggered = []
triggered = [s for s in l if ((s.aps not in node.outputs[ix].disabled_sutras)
and s.isTriggered(*self.view(s, node, ix), self.domains))]
# Break at first index from left where trigger occurs
if triggered:
_ix = ix
break
logger.debug(f"I: {node.id} {node.outputs} {[_r.tags for _r in node.outputs]} ")
if triggered:
ix = _ix
logger.debug(f"Triggered rules at window {ix}")
for t in triggered:
logger.debug(t)
s = self.sutra_priority(triggered)
v = self.view(s, node, ix)
logger.debug(f"Sutra {s} View {v} Disabled: {[s for s in v[0].disabled_sutras]}")
assert s.aps not in v[0].disabled_sutras
# Transformation
r = s.operate(*v)
r0 = r[0]
v0 = v[0]
# State update
r = s.update(*v, *r, self.domains)
r = s.insert(*r)
logger.debug(f"I (post update): {node.id} {node.outputs} {[_r.tags for _r in node.outputs]} ")
logger.debug(f"I (post update): {v}")
# Using sutra id in the disabled list to get round paninian object deepcopy
r0.disabled_sutras.append(s.aps)
if s.optional:
# Prevent optional sutra from executing on the same node again
v0.disabled_sutras.append(s.aps)
# Overridden sutras disabled
if s.overrides is not None:
for so in l:
if so.aps in s.overrides:
r0.disabled_sutras.append(so.aps)
if s.optional:
# Prevent optional sutra's overridden sutras from executing on the same node again
v0.disabled_sutras.append(so.aps)
logger.debug(f"Disabling overriden {so}")
# FIXME: disable sutras for AkaqArAdekA saMjYA
logger.debug(f"O: {r} {[_r.tags for _r in r]} Disabled: {[[s for s in _r.disabled_sutras] for _r in r]}")
# Update Prakriya Tree
# Craft inputs and outputs based on viewed inputs
# And generated outputs
pnv = node.outputs.copy_replace_at(ix, v[0]).copy_replace_at(ix+1, v[1])
pnr = node.outputs.copy_replace_at(ix, r[0]).copy_replace_at(ix+1, r[1])
if len(r) > 2:
for i in range(len(r)-2):
pnr = pnr.copy_insert_at(ix+i+2, r[i+2])
_ps = PrakriyaNode(pnv, pnr, s, ix, [t for t in triggered if t != s])
logger.debug(f'O Node: {str(_ps)}')
if node is not None:
self.tree.add_child(node, _ps, opt=s.optional)
else:
self.tree.add_node(_ps, root=True)
return r
else:
logger.debug(f"Domain {self.domains.active_domain()} - Nothing triggered")
return False
def _exec_all_domains(self, node):
for d in ["saMjYA", "prakfti", "pratyaya", "aNga", "standard", "pada", "saMhitA"]:
self.domains.set_domain(d)
r = self._exec_single(node)
if r:
return r
# Only if nothing ever triggered
return False
def execute(self):
if self.need_hier:
logger.debug(f"Input: {self.hier_inputs}")
else:
logger.debug(f"Input: {self.inputs}")
done = []
act = False
# Initial run on input
for r in self.tree.get_root():
_act = self._exec_all_domains(r)
act = act or _act
if (act):
# Iterate over leaves if something triggered
while (act):
act = False
for n in self.tree.get_leaves():
if n not in done:
res = self._exec_all_domains(n)
if not res:
done.append(n)
else:
act = True
for n in self.tree.get_leaves():
assert n in done
self.outputs.append(n.outputs)
else:
# Nothing triggered
logger.debug("Nothing Triggered - Passthrough")
for n in self.tree.get_root():
self.outputs.append(n.outputs)
r = self.outputs
logger.debug(f"Final Result: {r}\n")
return r
def output(self, copy=False):
if copy:
return deepcopy(self.outputs)
else:
return self.outputs
def describe(self):
print("\nPrakriya")
if self.hier_prakriyas != []:
print(f"Pre Input {self.pre_inputs}")
for h in self.hier_prakriyas:
print("Hierarchical Prakriya")
h.describe()
print(f"Input {self.inputs}")
self.tree.describe()
print(f"Final Output {self.outputs} = {[''.join([str(x) for x in y]) for y in self.outputs]}\n\n")
def dict(self):
return self.tree.dict()
_node_id = 0
class PrakriyaNode(object):
"""
Prakriya History Node
Inputs
inputs : list of Paninian Objects
outputs: list of Paninian Objects
sutra_id: id for triggered sutra
other_sutras: sutras that were triggered, but did not win.
"""
def __init__(self, inputs, outputs, sutra, ix=0, other_sutras=[]):
global _node_id
self.id = _node_id
_node_id = _node_id+1
self.inputs = inputs
self.outputs = outputs
self.sutra = sutra
self.other_sutras = other_sutras
self.index = ix
def __str__(self):
return f"{self.id} {self.sutra} {self.inputs} {self.index}-> {self.outputs}"
def __hash__(self):
return hash("{self.sutra} {self.inputs} {self.index}-> {self.outputs}")
def __eq__(self, other):
return str(self) == str(other)
def __ne__(self, other):
return str(self) != str(other)
def describe(self):
print("Prakriya Node")
print(str(self))
if self.other_sutras:
print("Sutras that were tiggered but did not win")
for s in self.other_sutras:
print(str(s))
print("End")
def dict(self):
return {
'id': self.id,
'sutra': str(self.sutra),
'inputs': self.inputs,
'outputs': self.outputs,
'window': self.index,
'other_sutras': [str(s) for s in self.other_sutras]
}
class PrakriyaTree(object):
"""
Prakriya Tree: Tree of PrakriyaNodes
"""
def __init__(self, node=None):
self.children = {}
self.parent = {}
self.leaves = []
self.roots = []
if node is not None:
self.add_node(node)
def add_node(self, node, root=False):
self.children[node] = []
self.leaves.append(node)
if root:
self.parent[node] = None
self.roots.append(node)
def get_leaves(self):
return self.leaves
def get_root(self):
return self.roots
def add_child(self, node, c, opt=False):
self.children[node].append(c)
assert (c not in self.parent), f"Duplicated {c}, hash{c}"
self.parent[c] = node
if c not in self.children:
self.add_node(c)
if (not opt) and (node in self.leaves):
self.leaves.remove(node)
def describe(self):
def _desc(n):
n.describe()
if n in self.leaves:
print("Leaf Node")
for c in self.children[n]:
print("Child")
_desc(c)
for r in self.roots:
print("Root")
_desc(r)
def dict(self):
def _dict(n):
d = n.dict()
d['children'] = [_dict(c) for c in self.children[n]]
return d
return {
'root': _dict(self.roots[0])
}
def _isScalar(x):
# We do not expect np arrays or other funky nonscalars here
return not (isinstance(x, list) or isinstance(x, tuple))
if __name__ == "__main__":
from sanskrit_parser.generator.pratyaya import * # noqa: F401, F403
from sanskrit_parser.generator.dhatu import * # noqa: F401, F403
from sanskrit_parser import enable_console_logger
from sandhi_yaml import sutra_list
from argparse import ArgumentParser
def getArgs():
"""
Argparse routine.
Returns args variable
"""
# Parser Setup
parser = ArgumentParser(description='Prakriya Utility')
# Input Encoding (autodetect by default)
parser.add_argument('inputs', type=str, nargs="*",
default=None)
return parser.parse_args()
def main(args):
enable_console_logger()
p = Prakriya(sutra_list, PrakriyaVakya(args.inputs))
p.execute()
        p.describe()
main(getArgs())
|
/sanskrit_parser-0.2.6.tar.gz/sanskrit_parser-0.2.6/sanskrit_parser/generator/prakriya.py
| 0.536799 | 0.15393 |
prakriya.py
|
pypi
|
import logging
from tinydb import TinyDB, Query
from sanskrit_parser.base.sanskrit_base import SanskritImmutableString
from sanskrit_parser.util.data_manager import data_file_path
class DhatuWrapper(object):
"""
Class to interface with the kRShNamAchArya dhAtupATha
https://github.com/sanskrit-coders/stardict-sanskrit/tree/master/sa-vyAkaraNa/dhAtu-pATha-kRShNAchArya
"""
db_file = "dhAtu-pATha-kRShNAchArya.json"
q = Query()
def __init__(self, logger=None):
self.logger = logger or logging.getLogger(__name__)
db_path = data_file_path(self.db_file)
self.db = TinyDB(db_path, access_mode='r')
assert len(self.db.all()) != 0
def _get_dhatus(self, d):
""" Get all tags for a dhatu d """
if d is None:
return None
else:
return self.db.search(self.q.DAtuH == d)
def is_sakarmaka(self, d):
""" Is d sakarmaka? """
# Tags
tl = self._get_dhatus(d)
if len(tl) != 0:
supported_karmakas = {'sakarmakaH', 'dvikarmakaH'}
return any([t['karmakatvaM'] in supported_karmakas for t in tl])
else:
self.logger.debug("Couldn't find dhatu {} in database".format(d))
return False
def is_dvikarmaka(self, d):
""" Is d dvikarmaka? """
# Tags
tl = self._get_dhatus(d)
if len(tl) != 0:
supported_karmakas = {'dvikarmakaH'}
return any([t['karmakatvaM'] in supported_karmakas for t in tl])
else:
self.logger.debug("Couldn't find dhatu {} in database".format(d))
return False
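# Illustrative usage sketch (not part of the original module): assumes the
# bundled dhAtupATha database file is available via data_file_path. Roots are
# given in SLP1; the exact answers depend on the database contents.
def _example_dhatu_wrapper():
    w = DhatuWrapper()
    return w.is_sakarmaka("kf"), w.is_dvikarmaka("nI")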
if __name__ == "__main__":
from argparse import ArgumentParser
def getArgs():
"""
Argparse routine.
Returns args variable
"""
# Parser Setup
parser = ArgumentParser(description='Dhatu Wrapper')
# String to encode
parser.add_argument('dhatu', nargs="?", type=str, default="kf")
# Input Encoding (autodetect by default)
parser.add_argument('--input-encoding', type=str, default=None)
parser.add_argument('--tags', type=str,
choices=["all", u'DAtuH', u'mUlaDAtuH',
u'DAtvarTaH', u'gaRaH', u'karmakatvaM',
u'iwtvaM', u'padam-upagrahaH', u'rUpam'],
default=u'karmakatvaM')
parser.add_argument('--debug', action='store_true')
return parser.parse_args()
def main():
args = getArgs()
print("Input Dhatu:", args.dhatu)
if args.debug:
logging.basicConfig(filename='DhatuWrapper.log', filemode='w', level=logging.DEBUG)
else:
logging.basicConfig(filename='DhatuWrapper.log', filemode='w', level=logging.INFO)
logger = logging.getLogger(__name__)
if args.input_encoding is None:
ie = None
else:
ie = args.input_encoding
i = SanskritImmutableString(args.dhatu, encoding=ie)
it = i.canonical()
print("Input String in SLP1:", it)
logger.info("Input String in SLP1: {}".format(it))
w = DhatuWrapper(logger=logger)
if args.tags == "all":
res = w._get_dhatus(it)
else:
            res = list(map(lambda x: x[args.tags], w._get_dhatus(it)))
print(res)
print("Is {} sakarmaka?: {}".format(it, w.is_sakarmaka(it)))
logger.info("Reported {}".format(res))
main()
|
/sanskrit_parser-0.2.6.tar.gz/sanskrit_parser-0.2.6/sanskrit_parser/util/DhatuWrapper.py
| 0.52074 | 0.205615 |
DhatuWrapper.py
|
pypi
|
import pickle
import sqlite3
import logging
from collections import namedtuple
from sanskrit_parser.base.sanskrit_base import SanskritImmutableString
from sanskrit_parser.util.lexical_lookup import LexicalLookup
from sanskrit_parser.util.inriatagmapper import inriaTagMapper
from sanskrit_parser.util.data_manager import data_file_path
_db = namedtuple('_db', ['db_file', 'tags', 'stems', 'buf'])
class InriaXMLWrapper(LexicalLookup):
"""
Class to interface with the INRIA XML database released
by Prof. Gerard Huet
https://gitlab.inria.fr/huet/Heritage_Resources
"""
'''
The custom database format has two parts:
1. A pickle file that contains a list of stems,
a list of tags, and a serialized buffer of the
indices of stems and tags for each form. The indices
are used as it is more efficient to store such integers
instead of the string for each tag.
2. An sqlite file that maps each form to the position
within the buffer that contains the serialized tuple
of stems and tags for that form. An sqlite database
is used to avoid having to build a huge dict in
memory for the 600K forms that are present in this db,
which consumes a lot of memory. (See
https://github.com/kmadathil/sanskrit_parser/issues/151)
To lookup the tag for a form, we use the sqlite db to find the
position in the buffer, deserialize the data at that position,
which gives us a list of the tag set for that form. For each
item in that list, we then lookup the right stem and tag in
the list of stems and tags loaded from the pickle file
'''
def __init__(self, logger=None):
self.pickle_file = "inria_forms.pickle"
self.logger = logger or logging.getLogger(__name__)
db_file = data_file_path("inria_forms_pos.db")
pkl_path = data_file_path("inria_stems_tags_buf.pkl")
self.db = self._load_db(db_file, pkl_path)
@staticmethod
def _load_db(db_file, pkl_path):
with open(pkl_path, 'rb') as f:
stems = pickle.load(f)
tags = pickle.load(f)
buf = f.read()
db = _db(db_file, tags, stems, buf)
return db
def _get_tags(self, word):
db = self.db
conn = sqlite3.connect(db.db_file)
cursor = conn.cursor()
res = cursor.execute('SELECT * FROM forms WHERE form=?', (word,)).fetchone()
if res is None:
return None
pos = res[1]
tag_index_list = pickle.loads(db.buf[pos:])
tags = []
for tag_index in tag_index_list:
tags.append(self._decode_tags(tag_index, db.tags, db.stems))
return tags
@staticmethod
def _decode_tags(tag_index, tags, stems):
t = [tags[x] for x in tag_index[1]]
stem = stems[tag_index[0]]
return (stem, set(t))
def valid(self, word):
conn = sqlite3.connect(self.db.db_file)
cursor = conn.cursor()
res = cursor.execute('SELECT COUNT(1) FROM forms WHERE form = ?', (word,)).fetchone()
return res[0] > 0
def get_tags(self, word, tmap=True):
tags = self._get_tags(word)
if tmap and (tags is not None):
tags = inriaTagMapper(tags)
return tags
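# Illustrative usage sketch (not part of the original module): assumes the
# bundled sqlite/pickle data files are available via data_file_path. The word
# must be in SLP1; with tmap=False, tags come back as (stem, tag-set) pairs.
def _example_inria_lookup():
    db = InriaXMLWrapper()
    if db.valid("gacCati"):
        for stem, tagset in db.get_tags("gacCati", tmap=False):
            print(stem, tagset)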
if __name__ == "__main__":
from argparse import ArgumentParser
def getArgs():
"""
Argparse routine.
Returns args variable
"""
# Parser Setup
parser = ArgumentParser(description='Interface to INRIA XML database')
# Input Encoding (autodetect by default)
parser.add_argument('--input-encoding', type=str, default=None)
parser.add_argument('--loglevel', type=str, default="info",
help="logging level. Can be any level supported by logging module")
parser.add_argument('word', nargs='?', type=str,
default=None,
help="Word to look up")
parser.add_argument('--no-map-tags', dest='map_tags',
action='store_false')
return parser.parse_args()
def main():
args = getArgs()
if args.input_encoding is None:
ie = None
else:
ie = args.input_encoding
if args.loglevel:
numeric_level = getattr(logging, args.loglevel.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % args.loglevel)
logging.basicConfig(level=numeric_level)
word_in = SanskritImmutableString(args.word, encoding=ie).canonical()
xmlDB = InriaXMLWrapper()
print("Getting tags for", word_in)
tags = xmlDB.get_tags(word_in, tmap=args.map_tags)
if tags is not None:
for t in tags:
print(t)
main()
|
/sanskrit_parser-0.2.6.tar.gz/sanskrit_parser-0.2.6/sanskrit_parser/util/inriaxmlwrapper.py
| 0.545286 | 0.352118 |
inriaxmlwrapper.py
|
pypi
|
from sanskrit_parser.util.inriaxmlwrapper import InriaXMLWrapper
from sanskrit_parser.util.sanskrit_data_wrapper import SanskritDataWrapper
from sanskrit_parser.util.lexical_lookup import LexicalLookup
import logging
def _merge_tags(tags):
''' Merge tags from multiple sources
Inputs
tags: List of elements of form (baseword, tagset)
Outputs
list of elements of form (baseword, tagset), with
tagsets properly merged
'''
tdict = {}
# Convert to a dict of sets for proper set union
for t in tags:
base = t[0]
if base not in tdict:
tdict[base] = {frozenset(t[1])}
else:
ttags = frozenset(t[1])
rs = set()
add = True
for tbs in tdict[base]:
if ttags == tbs: # Already in there
break
elif ttags.issuperset(tbs): # Superset, add and remove subset
rs.add(tbs)
elif ttags.issubset(tbs): # Subset, don't add
add = False
break
if rs:
tdict[base].difference_update(rs)
if add:
tdict[base].add(ttags)
tlist = []
# Convert back to list of tuples
for base in tdict:
tlist.extend([(base, set(s)) for s in tdict[base]])
return tlist
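# Illustrative sketch (not part of the original module): for the same stem, a
# tag-set that is a subset of another is absorbed by its superset, while
# distinct stems and genuinely different tag-sets are kept.
def _example_merge_tags():
    merged = _merge_tags([('gam', {'verb'}),
                          ('gam', {'verb', 'present'}),
                          ('gama', {'noun'})])
    # -> [('gam', {'verb', 'present'}), ('gama', {'noun'})] (order may vary)
    return merged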
class CombinedWrapper(LexicalLookup):
def __init__(self, logger=None):
self.inria = LexicalLookupFactory.create("inria")
self.sanskrit_data = LexicalLookupFactory.create("sanskrit_data")
self.logger = logger or logging.getLogger(__name__)
def valid(self, word):
return self.inria.valid(word) or self.sanskrit_data.valid(word)
def get_tags(self, word, tmap=True):
tags = self.inria.get_tags(word, tmap) or []
sanskrit_data_tags = self.sanskrit_data.get_tags(word, tmap)
if sanskrit_data_tags is not None:
tags.extend(sanskrit_data_tags)
tags = _merge_tags(tags)
if tags == []:
return None
else:
return tags
class LexicalLookupFactory(object):
@staticmethod
def create(name):
if name == "inria":
return InriaXMLWrapper()
if name == "sanskrit_data":
return SanskritDataWrapper()
if name == "combined":
return CombinedWrapper()
raise Exception("invalid type", name)
|
/sanskrit_parser-0.2.6.tar.gz/sanskrit_parser-0.2.6/sanskrit_parser/util/lexical_lookup_factory.py
| 0.439026 | 0.235834 |
lexical_lookup_factory.py
|
pypi
|
import io
import json
import logging
from dataclasses import dataclass
from enum import IntEnum
import requests
from indic_transliteration.sanscript.schemes import VisargaApproximation
from pydub import AudioSegment
from .base import TTSBase
from .util import transliterate_text
class BhashiniVoice(IntEnum):
FEMALE1 = 0
MALE1 = 1
FEMALE2 = 2
@dataclass
class BhashiniTTS(TTSBase):
url: str = "https://tts.bhashini.ai/v1/synthesize"
voice: BhashiniVoice = BhashiniVoice.FEMALE2
api_key: str = None
def synthesize(
self, text: str, input_encoding: str = None, visarga_approximation: int = VisargaApproximation.H
) -> AudioSegment:
response = self._synthesis_response(text, input_encoding, visarga_approximation)
audio = AudioSegment.from_file(io.BytesIO(response.content))
return audio
def _synthesis_response(
self, text: str, input_encoding: str = None, visarga_approximation: int = VisargaApproximation.H
):
text = transliterate_text(
text, input_encoding=input_encoding, visarga_approximation=visarga_approximation
)
headers = {"accept": "audio/mpeg"}
if self.api_key is not None:
headers["X-API-KEY"] = self.api_key
data = {"languageId": "kn", "voiceId": self.voice.value, "text": text}
try:
response = requests.post(self.url, headers=headers, json=data)
response.raise_for_status()
except Exception as e:
logging.info(json.dumps(data, indent=1, ensure_ascii=False))
raise
return response
@dataclass
class BhashiniProxy(TTSBase):
url: str = "https://sanskrit-tts-306817.appspot.com/v1/synthesize/"
voice: BhashiniVoice = BhashiniVoice.FEMALE2
def synthesize(
self, text: str, input_encoding: str = None, visarga_approximation: int = VisargaApproximation.H
) -> AudioSegment:
data = {
"text": text,
"input_encoding": input_encoding,
"voice": self.voice.value,
"visarga_approximation": visarga_approximation
}
response = requests.post(self.url, json=data)
response.raise_for_status()
audio = AudioSegment.from_file(io.BytesIO(response.content))
return audio
|
/sanskrit_tts-0.0.5.tar.gz/sanskrit_tts-0.0.5/sanskrit_tts/bhashini_tts.py
| 0.45641 | 0.172729 |
bhashini_tts.py
|
pypi
|
#: All legal sounds, including anusvara, ardhachandra, and Vedic `'L'`.
from builtins import map
from builtins import zip
ALL_SOUNDS = frozenset("aAiIuUfFxXeEoOMHkKgGNcCjJYwWqQRtTdDnpPbBmyrlLvSzsh'~")
#: All legal tokens, including sounds, punctuation (`'|'`), and whitespace.
ALL_TOKENS = ALL_SOUNDS | {'|', ' ', '\n'}
#: All vowels.
VOWELS = frozenset('aAiIuUfFxXeEoO')
#: Short vowels.
SHORT_VOWELS = frozenset('aiufx')
#: Stop consonants.
STOPS = frozenset('kKgGcCjJwWqQtTdDpPbB')
#: Nasals.
NASALS = frozenset('NYRnm')
#: Semivowels.
SEMIVOWELS = frozenset('yrlLv')
#: Savarga
SAVARGA = frozenset('Szsh')
#: Consonants.
CONSONANTS = STOPS.union(NASALS).union(SEMIVOWELS).union(SAVARGA)
#: Valid word-final sounds.
VALID_FINALS = frozenset('aAiIuUfeEoOkwtpNnmsr')
# General functions
# -----------------
def clean(phrase, valid):
"""Remove all characters from `phrase` that are not in `valid`.
:param phrase: the phrase to clean
:param valid: the set of valid characters. A sensible default is
`sounds.ALL_TOKENS`.
"""
return ''.join([L for L in phrase if L in valid])
def key_fn(s):
"""Sorting function for Sanskrit words in SLP1."""
sa = "aAiIuUfFxXeEoOMHkKgGNcCjJYwWqQRtTdDnpPbBmyrlvSzsh '~"
en = "123ABCDEFGHIJKLMNOPQRSTUVWabcdefghijklmnopqrstuvwxyz"
mapper = dict(zip(sa, en))
mapped = map(mapper.__getitem__, [x for x in s if x in ALL_SOUNDS])
return ''.join(mapped)
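# Illustrative usage sketch (not part of the original module): sorting SLP1
# words in traditional varna order rather than ASCII order.
def _example_sorted_words():
    words = ['gam', 'kf', 'aSva', 'indra']
    # expected order: ['aSva', 'indra', 'kf', 'gam']
    return sorted(words, key=key_fn)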
# Letter transformations
# ----------------------
def letter_transform(name, docstring=None):
data = {
'shorten': dict(zip('AIUFX', 'aiufx')),
'lengthen': dict(zip('aiufx', 'AIUFX')),
'semivowel': dict(zip('iIuUfFxXeEoO',
'y y v v r r l l ay Ay av Av'.split())),
'aspirate': dict(zip('kgcjwqtdpb',
'KGCJWQTDPB')),
'deaspirate': dict(zip('KGCJWQTDPB',
'kgcjwqtdpb')),
'voice': dict(zip('kKcCwWtTpP',
'gGjJqQdDbB')),
'devoice': dict(zip('gGjJqQdDbB',
'kKcCwWtTpP')),
'nasalize': dict(zip('kKgGhcCjJwWqQtTdDpPbB',
'NNNNNYYYYRRRRnnnnmmmm')),
'dentalize': dict(zip('wWqQRz',
'tTdDns')),
'retroflex': dict(zip('tTdDns',
'wWqQRz')),
'simplify': dict(zip('kgGNhjtTdDpPbBnmsrH',
'kkkkkwttttppppnmHHH')),
'guna': dict(zip('i I u U f F x X'.split(),
'e e o o ar ar al al'.split())),
'vrddhi': dict(zip('a i I u U f F x X e o'.split(),
'A E E O O Ar Ar Al Al E O'.split())),
'samprasarana': dict(zip('yrlv', 'ifxu'))
}
get = data[name].get
def func(L):
return get(L, L)
if docstring is None:
docstring = """{0} `L`. If this is not possible, return `L` unchanged.
:param L: the letter to {1}
""".format(name.capitalize(), name)
func.__name__ = name
func.__doc__ = docstring
return func
shorten = letter_transform('shorten')
lengthen = letter_transform('lengthen')
semivowel = letter_transform('semivowel')
aspirate = letter_transform('aspirate')
deaspirate = letter_transform('deaspirate')
voice = letter_transform('voice')
devoice = letter_transform('devoice')
nasalize = letter_transform('nasalize')
dentalize = letter_transform('dentalize')
retroflex = letter_transform('retroflex')
simplify = letter_transform('simplify',
docstring="""
Simplify the given letter, if possible.
Here, to "simplify" a letter is to reduce it to a sound that is permitted
to end a Sanskrit word. For instance, the `c` in `vAc` should be reduced
to `k`::
assert simplify('c') == 'k'
:param letter: the letter to simplify
"""
)
guna = letter_transform('guna',
docstring="""
Apply guna to the given letter, if possible.
"""
)
vrddhi = letter_transform('vrddhi',
docstring="""
Apply vrddhi to the given letter, if possible.
"""
)
samprasarana = letter_transform('samprasarana',
docstring="""
Apply samprasarana to the given letter, if possible.
"""
)
del letter_transform
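# Illustrative usage sketch (not part of the original module): each transform
# returns the mapped letter, or the input unchanged when no mapping applies.
def _example_letter_transforms():
    assert guna('i') == 'e'
    assert vrddhi('i') == 'E'       # SLP1 'E' = ai
    assert voice('k') == 'g'
    assert aspirate('g') == 'G'
    assert shorten('A') == 'a'
    assert simplify('r') == 'H'
    assert guna('a') == 'a'         # no mapping: returned unchanged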
# Term transformations
# --------------------
class Term(str):
def simplify(self):
"""Simplify the given string using consonant reduction."""
return self[:-1] + simplify(self[-1])
# Meter and metrical properties
# -----------------------------
def num_syllables(phrase):
"""Find the number of syllables in `phrase`.
:param phrase: the phrase to test
"""
return sum(1 for L in phrase if L in VOWELS)
def meter(phrase, heavy='_', light='.'):
"""Find the meter of the given phrase. Results are returned as a list
whose elements are either `heavy` and `light`.
By the traditional definition, a syllable is **heavy** if one of the
following is true:
- the vowel is long
- the vowel is short and followed by multiple consonants
- the vowel is followed by an anusvara or visarga
All other syllables are **light**.
:param phrase: the phrase to scan
:param heavy: used to indicate heavy syllables. By default it's a string,
but you can pass in anything.
:param light: used to indicate light syllables. By default it's a string,
but you can pass in anything.
"""
scan = []
had_consonant = False
# True iff we've seen an anusvara, a visarga, or some conjunct consonants
saw_cluster = False
append = scan.append
# Search for heavy syllable and call all other syllables light. Since
# syllable weight can depend on later consonants, we have to look ahead
# to determine the proper weight. An easy way to do that is to reverse
# the string:
for L in clean(phrase, ALL_SOUNDS)[::-1]:
if L in VOWELS:
if saw_cluster or L not in SHORT_VOWELS:
append(heavy)
else:
append(light)
saw_cluster = False
elif L in 'MH' or had_consonant:
saw_cluster = True
had_consonant = L in CONSONANTS
return scan[::-1]
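# Illustrative usage sketch (not part of the original module): scanning a short
# word. 'rAmaH' has two heavy syllables (a long vowel, then a short vowel
# followed by visarga).
def _example_meter():
    assert num_syllables('rAmaH') == 2
    assert meter('rAmaH') == ['_', '_']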
|
/sanskrit_util-0.1.2.tar.gz/sanskrit_util-0.1.2/sanskrit_util/sounds.py
| 0.801897 | 0.239549 |
sounds.py
|
pypi
|
import six
import types
import os
from sqlalchemy import create_engine, inspect
from sqlalchemy.orm import scoped_session, sessionmaker
from .schema import Base, EnumBase, GenderGroup
class Context(object):
"""The package context. In addition to storing basic config information,
such as the database URI or paths to various data files, a :class:`Context`
also constructs a :class:`~sqlalchemy.orm.session.Session` class for
connecting to the database.
You can populate a context in several ways. For example, you can pass a
:class:`dict`::
context = Context({'DATABASE_URI': 'sqlite:///data.sqlite'})
or a path to a Python module::
context = Context('project/config.py')
If you initialize a context from a module, note that only uppercase
variables will be stored in the context. This lets you use lowercase
variables as temporary values.
Config values are stored internally as a :class:`dict`, so you can always
just use ordinary :class:`dict` methods::
context.config['FOO'] = 'baz'
:param config: an object to read from. If this is a string, treat
`config` as a module path and load values from that
module. Otherwise, treat `config` as a dictionary.
"""
def __init__(self, config=None, connect=True):
#: A :class:`dict` of various settings. By convention, all keys are
#: uppercase. These are used to create :attr:`engine` and
#: :attr:`session`.
self.config = {}
#: The :class:`~sqlalchemy.engine.Engine` that underlies
#: the :attr:`session`.
self.engine = None
#: A :class:`~sqlalchemy.orm.session.Session` class.
self.session = None
if isinstance(config, (six.text_type, six.string_types)):
filepath = config
config = types.ModuleType('config')
config.__file__ = filepath
try:
exec(open(filepath).read(), config.__dict__)
except IOError as e:
e.strerror = 'Cannot load config file: %s' % e.strerror
raise
try:
config = config or {}
for key in config:
if key.isupper():
self.config[key] = config[key]
except TypeError:
for key in dir(config):
if key.isupper():
self.config[key] = getattr(config, key)
def default(name, *args):
path = os.path.join(self.config['DATA_PATH'], *args)
self.config.setdefault(name, path)
default('COMPOUNDED_NOMINAL_ENDINGS', 'nominal-endings-compounded.csv')
default('ENUMS', 'enums.csv')
default('GERUNDS', 'gerunds.csv')
default('INDECLINABLES', 'indeclinables.csv')
default('INFINITIVES', 'infinitives.csv')
default('INFLECTED_NOMINAL_ENDINGS', 'nominal-endings-inflected.csv')
default('IRREGULAR_ADJECTIVES', 'irregular-adjectives.csv')
default('IRREGULAR_NOUNS', 'irregular-nouns.csv')
default('MODIFIED_ROOTS', 'modified-roots.csv')
default('NOMINAL_STEMS', 'nominal-stems.csv')
default('PARTICIPLE_STEMS', 'participle-stems.csv')
default('PREFIXED_ROOTS', 'prefixed-roots.csv')
default('PREFIX_GROUPS', 'prefix-groups.csv')
default('PRONOUNS', 'pronouns.csv')
default('SANDHI_RULES', 'sandhi-rules.csv')
default('UNPREFIXED_ROOTS', 'unprefixed-roots.csv')
default('VERBAL_INDECLINABLES', 'verbal-indeclinables.csv')
default('VERBS', 'verbs.csv')
default('VERB_ENDINGS', 'verb-endings.csv')
default('VERB_PREFIXES', 'verb-prefixes.csv')
default('VERB_STEMS', 'verb-stems.csv')
if connect and 'DATABASE_URI' in self.config:
self.connect()
def build(self):
"""Build all data."""
from sanskrit_util import setup
setup.run(self)
def connect(self):
"""Connect to the database."""
self.engine = create_engine(self.config['DATABASE_URI'])
self.session = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=self.engine))
def create_all(self):
"""Create tables for every model in `sanskrit_util.schema`."""
metadata = Base.metadata
insp = inspect(self.engine)
extant = {t.name for t in metadata.tables.values() if insp.has_table(t.name)}
metadata.create_all(self.engine)
        for table in metadata.sorted_tables:
            if table.name not in extant:
                print(' [ c ] {0}'.format(table.name))
def drop_all(self):
"""Drop all tables defined in `sanskrit_util.schema`."""
Base.metadata.drop_all(self.engine)
def _build_enums(self):
"""Fetch and store enumerated data."""
self._enum_id = {}
self._enum_abbr = {}
self._gender_set = {}
session = self.session
for cls in EnumBase.__subclasses__():
key = cls.__tablename__
self._enum_id[key] = enum_id = {}
self._enum_abbr[key] = enum_abbr = {}
for item in session.query(cls).all():
enum_id[item.name] = enum_id[item.abbr] = item.id
enum_abbr[item.id] = enum_abbr[item.name] = item.abbr
for group in session.query(GenderGroup):
member_ids = set([x.id for x in group.members])
self._gender_set[group.id] = member_ids
session.remove()
@property
def enum_id(self):
"""Maps a name or abbreviation to an ID."""
try:
return self._enum_id
except AttributeError:
self._build_enums()
return self._enum_id
@property
def enum_abbr(self):
"""Maps an ID or name to an abbreviation."""
try:
return self._enum_abbr
except AttributeError:
self._build_enums()
return self._enum_abbr
@property
def gender_set(self):
try:
return self._gender_set
except AttributeError:
self._build_enums()
return self._gender_set
|
/sanskrit_util-0.1.2.tar.gz/sanskrit_util-0.1.2/sanskrit_util/context.py
| 0.684897 | 0.214393 |
context.py
|
pypi
|
import re
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from sqlalchemy.ext.orderinglist import ordering_list
from sqlalchemy.orm import relationship
Base = declarative_base()
class SimpleBase(Base):
"""A simple default base class.
This automatically creates:
- __tablename__
- id (primary key)
- name (string)
"""
__abstract__ = True
@declared_attr
def __tablename__(cls):
return re.sub('(?<!^)(?=[A-Z])', '_', cls.__name__).lower()
id = Column(Integer, primary_key=True)
name = Column(String, index=True)
def __repr__(self):
cls = self.__class__.__name__
return "%s%r" % (cls, (self.id, self.name))
class EnumBase(SimpleBase):
"""Base class for enumerations.
Each enumeration has a name and an abbreviation.
"""
__abstract__ = True
abbr = Column(String)
def __repr__(self):
cls = self.__class__.__name__
return "%s%r" % (cls, (self.id, self.name, self.abbr))
# Enumerations
# ------------
# Instead of enums, we use the following:
class Person(EnumBase):
"""Grammatical person:
- first person, corresponding to Panini's **uttamapuruṣa**
- second person, corresponding to Panini's **madhyamapuruṣa**
- third person, corresponding to Panini's **prathamapuruṣa**
"""
class Number(EnumBase):
"""Grammatical number:
- singular, corresponding to Panini's **ekavacana**
- dual, corresponding to Panini's **dvivacana**
- plural, corresponding to Panini's **bahuvacana**
"""
class Mode(EnumBase):
"""Tenses and moods:
- present, corresponding to Panini's **laṭ**
- aorist, corresponding to Panini's **luṅ**
- imperfect, corresponding to Panini's **laṅ**
- perfect, corresponding to Panini's **liṭ**
- simple future, corresponding to Panini's **lṛṭ**
- distant future, corresponding to Panini's **luṭ**
- conditional, corresponding to Panini's **lṛṅ**
- optative, corresponding to Panini's **vidhi-liṅ**
- imperative, corresponding to Panini's **loṭ**
- benedictive, corresponding to Panini's **āśīr-liṅ**
- injunctive
- future optative
- future imperative
"""
class Voice(EnumBase):
"""Grammatical voice:
- parasmaipada
- ātmanepada
- ubhayapada
"""
class Gender(EnumBase):
"""Grammatical gender:
- masculine, corresponding to Panini's **puṃliṅga**
- feminine, corresponding to Panini's **strīliṅga**
- neuter, corresponding to Panini's **napuṃsakaliṅga**
- unknown/undefined
"""
class GenderGroup(EnumBase):
"""Grammatical gender of a nominal stem. Since stems support nearly
every combination of genders, this class stores both individual
genders:
- masculine
- feminine
- neuter
- unknown/undefined
and collections of genders:
- masculine and feminine
- masculine and neuter
- feminine and neuter
- masculine, feminine, and neuter
"""
members = relationship('Gender', secondary='gender_group_assocs', backref='groups')
class Case(EnumBase):
"""Grammatical case.
- Nominative case, corresponding to Panini's **prathamā**
- Accusative case, corresponding to Panini's **dvitīyā**
- Instrumental case, corresponding to Panini's **tṛtīyā**
- Dative case, corresponding to Panini's **caturthī**
- Ablative case, corresponding to Panini's **pañcamī**
- Genitive case, corresponding to Panini's **ṣaṣṭhī**
- Locative case, corresponding to Panini's **saptamī**
- Vocative case, corresponding to Panini's **saṃbodhana**
"""
class VClass(EnumBase):
"""Verb class:
- class 1, corresponding to Panini's **bhvādi**
- class 2, corresponding to Panini's **adādi**
- class 3, corresponding to Panini's **juhotyādi**
- class 4, corresponding to Panini's **divādi**
- class 5, corresponding to Panini's **svādi**
- class 6, corresponding to Panini's **tudādi**
- class 7, corresponding to Panini's **rudhādi**
- class 8, corresponding to Panini's **tanādi**
- class 9, corresponding to Panini's **kryādi**
- class 10, corresponding to Panini's **curādi**
- class unknown, for verbs like "ah"
- nominal, corresponding to various Paninian terms
"""
__tablename__ = 'vclass'
class Modification(EnumBase):
"""Verb modification:
- Causative, corresponding to Panini's **ṇic**
- Desiderative, corresponding to Panini's **san**
- Intensive, corresponding to Panini's **yaṅ**
"""
# Roots, stems, and forms
# =======================
class Tag(SimpleBase):
"""Part of speech tag. It contains the following:
- noun
- pronoun
- adjective
- indeclinable
- verb
- gerund
- infinitive
- participle
- perfect indeclinable (also known as the *periphrastic perfect*)
- noun prefix
- verb prefix
"""
VERB = 1
NOMINAL = 2
PRONOUN = 3
PARTICIPLE = 4
INDECLINABLE = 5
VERBAL_INDECLINABLE = 6
GERUND = 7
INFINITIVE = 8
PERFECT_INDECLINABLE = 9
NOUN_PREFIX = 10
VERB_PREFIX = 11
# Unfinished forms
# ----------------
# Roots, stems, and prefixes
class Prefix(SimpleBase):
"""A generic prefix."""
pos_id = Column(ForeignKey(Tag.id))
pos = relationship(Tag)
__mapper_args__ = {'polymorphic_on': pos_id}
class VerbPrefix(Prefix):
"""A verb prefix. This corresponds to Panini's **gati**, which includes
**cvi** (`svāgatī-karoti`) and **upasarga** (`anu-karoti`).
"""
__tablename__ = None
__mapper_args__ = {'polymorphic_identity': Tag.VERB_PREFIX}
class NounPrefix(Prefix):
"""A noun prefix. This includes `nañ`, among others."""
__tablename__ = None
__mapper_args__ = {'polymorphic_identity': Tag.NOUN_PREFIX}
class NominalEnding(SimpleBase):
"""A suffix for regular nouns and adjectives. This corresponds to
Panini's **sup**."""
CONSONANT_STEM_TYPE = '$cons'
stem_type = Column(String)
gender_id = Column(ForeignKey(Gender.id))
case_id = Column(ForeignKey(Case.id))
number_id = Column(ForeignKey(Number.id))
compounded = Column(Boolean)
gender = relationship(Gender)
case = relationship(Case)
number = relationship(Number)
class VerbEnding(SimpleBase):
"""A suffix for conjugated verbs of any kind. This corresponds to
Panini's **tiṅ**."""
# : Name of the stem category that uses this ending. This is useful
# : for partitioning the list of endings according to the stem under
# : consideration.
# :
# : The names of these groups depend on the initial data. The default
# : data uses these names:
# : - `'simple'` for classes 1, 4, 6, and 10
# : - `'complex'` for classes 2, 3, 5, 7, 8, and 9
# : - `'both'` for all classes
category = Column(String)
person_id = Column(ForeignKey(Person.id))
number_id = Column(ForeignKey(Number.id))
mode_id = Column(ForeignKey(Mode.id))
voice_id = Column(ForeignKey(Voice.id))
person = relationship(Person)
number = relationship(Number)
mode = relationship(Mode)
voice = relationship(Voice)
class Root(SimpleBase):
"""A verb root. This corresponds to Panini's **dhātu**:
| 1.3.1 "bhū" etc. are called `dhātu`.
| 3.1.22 Terms ending in `san` etc. are called dhātu.
Moreover, :class:`Root` contains prefixed roots. Although this modeling
choice is non-Paninian, it does express the notion that verb prefixes can
cause profound changes in a root's meaning and identity.
"""
# re-declared for use with `remote_side` below
id = Column(Integer, primary_key=True)
# : The ultimate ancestor of this root. For instance, the basis of
# : "sam-upa-gam" is "gam".
basis_id = Column(ForeignKey('root.id'))
discriminator = Column(Integer)
basis = relationship('Root', remote_side=[id])
vclasses = association_proxy('paradigms', 'vclass')
voices = association_proxy('paradigms', 'voice')
__mapper_args__ = {
'polymorphic_identity': 0,
'polymorphic_on': discriminator
}
class PrefixedRoot(Root):
"""A root with one or more prefixes."""
__tablename__ = None
__mapper_args__ = {'polymorphic_identity': 1}
prefix_assocs = relationship('RootPrefixAssociation',
collection_class=ordering_list('position'),
order_by='RootPrefixAssociation.position')
prefixes = association_proxy('prefix_assocs', 'prefix')
class ModifiedRoot(Root):
"""A root with one or more modifications."""
__tablename__ = None
__mapper_args__ = {'polymorphic_identity': 2}
mod_assocs = relationship('RootModAssociation',
collection_class=ordering_list('position'),
order_by='RootModAssociation.position')
modifications = association_proxy('mod_assocs', 'modification')
class PrefixedModifiedRoot(PrefixedRoot):
"""A prefixed root with one or more modifications."""
__mapper_args__ = {'polymorphic_identity': 3}
# define `mod_assocs` and `modifications` explicitly for ModifiedRoot
mod_assocs = relationship('RootModAssociation',
collection_class=ordering_list('position'),
order_by='RootModAssociation.position')
modifications = association_proxy('mod_assocs', 'modification')
class Stem(SimpleBase):
"""A nominal stem. This corresponds to Panini's **aṅga**:
| 1.4.13 Anything to which a suffix can be added is called an `aṅga`.
But although "aṅga" also includes "dhātu," :class:`Stem` does
not. Verb roots are stored in :class:`Root`.
"""
genders_id = Column(ForeignKey(GenderGroup.id))
pos_id = Column(ForeignKey(Tag.id))
# : ``True`` iff a stem can produce its own words. For stems like "nara"
# : or "agni" this value is ``True``. For stems like "ja" (dependent on
# : upapada, as in "agra-ja") or "tva" (a suffix, as in "sama-tva"),
# : this value is ``False``.
dependent = Column(Boolean, default=False)
genders = relationship(GenderGroup)
pos = relationship(Tag)
__mapper_args__ = {'polymorphic_on': pos_id}
class NominalStem(Stem):
"""Stem of a :class:`Nominal`."""
__tablename__ = None
__mapper_args__ = {'polymorphic_identity': Tag.NOMINAL}
class PronounStem(Stem):
"""Stem of a :class:`Pronoun`."""
__tablename__ = None
__mapper_args__ = {'polymorphic_identity': Tag.PRONOUN}
class ParticipleStem(Stem):
"""Stem of a :class:`Participle`."""
__tablename__ = 'participlestem'
__mapper_args__ = {'polymorphic_identity': Tag.PARTICIPLE}
id = Column(ForeignKey(Stem.id), primary_key=True)
# root_id = Column(ForeignKey(Root.id))
mode_id = Column(ForeignKey(Mode.id))
voice_id = Column(ForeignKey(Voice.id))
# root = relationship(Root, backref='participle_stems')
mode = relationship(Mode)
voice = relationship(Voice)
class StemIrregularity(Base):
"""Record of an irregular stem.
Some Sanskrit stems are inflected irregularly. Since only an
exceedingly small number of stems is irregular (< 100), it's
easiest to record those irregularities in their own scheme.
"""
__tablename__ = 'stem_irregularity'
id = Column(ForeignKey(Stem.id), primary_key=True)
# : If ``True``, assume that only the forms stored in the database
# : are valid. If ``False``, assume that all unspecified forms can
# : be generated by applying normal rules and endings to the stem.
fully_described = Column(Boolean)
stem = relationship(Stem)
# Completed forms
# ---------------
# Nouns, verbs, indeclinables, adjectives, and the like. In Paninian terms,
# these are all `pada`s.
class Form(SimpleBase):
"""A complete form. This corresponds to Panini's **pada**:
| 1.4.14 Terms ending in "sup" and "tiṅ" are called `pada`.
In other words, a :class:`Form` is a self-contained linguistic unit that
could be used in a sentence as-is.
"""
pos_id = Column(ForeignKey(Tag.id))
pos = relationship(Tag)
__mapper_args__ = {'polymorphic_on': pos_id}
class Indeclinable(Form):
"""A complete form. This corresponds to Panini's **avyaya**."""
__tablename__ = None
__mapper_args__ = {'polymorphic_identity': Tag.INDECLINABLE}
class Verb(Form):
"""A complete form. This corresponds to Panini's **tiṅanta**."""
__tablename__ = 'verb'
__mapper_args__ = {'polymorphic_identity': Tag.VERB}
id = Column(ForeignKey(Form.id), primary_key=True)
root_id = Column(ForeignKey(Root.id))
vclass_id = Column(ForeignKey(VClass.id))
person_id = Column(ForeignKey(Person.id))
number_id = Column(ForeignKey(Number.id))
mode_id = Column(ForeignKey(Mode.id))
voice_id = Column(ForeignKey(Voice.id))
root = relationship(Root, backref='verbs')
person = relationship(Person)
number = relationship(Number)
mode = relationship(Mode)
voice = relationship(Voice)
class VerbalIndeclinable(Form):
"""A complete form. :class:`VerbalIndeclinable` is a superclass for
three more specific classes: :class:`Gerund`, :class:`Infinitive`, and
:class:`PerfectIndeclinable`.
"""
__tablename__ = 'verbalindeclinable'
__mapper_args__ = {'polymorphic_identity': Tag.VERBAL_INDECLINABLE}
id = Column(ForeignKey(Form.id), primary_key=True)
root_id = Column(ForeignKey(Root.id))
root = relationship(Root)
class Infinitive(VerbalIndeclinable):
"""A complete form. This corresponds to Panini's **tumanta**."""
__tablename__ = None
__mapper_args__ = {'polymorphic_identity': Tag.INFINITIVE}
class Gerund(VerbalIndeclinable):
"""A complete form. This corresponds to Panini's **ktvānta** and
**lyabanta**."""
__tablename__ = None
__mapper_args__ = {'polymorphic_identity': Tag.GERUND}
class PerfectIndeclinable(VerbalIndeclinable):
"""A complete form. This corresponds to forms ending in the suffix **ām**,
as in "īkṣāṃ cakre".
"""
__tablename__ = None
__mapper_args__ = {'polymorphic_identity': Tag.PERFECT_INDECLINABLE}
class AbstractNominal(Form):
"""A complete nominal form. This corresponds to Panini's **subanta**."""
__tablename__ = 'nominal'
id = Column(ForeignKey(Form.id), primary_key=True)
stem_id = Column(ForeignKey(Stem.id))
gender_id = Column(ForeignKey(Gender.id))
case_id = Column(ForeignKey(Case.id))
number_id = Column(ForeignKey(Number.id))
compounded = Column(Boolean)
stem = relationship(Stem, backref='forms')
gender = relationship(Gender)
case = relationship(Case)
number = relationship(Number)
class Nominal(AbstractNominal):
__tablename__ = None
__mapper_args__ = {'polymorphic_identity': Tag.NOMINAL}
class Participle(AbstractNominal):
"""A complete form. This corresponds to Panini's **niṣṭhā** and **sat**.
Moreover, it also corresponds to **kvasu**."""
__tablename__ = None
__mapper_args__ = {'polymorphic_identity': Tag.PARTICIPLE}
root = association_proxy('stem', 'root')
# Associations
# ------------
# Code for building various many-to-many relationships
class GenderGroupAssociation(Base):
__tablename__ = 'gender_group_assocs'
id = Column(Integer, primary_key=True)
group_id = Column(ForeignKey(GenderGroup.id))
gender_id = Column(ForeignKey(Gender.id))
def __init__(self, gender_id):
self.gender_id = gender_id
class Paradigm(SimpleBase):
"""Represents an inflectional paradigm. This associates a root with a
particular class and voice.
"""
root_id = Column(ForeignKey(Root.id), index=True)
vclass_id = Column(ForeignKey(VClass.id))
voice_id = Column(ForeignKey(Voice.id))
default = Column(Boolean, default=False)
root = relationship(Root, backref='paradigms')
vclass = relationship(VClass)
voice = relationship(Voice)
class RootPrefixAssociation(SimpleBase):
"""Associates a prefixed root with a list of prefixes."""
root_id = Column(ForeignKey(PrefixedRoot.id))
prefix_id = Column(ForeignKey(VerbPrefix.id))
position = Column(Integer)
prefix = relationship(VerbPrefix)
def __init__(self, prefix):
self.prefix = prefix
class RootModAssociation(SimpleBase):
"""Associates a modified root with a list of modifications."""
root_id = Column(ForeignKey(ModifiedRoot.id))
modification_id = Column(ForeignKey(Modification.id))
position = Column(Integer)
modification = relationship(Modification)
def __init__(self, modification):
self.modification = modification
# Sandhi rules
# ============
class SandhiType(EnumBase):
"""Rule type. Sandhi rules are usually of three types:
- *external* rules, which act between words
- *internal* rules, which act between morphemes
- *general* rules, which act in any context
"""
class SandhiRule(SimpleBase):
__tablename__ = 'sandhi'
first = Column(String)
second = Column(String)
result = Column(String)
rule_type = Column(ForeignKey(SandhiType.id))
def __repr__(self):
values = (self.id, self.first, self.second, self.result)
return 'SandhiRule(%r, %r, %r, %r)' % values
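# Illustrative usage sketch (not part of the original module). An in-memory
# SQLite engine is assumed purely for demonstration; `Base` is the
# declarative base defined earlier in this module.
#
#     from sqlalchemy import create_engine
#     from sqlalchemy.orm import sessionmaker
#
#     engine = create_engine('sqlite:///:memory:')
#     Base.metadata.create_all(engine)
#     session = sessionmaker(bind=engine)()
#     session.add(Person(name='first person', abbr='1'))
#     session.commit()
#     print(session.query(Person).first())   # Person(1, 'first person', '1')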
|
/sanskrit_util-0.1.2.tar.gz/sanskrit_util-0.1.2/sanskrit_util/schema.py
| 0.674158 | 0.428293 |
schema.py
|
pypi
|
from collections import defaultdict, namedtuple
from . import sounds, util
from .schema import *
import pprint
Ending = namedtuple('Ending', ['name', 'length', 'stem_type', 'gender_id',
'case_id', 'number_id', 'compounded',
'is_consonant_stem'])
class Analyzer(object):
"""analyzer"""
def __init__(self):
raise NotImplementedError
def analyze(self, token):
raise NotImplementedError
class SimpleAnalyzer(Analyzer):
"""A simple analyzer for Sanskrit words. The analyzer is simple
for a few reasons:
- It doesn't do any caching.
- It uses an ORM instead of raw SQL queries.
- Its output is always "well-formed." For example, neuter nouns can
take only neuter endings.
This analyzer is best used when memory is at a premium and speed is
a secondary concern (e.g. when on a web server).
"""
def __init__(self, ctx):
self.ctx = ctx
self.session = ctx.session
self.nominal_endings = util.HashTrie()
for e in self.session.query(NominalEnding):
stem_type = e.stem_type
is_cons = stem_type[-1] in sounds.CONSONANTS
if e.stem_type == "_":
e.stem_type = ""
is_cons = True
data = {
'name': e.name,
'stem_type': e.stem_type,
'length': len(e.name),
'gender_id': e.gender_id,
'case_id': e.case_id,
'number_id': e.number_id,
'compounded': e.compounded,
'is_consonant_stem': is_cons,
}
self.nominal_endings[e.name[::-1]] = Ending(**data)
if 'n' in e.name:
# TODO: do this more rigorously
# Also index the retroflexed variant of the ending ('n' -> 'R').
retroflexed_name = e.name.replace('n', 'R')
data['name'] = retroflexed_name
self.nominal_endings[retroflexed_name[::-1]] = Ending(**data)
self.session.remove()
def _analyze_as_form(self, word):
"""
Analyze a word by searching for an exact match in the database.
:param word: the word to analyze
"""
session = self.session
results = session.query(Form).filter(Form.name == word).all()
return results
def _analyze_as_stem(self, word):
"""
Analyze a word by searching for the nominal stems that might
have produced it.
:param word: the word to analyze
"""
session = self.session
gender_set = self.ctx.gender_set
returned = []
# Find all stems that could produce this word. Some of these
# stems might not exist.
stem_endings_map = defaultdict(set)
endings = self.nominal_endings[word[::-1]]
for e in endings:
if e.length > len(word):
continue
stem = word[:-e.length] + e.stem_type
if e.is_consonant_stem:
# Stem must exist and end in a consonant.
if not stem:
continue
if stem[-1] in sounds.VOWELS:
continue
if stem in sounds.CONSONANTS:
continue
stem_endings_map[stem].add(e)
if not stem_endings_map:
return []
# Check which of these stems are viable
stems = session.query(Stem) \
.filter(Stem.name.in_(stem_endings_map.keys()))
# Reattach endings to viable stems
for stem in stems:
name = stem.name
# For nouns, disregard endings that don't match the stem's
# genders.
# TODO: fix semantics of this
if stem.pos_id == Tag.NOMINAL:
stem_genders = gender_set[stem.genders_id]
endings = (e for e in stem_endings_map[name]
if e.gender_id in stem_genders)
else:
endings = stem_endings_map[name]
if stem.pos_id == Tag.PARTICIPLE:
# Remove feminine endings that attach to the 'at'/'t' stem type.
endings = (e for e in endings if not (e.gender_id == 2 and e.stem_type in ('at', 't')))
# Remove duplicates
endings = {(e.gender_id, e.case_id, e.number_id, e.compounded) for e in endings}
for gender_id, case_id, number_id, compounded in endings:
datum = {
'name': word,
'pos_id': stem.pos_id,
'stem': stem,
'gender_id': gender_id,
'case_id': case_id,
'number_id': number_id,
'compounded': compounded,
}
returned.append(Nominal(**datum))
return returned
def analyze(self, word):
"""Return all possible solutions for the given word. Any ORM
objects used in these solutions will be in a detached state.
:param word: the word to analyze. This should be a complete
word, or what Panini would call a *pada*.
"""
returned = self._analyze_as_form(word)
returned.extend(self._analyze_as_stem(word))
return returned
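# Illustrative sketch (not part of the original module). A configured
# context object from this package with a populated database is assumed;
# the SLP1 input below is arbitrary.
#
#     analyzer = SimpleAnalyzer(ctx)
#     for form in analyzer.analyze('gacCati'):
#         print(form.name, form.pos_id)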
|
/sanskrit_util-0.1.2.tar.gz/sanskrit_util-0.1.2/sanskrit_util/analyze.py
| 0.506836 | 0.296234 |
analyze.py
|
pypi
|
from builtins import next
from builtins import zip
from builtins import range
from builtins import object
import six
from . import sounds
from .util import HashTrie
class Exempt(six.text_type):
"""A helper class for marking strings as exempt from sandhi changes. To
mark a string as exempt, just do the following::
original = 'amI'
exempt = Exempt('amI')
:class:`Exempt` is a subclass of :class:`unicode`, so you can use normal
string methods on :class:`Exempt` objects.
"""
class SandhiObject(object):
def add_rules(self, rules):
raise NotImplementedError
class Joiner(SandhiObject):
"""Joins multiple Sanskrit terms by applying sandhi rules."""
def __init__(self, rules=None):
self.data = {}
if rules:
self.add_rules(rules)
def add_rules(self, rules):
"""Add rules for joining words.
Example usage::
joiner.add_rules([('a', 'i', 'e'), ('a', 'a', 'A')])
:param rules: a list of 3-tuples, each of which contains:
- the first part of the combination
- the second part of the combination
- the result
"""
self.data = {}
for first, second, result in rules:
self.data[(first, second)] = result
@staticmethod
def internal_retroflex(term):
"""Apply the "n -> ṇ" and "s -> ṣ" rules of internal sandhi.
:param term: the string to process
"""
# causes "s" retroflexion
s_trigger = set('iIuUfFeEoOkr')
# causes "n" retroflexion
n_trigger = set('fFrz')
# Allowed after n_trigger
n_between = sounds.VOWELS.union('kKgGNpPbBmhvyM')
# Must appear after the retroflexed "n"
n_after = sounds.VOWELS.union('myvn')
# Defines t retroflexion
retroflexion_dict = dict(zip('tT', 'wW'))
letters = list(term)
apply_s = False
apply_n = False
had_n = False # Used for double retroflexion ('nisanna' -> 'nizaRRa')
had_s = False # Used for 't' retroflexion
for i, L in enumerate(letters[:-1]):
# "t" retroflexion after "s" retroflexion
if had_s:
had_s = False
letters[i] = retroflexion_dict.get(L, L)
# "s" retroflexion
if apply_s and L == 's':
letters[i] = L = 'z'
had_s = True
apply_s = L in s_trigger
# "n" retroflexion
if had_n and L == 'n':
letters[i] = 'R'
had_n = False
elif apply_n and L == 'n' and letters[i + 1] in n_after:
letters[i] = 'R'
had_n = True
if L in n_trigger:
apply_n = True
else:
apply_n = apply_n and L in n_between
return ''.join(letters)
def join(self, chunks, internal=False):
"""Join the given chunks according to the object's rules::
assert 'tasyecCA' == s.join(['tasya', 'icCA'])
:meth:`join` does **not** take pragṛhya rules into account. As a
reminder, the main exceptions are:
| 1.1.11 "ī", "ū", and "e" when they end words in the dual.
| 1.1.12 the same vowels after the "m" of adas;
| 1.1.13 particles with just one vowel, apart from "ā"
| 1.1.14 particles that end in "o".
One simple way to account for these rules is to wrap exempt strings
with :class:`Exempt`::
assert joiner.join(['te', 'iti']) == 'ta iti'
assert joiner.join([Exempt('te'), 'iti']) == 'te iti'
:param chunks: a list of the strings that should be joined
:param internal: if true, join words using the empty string instead of
`' '`.
"""
separator = '' if internal else ' '
it = iter(chunks)
returned = next(it)
for chunk in it:
if not chunk:
continue
if isinstance(returned, Exempt):
returned += separator + chunk
else:
# `i` controls the number of letters to grab from the end of
# the first word. For most rules, one letter is sufficient.
# But visarga sandhi needs slightly more context.
for i in (2, 1, 0):
if not i:
returned += separator + chunk
break
key = (returned[-i:], chunk[0])
result = self.data.get(key, None)
if result:
returned = returned[:-i] + result + chunk[1:]
break
if isinstance(chunk, Exempt):
returned = Exempt(returned)
if internal:
return Joiner.internal_retroflex(returned)
else:
return returned
class Splitter(object):
"""Splits Sanskrit terms by undoing sandhi rules."""
def __init__(self, rules=None):
""""""
self.data = HashTrie()
if rules:
self.add_rules(rules)
def add_rules(self, rules):
for first, second, result in rules:
result = result.replace(' ', '')
items = (first, second, result, len(first), len(second),
len(result))
self.data[result] = items
def iter_splits(self, chunk):
"""Return a generator for all splits in `chunk`. Results are yielded
as 2-tuples containing the term before the split and the term after::
for item in s.iter_splits('nareti'):
before, after = item
:meth:`iter_splits` will generate many false positives, usually when the
first part of the split ends in an invalid consonant::
assert ('narAv', 'iti') in s.iter_splits('narAviti')
These should be filtered out in the calling function.
Splits are generated from left to right, but the function makes no
guarantees on when certain rules are applied. That is, output is
loosely ordered but nondeterministic.
"""
chunk_len = len(chunk)
for i in range(chunk_len):
# Default split: chop the chunk in half with no other changes.
# This can yield a lot of false positives.
chunk1, chunk2 = chunk[:i], chunk[i:]
if i:
yield (chunk1, chunk2)
# Rule-based splits: undo a sandhi change
rules = self.data[chunk2]
for first, second, result, _, _, len_result in rules:
before = chunk1 + first
after = second + chunk2[len_result:]
yield (before, after)
# Non-split: yield the chunk as-is.
yield (chunk, '')
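# Illustrative usage sketch (not part of the original module). The rules
# below are a tiny, made-up subset; real rule sets come from the package's
# initial data.
#
#     rules = [('a', 'i', 'e'), ('as', 'c', 'aS c')]
#     joiner = Joiner(rules)
#     assert joiner.join(['tasya', 'icCA']) == 'tasyecCA'
#
#     splitter = Splitter(rules)
#     assert ('tasya', 'icCA') in splitter.iter_splits('tasyecCA')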
|
/sanskrit_util-0.1.2.tar.gz/sanskrit_util-0.1.2/sanskrit_util/sandhi.py
| 0.836087 | 0.379005 |
sandhi.py
|
pypi
|
from collections import defaultdict
import six
from . import sounds
from .generate import NominalGenerator
from .schema import *
class SimpleQuery(object):
"""A simple API for database access."""
def __init__(self, ctx):
self.ctx = ctx
self.session = ctx.session
self.nominal = NominalGenerator(ctx)
# Store IDs of irregular stems
irreg = self.session.query(StemIrregularity) \
.filter(StemIrregularity.fully_described == True)
self.irregular_stems = set([x.id for x in irreg])
self.session.remove()
def _fetch_nominal_paradigm(self, stem_id, gender_id):
"""Fetch a nominal paradigm from the database."""
enum_abbr = self.ctx.enum_abbr
results = self.session.query(Nominal)\
.filter(Nominal.stem_id == stem_id)\
.filter(Nominal.gender_id == gender_id)
returned = {}
for nominal in results:
case = enum_abbr['case'][nominal.case_id]
number = enum_abbr['number'][nominal.number_id]
returned[(case, number)] = nominal.name
return returned
def _nominal_stem(self, stem_name, stem_cls=None):
"""Fetch a nominal stem from the database."""
stem_cls = stem_cls or Stem
stem = self.session.query(stem_cls)\
.filter(stem_cls.name == stem_name)\
.first()
return stem
def _simplify(self, forms):
"""Simplify the given forms by applying consonant reduction."""
for parse, name in six.iteritems(forms):
forms[parse] = sounds.Term(name).simplify()
def noun(self, stem_name, gender):
"""Query for nouns.
:param stem_name: the stem name
:param gender: the noun gender
"""
stem = self._nominal_stem(stem_name, NominalStem)
if stem is None:
return {}
if stem.id in self.irregular_stems:
gender_id = self.ctx.enum_id['gender'][gender]
returned = self._fetch_nominal_paradigm(stem.id, gender_id)
else:
returned = self.nominal.paradigm(stem_name, gender)
self._simplify(returned)
return returned
def pronoun(self, stem_name, gender):
"""Query for pronouns.
:param stem_name: the stem name
:param gender: the pronoun gender
"""
stem = self._nominal_stem(stem_name, PronounStem)
if stem is None:
return {}
gender_id = self.ctx.enum_id['gender'][gender]
returned = self._fetch_nominal_paradigm(stem.id, gender_id)
self._simplify(returned)
return returned
def verb(self, root_name, mode, voice, vclass=None, **kw):
"""Query for inflected verbs.
:param root_name: the verb root
:param mode: the verb mode
:param voice: the verb voice
:param vclass: the verb class to use. This can be used to
distinguish between homophonous roots, such as
'kR' ("do") and 'kR' ("praise").
"""
enum_id = self.ctx.enum_id
enum_abbr = self.ctx.enum_abbr
session = self.session
roots = session.query(Root).filter(Root.name == root_name)
roots_id = [r.id for r in roots]
mode_id = enum_id['mode'][mode]
voice_id = enum_id['voice'][voice]
returned = {}
results = session.query(Verb)\
.filter(Verb.root_id.in_(roots_id))\
.filter(Verb.mode_id == mode_id)\
.filter(Verb.voice_id == voice_id)
for verb in results:
person = enum_abbr['person'][verb.person_id]
number = enum_abbr['number'][verb.number_id]
returned[(person, number)] = verb.name
session.close()
self._simplify(returned)
return returned
def verb_summary(self, root_name, vclass=None):
"""Query for a summary of a verb's behavior.
The function returns the following information:
- the root ID
- the 3rd. sg. forms of all verbs produced from the plain root
- the stems of all participles produced from the plain root
:param root: the verb root
:param vclass: the verb class to use. This can be used to
distinguish between homophonous roots, such as
'kR' ("do") and 'kR' ("praise").
"""
verbs = defaultdict(list)
participles = defaultdict(list)
ctx = self.ctx
session = self.session
# abbr -> ID
ei_person = ctx.enum_id['person']
ei_number = ctx.enum_id['number']
# ID -> abbr
ea_mode = ctx.enum_abbr['mode']
ea_voice = ctx.enum_abbr['voice']
# Root
root = session.query(Root).filter(Root.name == root_name).first()
if root is None:
return {}
root_id = root.id
# Verbs
results = session.query(Verb).filter(Verb.root_id == root_id) \
.filter(Verb.person_id == ei_person['3']) \
.filter(Verb.number_id == ei_number['s'])
for r in results:
mode = ea_mode[r.mode_id]
voice = ea_voice[r.voice_id]
verbs[(mode, voice)].append(r.name)
# Participles
results = session.query(ParticipleStem) \
.filter(ParticipleStem.root_id == root_id)
for r in results:
mode = ea_mode[r.mode_id]
voice = ea_voice[r.voice_id]
participles[(mode, voice)].append(r.name)
session.close()
return {
'root_id': root_id,
'verbs': verbs,
'participles': participles,
}
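# Illustrative sketch (not part of the original module). A configured
# context object from this package and a populated database are assumed;
# the gender/mode/voice abbreviations come from the initial data, so the
# placeholders below are hypothetical.
#
#     q = SimpleQuery(ctx)
#     noun_forms = q.noun('nara', '<gender_abbr>')
#     verb_forms = q.verb('gam', '<mode_abbr>', '<voice_abbr>')
#     # Both return {(abbr, abbr): form, ...} dictionaries.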
|
/sanskrit_util-0.1.2.tar.gz/sanskrit_util-0.1.2/sanskrit_util/query.py
| 0.909132 | 0.302642 |
query.py
|
pypi
|
# LDAP Events
Here are some common scenarios in the LDAP protocol and how they can be implemented with this library.
The examples here are all based on an IO-less connection; the actual I/O layer still needs to be provided by the caller.
## Authentication
Authentication with LDAP falls into two different categories:
* Simple binds
* SASL binds
A simple bind works by providing the username and password in plaintext to be sent to the server.
Simple binds do not support message encryption and typically rely on the outer transport to encrypt the data, for example through TLS.
An example simple bind in Python looks like:
```python
import sansldap
client = sansldap.LDAPClient()
server = sansldap.LDAPServer()
client.bind_simple("username", "password")
client_outgoing = client.data_to_send()
bind_request = server.receive(client_outgoing)[0]
assert isinstance(bind_request, sansldap.BindRequest)
assert isinstance(bind_request.authentication, sansldap.SimpleCredential)
assert bind_request.name == "username"
assert bind_request.authentication.password == "password"
# Can specify an error code on a faulty bind.
server.bind_response(bind_request.message_id)
server_outgoing = server.data_to_send()
bind_response = client.receive(server_outgoing)[0]
assert isinstance(bind_response, sansldap.BindResponse)
assert bind_response.result.result_code == sansldap.LDAPResultCode.SUCCESS
```
A SASL bind is more complex and is designed to encapsulate credential blobs from other providers, like GSSAPI, as part of the bind operation.
A SASL bind can require multiple messages to complete the request and the number of payloads is dependent on the SASL mechanism used.
SASL examples can be found in [sasl.py](https://github.com/jborean93/sansldap/tests/examples/sasl.py).
### Exchanged Messages
|Message|Source|Purpose|
|-|-|-|
|[BindRequest](./source/sansldap.html#sansldap.BindRequest)|Client|Starts the bind request|
|[BindResponse](./source/sansldap.html#sansldap.BindResponse)|Server|Server's response to the bind request|
## Search Request
A search request is performed to search the LDAP database for specific entries.
See [LDAPClient.search_request](./source/sansldap.html#sansldap.LDAPClient.search_request) for more information on the parameters that a client can provide on a search request.
During the search request the server can send a `SearchResultEntry`, `SearchResultReference`, or a `SearchResultDone` message using the same message id as the `SearchRequest`.
The `SearchResultDone` message is the last message for the operation and indicates that the server will send no more results.
An example of a search request operation is:
```python
import sansldap
client = sansldap.LDAPClient()
server = sansldap.LDAPServer()
# Client most likely needs to be bound before the search
# There are more options that can be provided for a request.
client.search_request("", attributes=["defaultNamingContext"])
client_outgoing = client.data_to_send()
search_request = server.receive(client_outgoing)[0]
assert isinstance(search_request, sansldap.SearchRequest)
assert search_request.base_object == ""
assert search_request.attributes == ["defaultNamingContext"]
server.search_result_entry(
search_request.message_id,
search_request.base_object,
attributes=[
sansldap.PartialAttribute("defaultNamingContext", [b"DC=domain,DC=test"]),
],
)
server.search_result_done(search_request.message_id)
server_outgoing = server.data_to_send()
responses = client.receive(server_outgoing)
assert len(responses) == 2
assert isinstance(responses[0], sansldap.SearchResultEntry)
assert isinstance(responses[1], sansldap.SearchResultDone)
assert len(responses[0].attributes) == 1
assert responses[0].attributes[0].name == "defaultNamingContext"
assert responses[0].attributes[0].values == [b"DC=domain,DC=test"]
assert responses[1].result.result_code == sansldap.LDAPResultCode.SUCCESS
```
### Exchanged Messages
|Message|Source|Purpose|
|-|-|-|
|[SearchRequest](./source/sansldap.html#sansldap.SearchRequest)|Client|Initiates the search operation|
|[SearchResultEntry](./source/sansldap.html#sansldap.SearchResultEntry)|Server|A search result|
|[SearchResultReference](./source/sansldap.html#sansldap.SearchResultReference)|Server|A reference was encountered for a dataset in another server|
|[SearchResultDone](./source/sansldap.html#sansldap.SearchResultDone)|Server|The search operation is complete|
## StartTLS
A StartTLS operation is used to wrap an LDAP connection over port 389 in a TLS channel.
It is also known as explicit TLS; while most implementations recommend using LDAPS over port 636, StartTLS may be required by the server.
Performing a StartTLS operation is done by sending an ExtendedRequest, waiting for the ExtendedResponse, and then wrapping the transport channel in TLS so that subsequent messages are encrypted.
An example of a StartTLS operation is:
```python
import sansldap
client = sansldap.LDAPClient()
server = sansldap.LDAPServer()
client.extended_request(sansldap.ExtendedOperations.LDAP_START_TLS.value)
client_outgoing = client.data_to_send()
ext_request = server.receive(client_outgoing)[0]
assert isinstance(ext_request, sansldap.ExtendedRequest)
assert ext_request.name == sansldap.ExtendedOperations.LDAP_START_TLS.value
server.extended_response(ext_request.message_id)
server_outgoing = server.data_to_send()
ext_response = client.receive(server_outgoing)[0]
assert isinstance(ext_response, sansldap.ExtendedResponse)
assert ext_response.result.result_code == sansldap.LDAPResultCode.SUCCESS
# The TLS handshake needs to be performed on the socket to set up the TLS
# channel. Any future messages to send from data_to_send() needs to be wrapped
# by the TLS channel that was set up.
```
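The handshake itself happens outside of the message-level exchange above. As a rough illustration, the client side can be wrapped with Python's standard `ssl` module; the socket handling below is an assumption made for this example and is not part of this library:
```python
import socket
import ssl

# Hypothetical plaintext socket that carried the StartTLS exchange above.
sock = socket.create_connection(("ldap.example.com", 389))

context = ssl.create_default_context()
tls_sock = context.wrap_socket(sock, server_hostname="ldap.example.com")

# From this point on, bytes from client.data_to_send() are written to
# tls_sock, and bytes read from tls_sock are passed to client.receive().
```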
### Exchanged Messages
|Message|Source|Purpose|
|-|-|-|
|[ExtendedRequest](./source/sansldap.html#sansldap.ExtendedRequest)|Client|Starts the StartTLS operation|
|[ExtendedResponse](./source/sansldap.html#sansldap.ExtendedResponse)|Server|Server's response to the StartTLS request|
|
/sansldap-0.1.0.tar.gz/sansldap-0.1.0/docs/events.md
| 0.532911 | 0.739705 |
events.md
|
pypi
|
from typing import Tuple
RELATIVE_DIRECTIONS = {
'U': (0, 1),
'D': (0, -1),
'L': (-1, 0),
'R': (1, 0),
}
GEOGRAPHICAL_DIRECTIONS = {
'N': (0, 1),
'S': (0, -1),
'W': (-1, 0),
'E': (1, 0),
}
def get_direction(ch: str) -> Tuple[int, int]:
"""Coordinates point for direction
Args:
ch: str - direction as a single letter UDLR or NSWE
Returns:
tuple (x, y) - direction coordinates.
E.g.:
N -> (0, 1) # north
S -> (0, -1) # south
L -> (-1, 0) # left
Raises KeyError:
if direction char not in allowed directions.
"""
try:
return (
RELATIVE_DIRECTIONS.get(ch, None) or
GEOGRAPHICAL_DIRECTIONS[ch]
)
except KeyError:
raise KeyError(
f'No such direction {ch}. Available directions: NSWE and UDLR'
)
def get_target_point(start: Tuple[int, int], steps: str) -> Tuple[int, int]:
"""Coordinates of target point based on start point and steps.
Args:
start: tuple (x, y) - coordinates of the starting point.
steps: string e.g U15 - contains direction (U D L R or N E S W)
and number of steps.
Directions - two direction systems are possible:
Relative orientations:
U - Up -> (0, 1)
D - Down -> (0, -1)
L - Left -> (-1, 0)
R - Right -> (1, 0)
Geographical directions:
N - North -> (0, 1)
E - East -> (1, 0)
S - South -> (0, -1)
W - West -> (-1, 0)
Returns:
tuple (x, y) - coordinates of the target point.
"""
x, y = start
ch, n = steps[0], int(steps[1:])
dx, dy = get_direction(ch)
return x + n * dx, y + n * dy
def path_points(start, steps):
"""Generate coordinates of each path point based on start point and steps.
Args:
start: tuple (x, y) - coordinates of the starting point.
steps: string e.g U15 - contains direction (UDLR or NESW)
and number of steps.
Directions - two direction systems are possible:
Relative orientations:
U - Up -> (0, 1)
D - Down -> (0, -1)
L - Left -> (-1, 0)
R - Right -> (1, 0)
Geographical directions:
N - North -> (0, 1)
E - East -> (1, 0)
S - South -> (0, -1)
W - West -> (-1, 0)
Yields:
tuple (x, y) - point.
"""
x, y = start
ch, n = steps[0], int(steps[1:])
dx, dy = get_direction(ch)
tx, ty = x + n * dx, y + n * dy
while not (x == tx and y == ty):
x += dx
y += dy
yield x, y
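# Illustrative usage (not part of the original module):
#
#     >>> get_target_point((0, 0), 'U15')
#     (0, 15)
#     >>> list(path_points((0, 0), 'E3'))
#     [(1, 0), (2, 0), (3, 0)]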
|
/santa_helpers-0.0.2.tar.gz/santa_helpers-0.0.2/santa_helpers/paths.py
| 0.920133 | 0.569583 |
paths.py
|
pypi
|
import logging
from typing import List
from typing import Optional
from sqlalchemy import String, Float, Integer, select, insert, update
from sqlalchemy.orm import DeclarativeBase, Session, Mapped, mapped_column
from sqlalchemy.exc import IntegrityError
from datetime import datetime
from .parsers import parse_daily_measure, parse_hourly_measure, date_to_dt
logger = logging.getLogger(__name__)
class Base(DeclarativeBase):
pass
class DailyMeasure(Base):
__tablename__ = "santacruz_watersmart_daily"
ts: Mapped[int] = mapped_column(Integer, primary_key=True)
date: Mapped[str] = mapped_column(String(20))
consumption: Mapped[float] = mapped_column(Float)
precipitation: Mapped[Optional[float]]
temperature: Mapped[Optional[float]]
def __repr__(self) -> str:
return f"DailyMeasure(ts={self.ts!r}, date={self.date!r}, consumption={self.consumption!r})"
class HourlyMeasure(Base):
__tablename__ = "santacruz_watersmart_hourly"
ts: Mapped[int] = mapped_column(Integer, primary_key=True)
date: Mapped[str] = mapped_column(String(20))
consumption: Mapped[float] = mapped_column(Float)
leak: Mapped[Optional[float]]
flags: Mapped[Optional[str]]
def __repr__(self) -> str:
return f"HourlyMeasure(ts={self.ts!r}, date={self.date!r}, consumption={self.consumption!r})"
class SQLStorageClass():
def __init__(self, engine, insert=insert):
self.engine = engine
self.insert = insert
self.type = type(self)
self.daily_class = DailyMeasure
self.hourly_class = HourlyMeasure
self.daily_measure_parser = parse_daily_measure
self.hourly_measure_parser = parse_hourly_measure
with Session(engine) as session:
Base.metadata.create_all(self.engine)
def save(self, class_name, dataset):
if class_name == self.daily_class:
return self.save_daily(dataset)
elif class_name == self.hourly_class:
return self.save_hourly(dataset)
def save_daily(self, dataset):
with Session(self.engine) as session:
for row in dataset:
dt = date_to_dt(row['categories'])
ts = int(dt.timestamp())
date = dt.isoformat()
values = {
"date": date,
"consumption": row['consumption'],
"temperature": row['temperature'],
"precipitation": row['precipitation']
}
try:
query = self.insert(DailyMeasure).values(
ts=ts,
**values
).on_conflict_do_update(index_elements=DailyMeasure.__table__.primary_key, set_=values)
session.execute(query)
except IntegrityError as e:
query = update(DailyMeasure).where(DailyMeasure.ts == ts).values(**values)
session.execute(query)
except Exception as e:
logger.error(e)
session.commit()
def save_hourly(self, dataset):
with Session(self.engine) as session:
for row in dataset:
ts = row['ts']
dt = datetime.fromtimestamp(ts)
date = dt.isoformat()
values = {
"date": date,
"consumption": row['gallons'],
"leak": row['leak_gallons'],
"flags": "|".join(row['flags']) if row['flags'] is not None else None
}
try:
query = self.insert(HourlyMeasure).values(
ts=ts,
**values
).on_conflict_do_update(index_elements=HourlyMeasure.__table__.primary_key, set_=values)
session.execute(query)
except IntegrityError as e:
query = update(HourlyMeasure).where(HourlyMeasure.ts == ts).values(**values)
session.execute(query)
except Exception as e:
logger.error(e)
session.commit()
def get_history(self, class_name, entity_parser, from_ts=None, to_ts=None, limit=None, offset=None, ascending=True):
with Session(self.engine) as session:
query = select(class_name)
if from_ts and isinstance(from_ts, int):
query = query.where(class_name.ts >= from_ts)
if to_ts and isinstance(to_ts, int):
query = query.where(class_name.ts <= to_ts)
if ascending:
query = query.order_by(class_name.ts.asc())
else:
query = query.order_by(class_name.ts.desc())
if limit and isinstance(limit, int):
query = query.limit(limit)
if offset and isinstance(offset, int):
query = query.offset(offset)
dataset = []
for row in session.execute(query):
dataset.append(entity_parser(row[0]))
return dataset
def last_ts(self, class_name):
with Session(self.engine) as session:
query = select(class_name).order_by(class_name.ts.desc())
logger.info(query)
row = session.execute(query).first()
if row:
return row[0].ts
else:
return None
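# Illustrative setup sketch (not part of the original module). The default
# SQLAlchemy core `insert` has no `on_conflict_do_update`, so a dialect
# insert is passed in; SQLite and the file name below are assumptions.
#
#     from sqlalchemy import create_engine
#     from sqlalchemy.dialects.sqlite import insert as sqlite_insert
#
#     engine = create_engine('sqlite:///watersmart.db')
#     storage = SQLStorageClass(engine, insert=sqlite_insert)
#     storage.save_hourly([
#         {'ts': 1672531200, 'gallons': 12.5, 'leak_gallons': 0.0, 'flags': None},
#     ])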
|
/santacruz_watersmart-0.1.1.tar.gz/santacruz_watersmart-0.1.1/santacruz_watersmart/storage_class.py
| 0.752649 | 0.363534 |
storage_class.py
|
pypi
|
from functools import wraps
from inspect import isawaitable
from typing import Any, Dict, Optional, Type, get_args, get_origin, get_type_hints
from pydantic import ValidationError
from sanic import Request
from sanic.exceptions import SanicException
from .fields import MethodType, SanticModel
from .utils import validate_method_fields
ARRAY_TYPES = {list, tuple}
def has_array_type(hint_value) -> Optional[bool]:
if get_origin(hint_value) in ARRAY_TYPES:
return True
list_args = get_args(hint_value)
for args in list_args:
origin = get_origin(args)
if origin == MethodType:
origin = get_origin(args.__args__[0])
if origin in ARRAY_TYPES:
return True
def clean_data(schema, raw_data) -> Dict[str, Any]:
hints = get_type_hints(schema)
data = {}
for key in raw_data:
if not hints.get(key):
continue
is_array = has_array_type(hints[key])
if is_array:
value = raw_data.getlist(key)
else:
value = raw_data.get(key)
if value:
data[key] = value
return data
def validate_schema(
body: Optional[Type[object]] = None,
query: Optional[Type[object]] = None,
method_replace_value=False,
):
"""
Simple validation
"""
def decorator(f):
@wraps(f)
async def decorated_function(*args, **kwargs):
if args and isinstance(args[0], Request):
request: Request = args[0]
elif len(args) > 1:
request: Request = args[1]
else:
raise SanicException("Request could not be found")
try:
if query:
cleaned_data = clean_data(
query,
request.args,
)
kwargs["query"] = query(
**cleaned_data,
)
if body:
if request.headers["content-type"] == "application/json":
cleaned_data = request.json
else:
cleaned_data = clean_data(
body,
request.form,
)
kwargs["body"] = body(**cleaned_data)
if isinstance(kwargs["body"], SanticModel):
if kwargs.get("query"):
kwargs["body"]._context = {"query": kwargs["query"]}
await validate_method_fields(
kwargs["body"], replace_value=method_replace_value
)
except ValidationError as err:
raise SanicException(
"Validation error",
context={
"detail": {
error["loc"][0]: error["msg"] for error in err.errors()
}
},
status_code=400,
)
retval = f(*args, **kwargs)
if isawaitable(retval):
retval = await retval
return retval
return decorated_function
return decorator
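# Illustrative usage sketch (not part of the original module). The route,
# app name, and pydantic model below are hypothetical:
#
#     from pydantic import BaseModel
#     from sanic import Sanic
#     from sanic.response import json
#
#     class SearchQuery(BaseModel):
#         term: str
#         limit: int = 10
#
#     app = Sanic("demo")
#
#     @app.get("/search")
#     @validate_schema(query=SearchQuery)
#     async def search(request, query: SearchQuery):
#         return json({"term": query.term, "limit": query.limit})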
|
/santic_validation-0.0.5-py3-none-any.whl/santic_validation/decorator.py
| 0.738386 | 0.208783 |
decorator.py
|
pypi
|
[](https://badge.fury.io/py/santoku)
[](https://github.com/wiris/santoku/actions/workflows/cd.yml)
[](https://github.com/psf/black)
# What is Santoku?
Santoku is a toolkit written in Python for interacting with AWS, Google Cloud platform, Salesforce and Slack.
The purpose of Santoku is to have the interactions with all the external services collected in a single package. The package contains wrappers around the respective APIs and high level methods for the most common patterns in order to simplify the interaction with those services, whether by being shorter to type, more descriptive, more specific to our needs or simply easier to read for developers.
## Quickstart
### Installation
If you have the wheel, you can install it with:
```bash
pip install --upgrade --force-reinstall dist/santoku-*.whl
```
Run the following command to install it from PyPI:
```bash
pip install santoku
```
Or use the following to install it via Poetry
```bash
poetry add santoku
```
### How To Use It
You can use the package as follows:
```python
from santoku.slack.slack_bot_handler import SlackBotHandler
slack_bot = SlackBotHandler.from_aws_secrets_manager("your_secret")
```
## Content
The package `santoku` contains several subpackages: `aws`, `google`, `salesforce`, `slack`, `sql`. Each subpackage provides connection to different external services and are formed by a collection of modules, where each module consists of handlers for more specific services. Each handler class has unit tests to ensure the correct behaviour of the methods of these classes.
### AWS
AWS (Amazon Web Services) is a cloud computing platform that provides a set of primitive abstract technical infrastructure and distributed computing building blocks and tools.
The connection to AWS has been done through the AWS SDK, which in Python is called [boto3](https://github.com/boto/boto3). We provide wrappers of the `boto3` SDK to make easy the operations to interact with different services.
The use of this subpackage requires having AWS credentials somewhere. We provide flexibility to either keep credentials in AWS credentials/configuration file, set environment variables, or to pass them directly as arguments in the initializer of each handler class. More info on AWS configurations and credentials [here](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html).
The unit tests in this subpackage mock the AWS services and are not intended to access or modify the environment of your real account. In order to have safer unit tests for the AWS subpackage, we use moto, a mocking library for most AWS services, which allows our methods to interact with a fully mocked version of the AWS environment via decorators while not needing an actual connection to the internet or a test AWS account.
#### Amazon S3
Amazon Simple Storage Service (Amazon S3) is an object storage service that offers scalability, data availability, security, and performance.
We provide methods to easily list and delete objects inside buckets; read and write content within S3 objects; upload a dataframe in csv or parquet format to a specific location; generate and upload an Amazon Quicksight manifest in S3 in order to create analyses in Amazon Quicksight; and so on.
An object can be uploaded to S3 with the following:
```python
from santoku.aws.s3_handler import S3Handler
s3_handler = S3Handler()
s3_handler.put_object(bucket="your_bucket_name", object_key="your_object_key", content="Your object content.")
```
#### AWS Secrets Manager
AWS Secrets Manager protects secrets needed to access applications, services, and IT resources. The service allows rotating, managing, and retrieving credentials, keys, and other secrets.
We provide methods to get the content of a previously created secret.
```python
from santoku.aws.secrets_manager_handler import SecretsManagerHandler
secrets_manager = SecretsManagerHandler()
secret_content = secrets_manager.get_secret_value(secret_name="your_secret_name")
```
We use this service as our default credential manager. Most classes that require some form of authentication in santoku are provided with alternative class methods that retrieve the credentials directly from Secrets Manager. For example, instead of directly providing credentials to the BigQuery handling class, we simply provide it with the name of the secret where they are stored:
```python
from santoku.google.bigquery import BigQueryHandler
bigquery_handler = BigQueryHandler(
type="your_type",
project_id="your_project_id"
private_key_id="your_private_key_id"
private_key="your_private_key"
client_email="your_client_email"
client_id="your_client_id"
auth_uri="your_auth_uri"
token_uri="your_token_uri"
auth_provider_x509_cert_url="your_auth_provider_x509_cert_url"
client_x509_cert_url="your_client_x509_cert_url"
)
```
or
```python
bigquery_handler = BigQueryHandler.from_aws_secrets_manager(
secret_name="your_secret_name"
)
```
#### Amazon Simple Queue Service
Amazon Simple Queue Service (SQS) is a fully managed message queuing service that supports programmatic sending of messages via web service applications as a way to communicate over the Internet.
We provide methods to receive, delete, and send single or a batch of messages.
```python
from santoku.aws.sqs_handler import SQSHandler
sqs_handler = SQSHandler()
entries = [
{
"Id": "Id1",
"MessageBody": "Your message 1",
},
{
"Id": "Id2",
"MessageBody": "Your message 1",
}
]
sqs_handler.send_message_batch(queue_name="your_queue_name", entries=entries)
```
### Google Cloud Platform
Google Cloud Platform a suite of cloud computing services provided by Google that runs on the same Cloud infrastructure that Google uses internally for its end-user products.
The connection to Google Cloud Platform has been done using the [google-cloud-core](https://googleapis.dev/python/google-api-core/latest/index.html) package.
The use of this subpackage requires having [Google Cloud Platform credentials](https://cloud.google.com/docs/authentication/production#obtaining_and_providing_service_account_credentials_manually) (in this case, a service account for programmatic access), these can be passed as arguments in the initializer of the handler class directly, or you can store them in AWS Secrets Manager and retrieve them during the initialization using the class method instead.
We provide a handler that allows doing queries on BigQuery services:
```python
query_results = bigquery_handler.get_query_results(query="SELECT * FROM `your_table`")
```
### Salesforce
Salesforce is a Customer Relationship Management (CRM) platform that gives the marketing, sales, commerce, and service departments a single, shared view of every customer.
The connection to Salesforce has been done using the [Salesforce REST API](https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/quickstart.htm).
The use of this subpackage requires having Salesforce credentials, these can be passed as arguments in the initializer of the handler class directly, or you can store them in AWS Secrets Manager and retrieve them during the initialization using the class method instead.
This subpackage provides methods to insert/modify/delete Salesforce object records. You can perform operations by doing HTTP requests directly or by using methods with a higher level of abstraction, which are easier to handle. The latter are just wrappers of the HTTP request method. To obtain records you can perform queries using SOQL.
The unit tests require valid Salesforce credentials to be executed. The tests are implemented in such a way that no new data will remain in the account and no existing data will be modified. However, having Salesforce credentials for a sandbox is recommended.
You can use the package to perform a request as follows.
```python
from santoku.salesforce.objects_handler import ObjectsHandler
objects_handler = ObjectsHandler(
auth_url="your_auth_url",
username="your_username",
password="your_password",
client_id="your_client_id",
client_secret="your_client_secret",
)
contact_payload = {"FirstName": "Alice", "LastName": "Ackerman", "Email": "[email protected]"}
objects_handler.do_request(method="POST", path="sobjects/Contact", payload=contact_payload)
```
or insert a record with
```python
objects_handler.insert_record(sobject="Contact", payload=contact_payload)
```
Finally, you can do a SOQL query with:
```python
records = objects_handler.do_query_with_SOQL("SELECT Id, Name from Contact")
```
### Slack
Slack is a proprietary business communication platform. A Slack Bot is a nifty way to run code and automate tasks. In Slack, a bot is controlled programmatically via a bot user token that can access one or more of Slack’s APIs.
The connection to Slack has been done using the [Slack Web API](https://slack.dev/python-slackclient/basic_usage.html).
The use of this subpackage requires the Slack API token of a Slack Bot, which can be passed as an argument in the initializer of the handler class directly, or you can store it in AWS Secrets Manager and retrieve it during the initialization using the class method instead.
This subpackage provides methods to send messages to a channel. A message can be sent with:
```python
from santoku.slack.slack_bot_handler import SlackBotHandler
slack_bot_handler = SlackBotHandler(api_token="your_api_token")
slack_bot_handler.send_message(channel="your_chanel_name", message="Your message.")
```
### SQL
SQL (Structured Query Language) is a domain-specific language designed for managing data held in a relational database management system (RDBMS). The purpose of this subpackage is to provide connection to different RDBMSs.
#### MySQL
MySQL is an open-source RDBMS. The connection to MySQL has been done using the [MySQL Connector for python](https://dev.mysql.com/doc/connector-python/en/).
The use of this subpackage requires MySQL authentication parameters, which can be passed as arguments in the initializer of the handler class directly, or you can store them in AWS Secrets Manager and retrieve them during the initialization using the class method instead.
This subpackage provides methods to do queries and retrieve the results in different forms.
```python
from santoku.sql.mysql_handler import MySQLHandler
mysql_handler = MySQLHandler(user="your_user", password="your_password", host="your_host", database="your_database")
mysql_handler.get_query_results(query="SELECT * FROM your_table")
```
## Development
### Project
We use Poetry to handle this project. Poetry is a tool for dependency management and packaging in Python. It allows you to declare the libraries your project depends on and it will manage (install/update) them for you. More details in [their documentation](https://python-poetry.org/docs/basic-usage/).
Poetry is already included in the development environment.
#### Dependencies
If you want to add dependencies to your project, you can specify them in the `tool.poetry.dependencies` section of the `pyproject.toml` file. Also, instead of modifying the `pyproject.toml` file by hand, you can use the add command:
```bash
poetry add <package_name>
```
Poetry will automatically find a suitable version constraint and install the package and subdependencies.
**Note:** Remember to commit changes of `poetry.lock` and `pyproject.toml` files after adding a new dependency.
**Note:** You can find more details on how to handle versions of packages [here](https://python-poetry.org/docs/dependency-specification/).
### Environment
We provide a development environment that uses the Visual Studio Code Remote - Containers extension. This extension lets you use a Docker container in order to have a consistent and easily reproducible development environment.
The files needed to build the container are located in the `.devcontainer` directory:
* `devcontainer.json` contains a set of configurations, tells VSCode how to access the container and which extensions it should install.
* `Dockerfile` defines instructions for the building of the container image.
More info [here](https://code.visualstudio.com/docs/remote/containers-tutorial).
#### Environment Variables
The containerized environment will automatically expose the variables stored in a `.env` file at the top level of the repository as environment variables. For example, this is required for certain tests that need credentials, which are (of course) not versioned. Be aware that:
* The Docker image building process will **fail** if you do not include a `.env` file at the top level of the repository.
* If you change the contents of the `.env` file you will need to rebuild the container for the changes to take effect within the environment.
#### Sharing Git credentials with your container
The containerized environment will automatically forward your local SSH agent if one is running. More info [here](https://code.visualstudio.com/docs/remote/containers#_using-ssh-keys). It works for Windows and Linux.
### Creating a PR
Create a pull request (PR) to propose and collaborate on changes to the project. These changes MUST BE proposed in a branch, which ensures that the main branch only contains finished and approved work.
Be sure to run tests locally before committing your changes.
#### Running tests
The tests are implemented with pytest and there are unit tests for each of the handler modules. Tests in the `aws` subpackage implement mocks to S3 and do not require real credentials, however, the remaining tests in other subpackages do.
To run the tests just execute `pytest` if you are already inside Poetry virtual environment or `poetry run pytest`.
Moreover, when a PR is created a GitHub Actions CI pipeline (see [`.github/workflows/ci.yml`](./.github/workflows/ci.yml)) is executed. This pipeline is in charge of running tests.
### Release
Wheel is automatically created and uploaded to PyPI by the GitHub Actions CD pipeline (see [`.github/workflows/cd.yml`](./.github/workflows/cd.yml)) when the PR is merged in main branch.
## Why Santoku?
From Wikipedia:
```text
The Santoku bōchō (Japanese: 三徳包丁; "three virtues" or "three uses") or Bunka bōchō (文化包丁) is a general-purpose kitchen knife originating in Japan. Its blade is typically between 13 and 20 cm (5 and 8 in) long, and has a flat edge and a sheepsfoot blade that curves down an angle approaching 60 degrees at the point. The term Santoku may refer to the wide variety of ingredients that the knife can handle: meat, fish and vegetables, or to the tasks it can perform: slicing, chopping and dicing, either interpretation indicating a multi-use, general-purpose kitchen knife.
```
|
/santoku-221018.38.tar.gz/santoku-221018.38/README.md
| 0.566019 | 0.961353 |
README.md
|
pypi
|
from .type import Type
from .node import Node
from .graph import Graph
from .runtime_error import RuntimeError
class Arc(Type):
def __init__(self, source=None, target=None, weight=0.0, type_="directed"):
self.source = source or Node(0.0)
self.target = target or Node(0.0)
self.weight = weight
self.type = type_
def setWeight(self, weight):
self.weight = weight
return self
def setType(self, type_):
self.type = type_
return self
def setSource(self, source):
self.source = source
return self
def setTarget(self, target):
self.target = target
return self
def cast(self, type_):
from .num import Num
from .logic import Logic
if type_ == "arc":
return self
elif type_ == "graph":
return Graph(tuple([self]))
elif type_ == "num":
return Num(self.weight)
elif type_ == "logic":
return Logic(True) if self.source and self.target else Logic(False)
else:
RuntimeError.cast_error("arc", type_)
def put(self):
self.source.put()
print(f" {'<' if not self._is_directed else ''}-[{self.weight}]-> ", end="")
self.target.put()
def summation(self, value):
self.weight += value.value
return self
def subtraction(self, value):
self.weight -= value.value
return self
def multiplication(self, value):
self.weight *= value.value
return self
def division(self, value):
self.weight /= value.value
return self
def greater_or_equal(self, value):
from .logic import Logic
return Logic(self.weight >= value.cast("arc").weight)
def less_or_equal(self, value):
from .logic import Logic
return Logic(self.weight <= value.cast("arc").weight)
def greater(self, value):
from .logic import Logic
return Logic(self.weight > value.cast("arc").weight)
def less(self, value):
from .logic import Logic
return Logic(self.weight < value.cast("arc").weight)
def _is_directed(self):
return True if self.type == "directed" else False
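# Illustrative usage (not part of the original module); Node is assumed to
# accept a numeric value, as in the defaults above.
#
#     a = Arc(Node(1.0), Node(2.0), weight=0.5)
#     a.setWeight(1.5).setType("directed")
#     g = a.cast("graph")   # a Graph containing the arc and both its nodes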
|
/sanya_script_runtime-0.1.5.tar.gz/sanya_script_runtime-0.1.5/sanya_script_runtime/arc.py
| 0.688049 | 0.169286 |
arc.py
|
pypi
|
from .type import Type
from .runtime_error import RuntimeError
class Graph(Type):
def __init__(self, elements=()):
self.nodes = set()
self.arcs = set()
self.set_elements(list(elements))
def set_elements(self, elements):
self._resolve_elements(elements)
def cast(self, type_):
from .logic import Logic
if type_ == "graph":
return self
elif type_ == "logic":
return Logic(True)
else:
RuntimeError.cast_error("graph", type_)
def all_elements(self):
return self.nodes.union(self.arcs)
def put(self):
for arc in self.arcs:
arc.put()
for i, node in enumerate(self.nodes):
if i != 0: print(", ", end="")
node.put()
def summation(self, value):
return self.__class__(self.all_elements().union(value.all_elements()))
def subtraction(self, value):
return self.__class__(self.all_elements().difference(value.all_elements()))
# TODO: operations
def multiplication(self, value):
pass
def division(self, value):
pass
# TODO: comparison operators are not implemented yet; the imports below are placeholders
def greater_or_equal(self, value):
from .logic import Logic
def less_or_equal(self, value):
from .logic import Logic
def greater(self, value):
from .logic import Logic
def less(self, value):
from .logic import Logic
def _resolve_elements(self, elements):
for element in elements:
if element.__class__.__name__ == "Node":
self.nodes.add(element)
elif element.__class__.__name__ == "Arc":
self._resolve_arc(element)
elif element.__class__.__name__ == "Graph":
self._resolve_graph(element)
def _resolve_arc(self, arc):
self.arcs.add(arc)
self.nodes.update([arc.source, arc.target])
def _resolve_graph(self, graph):
self.nodes = self.nodes.union(graph.nodes)
for arc in graph.arcs:
self._resolve_arc(arc)
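# Hypothetical usage sketch (not part of the original module): building a Graph from
# nodes and arcs and merging two graphs, assuming Node and Arc from this runtime.
def _demo_graph():  # pragma: no cover
    from .node import Node
    from .arc import Arc
    n1, n2 = Node(1.0), Node(2.0)
    g1 = Graph((n1, Arc(source=n1, target=n2)))   # the arc also registers its endpoint nodes
    g2 = Graph((Node(3.0),))
    merged = g1.summation(g2)                     # union of all nodes and arcs
    return merged.all_elements()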
|
/sanya_script_runtime-0.1.5.tar.gz/sanya_script_runtime-0.1.5/sanya_script_runtime/graph.py
| 0.480966 | 0.186502 |
graph.py
|
pypi
|
import getopt
import io
import signal
import sys
import unicodedata
USAGE = """Usage: szu-t [options] table_file [file ...]
Translate CJK text using a translation table.
Options:
-h, --help print this help message and exit
-v, --verbose include information useful for debugging
"""
def set_stdio_utf8():
"""
Set standard I/O streams to UTF-8.
Attempt to reassign standard I/O streams to new streams using UTF-8.
Standard input should discard any leading BOM. If an error is raised,
assume the environment is inflexible but correct (IDLE).
"""
try:
sys.stdin = io.TextIOWrapper(
sys.stdin.detach(), encoding='utf-8-sig', line_buffering=True)
sys.stdout = io.TextIOWrapper(
sys.stdout.detach(), encoding='utf-8', line_buffering=True)
sys.stderr = io.TextIOWrapper(
sys.stderr.detach(), encoding='utf-8', line_buffering=True)
except io.UnsupportedOperation:
pass
def read_table(table_fd):
"""
Read a translation table from an opened file.
Given an open file object, read a well-formatted translation table and
return its contents to the caller.
"""
table_str = unicodedata.normalize('NFC', table_fd.read())
table = []
for line in table_str.split('\n'):
stripped = line.strip()
if stripped != '':
table.append(stripped.split('|'))
return table
def vocab(table, text):
"""
Return a new table containing only the vocabulary in the source text.
Create a new translation table containing only the rules that are
relevant for the given text. This is created by checking all source
terms against a copy of the text.
"""
text_rules = []
text_copy = str(text)
for rec in table:
if rec[0] in text_copy:
text_copy = text_copy.replace(rec[0], '\x1f')
text_rules.append(rec)
return text_rules
def tr_raw(table, text):
"""
Translate text using a table. Return raw texts in a list.
Perform translation of a text by applying the rules in a translation
table. The result is a list of strings with each element corresponding
to a column in the translation table.
"""
text = unicodedata.normalize('NFC', text).replace('\x1f', '')
rules = vocab(table, text)
collection = [text]
for col_no in range(1, len(table[0])):
trans = text
for rec in rules:
trans = trans.replace(rec[0], '\x1f' + rec[col_no] + '\x1f')
trans = trans.replace('\x1f\n', '\n')
trans = trans.replace('\x1f\x1f', ' ')
trans = trans.replace('\x1f', ' ')
collection.append(trans)
return collection
def tr_fmt(table, buffer, start):
"""
Translate text using a table. Return a formatted listing string.
Perform translation of a text by applying rules in a translation table,
and return a formatted string. The formatted string represents the
source text and its translations collated together and organized by
line number and by translation table column number.
"""
collection = tr_raw(table, buffer)
for i in range(0, len(collection)):
collection[i] = collection[i].rstrip().split('\n')
listing = ''
for line_no in range(0, len(collection[0])):
for col_idx in range(0, len(table[0])):
listing += '%d.%d|%s\n' % (
start + line_no,
col_idx + 1,
collection[col_idx][line_no])
listing += '\n'
return listing
def tr_file(table, fd_in, fd_out, start_idx=1, buf_size=100):
"""
Translate from one file to another (buffered).
Given a table, an input file object, and an output file object, apply
the translation table rules to the input text and write the translation
as a formatted string to the output.
"""
str_buf = ''
line_no = start_idx
for line in fd_in:
str_buf += line
if line_no % buf_size == 0:
fd_out.write(tr_fmt(table, str_buf, line_no - buf_size + 1))
str_buf = ''
line_no += 1
if len(str_buf) > 0:
position = line_no - str_buf.count('\n')
fd_out.write(tr_fmt(table, str_buf, position))
return line_no
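# Hypothetical example (not part of the original script): the translation functions can
# also be used programmatically with an in-memory table, one list per '|'-separated row.
def _demo_tr_raw():  # pragma: no cover
    table = [['三', 'three'], ['藏', 'treasury']]
    return tr_raw(table, '三藏\n')   # -> ['三藏\n', ' three treasury\n']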
def main(argv):
"""
Run as a portable command-line program.
This program reads and writes UTF-8 text, and uses standard I/O streams
for input text and translation output. Input has any leading byte-order
marks stripped out from the beginning of the input stream. Broken pipes
and SIGINT are handled silently.
"""
set_stdio_utf8()
if 'SIGPIPE' in dir(signal):
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
verbose = False
try:
opts, args = getopt.getopt(argv[1:], 'hv', ['help', 'verbose'])
for option, _ in opts:
if option in ('-h', '--help'):
print(USAGE, end='')
return 0
if option in ('-v', '--verbose'):
verbose = True
if len(args) < 1:
sys.stderr.write(USAGE)
return 1
with open(args[0], 'r', encoding='utf-8-sig') as table_fd:
table = read_table(table_fd)
if len(args) == 1:
if sys.stdin.isatty():
tr_file(table, sys.stdin, sys.stdout, start_idx=1, buf_size=1)
else:
tr_file(table, sys.stdin, sys.stdout)
else:
idx = 1
for file_path in args[1:]:
with open(file_path, 'r', encoding='utf-8-sig') as fin:
idx = tr_file(table, fin, sys.stdout, idx)
return 0
except KeyboardInterrupt:
print()
return 1
except Exception as err:
if verbose:
raise
else:
sys.stderr.write('szu-t: ' + str(err) + '\n')
return 1
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
/sanzang-utils-1.3.3.tar.gz/sanzang-utils-1.3.3/szu_t.py
| 0.417034 | 0.301542 |
szu_t.py
|
pypi
|
from sap.audit_logging.util import check_boolean, check_non_empty_string, validate_object
from sap.audit_logging.messages.audit_message import AuditMessage
DATA_ACCESS_ENDPOINT = '/data-accesses'
class DataAccessMessage(AuditMessage):
''' DataAccessMessage '''
def __init__(self, logger):
# pylint: disable=super-with-arguments
super(DataAccessMessage, self).__init__(logger, DATA_ACCESS_ENDPOINT)
self._mandatory_properties.extend(['attributes', 'object', 'data_subject'])
def set_channel(self, channel):
'''
Sets the data access channel type (e.g. RFC, web service, IDOC,
file based interfaces, user interface, spool, printing etc.).
:param channel: The type of data access channel
'''
check_non_empty_string(channel, 'channel')
self._message['channel'] = channel
return self
def set_object(self, object_attributes):
'''
Sets the object properties.
:param object_attributes: Dict containing the object attributes.
'''
validate_object(object_attributes, 'object_attributes')
self._message['object'] = object_attributes
return self
def set_data_subject(self, data_subject):
'''
Sets the data_subject properties.
:param data_subject: Dict containing the data_subject attributes.
'''
validate_object(data_subject, 'data_subject')
self._message['data_subject'] = data_subject
return self
def add_attribute(self, name, is_successful):
'''
Adds an attribute that has been read, recording whether the read was successful or not.
:param name: The attribute name
:param is_successful: Whether or not the access to the attribute was successful
'''
check_boolean(is_successful, 'is_successful')
self._add_entity({
'name': name,
'successful': is_successful
}, 'attributes', 'name')
return self
def add_attachment(self, attachment_id, name):
'''
Adds an attachment name and ID in case the event is triggered
by the download or display of attachments or files.
:param attachment_id: Attachment id
:param name: Attachment name
'''
check_non_empty_string(name, 'name')
self._add_entity({
'id': attachment_id,
'name': name
}, 'attachments', 'id')
return self
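# Hypothetical usage sketch (not part of the original module): `audit_logger` stands in
# for an already configured audit-log service client, and the dict layouts passed to
# set_object/set_data_subject are illustrative only.
def _demo_data_access_message(audit_logger):  # pragma: no cover
    message = DataAccessMessage(audit_logger)
    message.set_channel('web service') \
        .set_object({'type': 'online system', 'id': {'name': 'CRM'}}) \
        .set_data_subject({'type': 'customer', 'id': {'email': 'jane.doe@example.com'}}) \
        .add_attribute('credit_limit', True) \
        .add_attachment('attachment-1', 'statement.pdf')
    return message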
|
/sap_audit_logging-1.3.1-py3-none-any.whl/sap/audit_logging/messages/data_access_message.py
| 0.744192 | 0.209996 |
data_access_message.py
|
pypi
|
from concurrent.futures import ThreadPoolExecutor
import json
import logging
import os
import time
from typing import Iterator, List, Union
from sap_business_document_processing.common.http_client_base import CommonClient
from sap_business_document_processing.common.helpers import get_ground_truth_json, function_wrap_errors
from .constants import API_DATASET_ID_FIELD, API_DATASETS_FIELD, API_DEPLOYMENT_ID_FIELD, API_DEPLOYMENTS_FIELD, \
API_DOCUMENT_FIELD, API_DOCUMENT_ID_FIELD, API_GROUND_TRUTH_FIELD, API_MIME_TYPE_FIELD, API_MODELS_FIELD, \
API_MODEL_NAME_FIELD, API_MODEL_VERSION_FIELD, API_PAGINATION_COUNT_PARAM, API_PAGINATION_SKIP_PARAM, \
API_PAGINATION_TOP_PARAM, API_PARAMETERS_FIELD, API_RESULTS_FIELD, API_STRATIFICATION_SET_FIELD, DATASETS_ENDPOINT,\
DATASET_DOCUMENTS_ENDPOINT, DATASET_DOCUMENT_ENDPOINT, DATASET_ENDPOINT, DEPLOYMENTS_ENDPOINT, DOCUMENTS_ENDPOINT, \
DOCUMENT_RESULT_ENDPOINT, MODEL_DEPLOYMENT_ENDPOINT, MODEL_TRAINING_JOBS_ENDPOINT, TRAINED_MODELS_ENDPOINT, \
TRAINED_MODEL_ENDPOINT
class DCApiClient(CommonClient):
"""
This class provides an interface to access SAP Document Classification REST API from a Python application.
The structure of values returned by all the methods is documented in the API reference:
https://help.sap.com/viewer/ca60cd2ed44f4261a3ae500234c46f37/SHIP/en-US/c1045a561faf4ba0ae2b0e7713f5e6c4.html
:param base_url: The service URL taken from the service key (key 'url' in service key JSON)
:param client_id: The XSUAA client ID taken from the service key (key 'uaa.clientid' in service key JSON)
:param client_secret: The XSUAA client secret taken from the service key (key 'uaa.clientsecret' in service key JSON)
:param uaa_url: The XSUAA URL taken from the service key (key 'uaa.url' in service key JSON)
:param polling_threads: Number of threads used to poll for asynchronous DC APIs
:param polling_sleep: Number of seconds to wait between the polling attempts for most of the APIs,
the minimal value is 0.2
:param polling_long_sleep: Number of seconds to wait between the polling attempts for model training and
deployment operations, the minimal value is 0.2
:param polling_max_attempts: Maximum number of attempts used to poll for asynchronous DC APIs
:param logging_level: INFO level will log the operations' progress; the default level WARNING should not
produce any logs
"""
def __init__(self,
base_url,
client_id,
client_secret,
uaa_url,
url_path_prefix='document-classification/v1/',
polling_threads=5,
polling_sleep=5,
polling_long_sleep=30,
polling_max_attempts=120,
logging_level=logging.WARNING):
"""
Creates a new instance of a client object to access the SAP Document Classification service
"""
super(DCApiClient, self).__init__(base_url=base_url,
client_id=client_id,
client_secret=client_secret,
uaa_url=uaa_url,
polling_threads=polling_threads,
polling_sleep=polling_sleep,
polling_long_sleep=polling_long_sleep,
polling_max_attempts=polling_max_attempts,
url_path_prefix=url_path_prefix,
logger_name='DCApiClient',
logging_level=logging_level)
# Inference
def classify_document(self, document_path: str, model_name, model_version, reference_id=None, mime_type=None) -> dict:
"""
Submits request for document classification, checks the response and returns the reference ID for the
uploaded document
:param document_path: Path to the PDF file on the disk
:param model_name: The name of the model that was successfully deployed to be used for the classification
:param model_version: The version of the model that was successfully deployed to be used for the classification
:param reference_id: In case the document reference ID has to be managed by the user, it can be specified.
In this case the user is responsible for providing unique reference IDs for different documents
:param mime_type: The file type of the document uploaded
:return: Object containing the reference ID of the classified document and the classification results
"""
options = {}
if reference_id is not None:
options[API_DOCUMENT_ID_FIELD] = reference_id
if mime_type is not None:
options[API_MIME_TYPE_FIELD] = mime_type
data = {API_PARAMETERS_FIELD: json.dumps(options)}
with open(document_path, 'rb') as file:
response = self.post(DOCUMENTS_ENDPOINT(modelName=model_name, modelVersion=model_version),
files={API_DOCUMENT_FIELD: file},
data=data,
log_msg_before=f'Submitting document {document_path} for classification',
log_msg_after=f'Document {document_path} submitted for classification '
f'successfully, waiting for result')
classification_job = response.json()
result = self._poll_for_url(DOCUMENT_RESULT_ENDPOINT(modelName=model_name,
modelVersion=model_version,
id=classification_job[API_DOCUMENT_ID_FIELD]),
wait_status=409).json()
return result
def _classify_document_wrap_errors(self,
document_path,
model_name,
model_version,
reference_id=None,
mime_type=None):
result = function_wrap_errors(self.classify_document, document_path, model_name, model_version,
reference_id, mime_type)
if isinstance(result, Exception):
result.document_path = document_path
else:
result['document_path'] = document_path
return result
def classify_documents(self, documents_paths: List[str], model_name, model_version) -> Iterator[dict]:
"""
Submits requests for classification of multiple documents, checks the response and returns the reference ID
for the classified documents
:param documents_paths: Paths to the PDF files on the disk
:param model_name: The name of the model that was successfully deployed to be used for the classification
:param model_version: The version of the model that was successfully deployed to be used for the classification
:return: An iterator of objects containing the reference ID of the classified document and the classification
results
"""
number_of_documents = len(documents_paths)
assert number_of_documents > 0, 'Please supply at least one document'
self.logger.debug(f'Started classification of {number_of_documents} documents against the model {model_name}'
f' with version {model_version} in parallel using {self.polling_threads} threads')
with ThreadPoolExecutor(min(self.polling_threads, number_of_documents)) as pool:
results = pool.map(self._classify_document_wrap_errors, documents_paths, [model_name] * number_of_documents,
[model_version] * number_of_documents)
self.logger.info(f'Finished classification of {number_of_documents} documents against the model {model_name} '
f'with version {model_version}')
return self._create_result_iterator(results)
# Training
def create_dataset(self) -> dict:
"""
Creates an empty dataset
:return: Object containing the dataset id
"""
response = self.post(DATASETS_ENDPOINT,
log_msg_before='Creating a new dataset',
log_msg_after='Successfully created a new dataset')
return response.json()
def delete_dataset(self, dataset_id) -> dict:
"""
Deletes a dataset with a given ID
:param dataset_id: The ID of the dataset to delete
:return: Object containing the ID of the deleted dataset and the number of documents deleted
"""
response = self.delete(DATASET_ENDPOINT(dataset_id=dataset_id),
log_msg_before=f'Deleting the dataset {dataset_id}',
log_msg_after=f'Successfully deleted the dataset {dataset_id}')
return response.json()
def delete_training_document(self, dataset_id, document_id) -> {}:
"""
Deletes a training document from a dataset
:param dataset_id: The ID of the dataset where the document is located
:param document_id: The reference ID of the document
:return: An empty object
"""
response = self.delete(DATASET_DOCUMENT_ENDPOINT(dataset_id=dataset_id, document_id=document_id),
log_msg_before=f'Deleting the document {document_id} from the dataset {dataset_id}',
log_msg_after=f'Successfully deleted the document {document_id} from the '
f'dataset {dataset_id}')
return response.json()
def get_dataset_info(self, dataset_id) -> dict:
"""
Gets statistical information about a dataset with a given ID
:param dataset_id: The ID of the dataset
:return: Summary information about the dataset that includes the number of documents in different processing
stages
"""
response = self.get(DATASET_ENDPOINT(dataset_id=dataset_id),
log_msg_before=f'Getting information about the dataset {dataset_id}',
log_msg_after=f'Successfully got the information about the dataset {dataset_id}')
return response.json()
def get_datasets_info(self) -> List[dict]:
"""
Gets summary information about the existing datasets
:return: An array of datasets corresponding to the 'datasets' part of the json response
"""
response = self.get(DATASETS_ENDPOINT,
log_msg_before='Getting information about datasets',
log_msg_after='Successfully got the information about the datasets')
return response.json()[API_DATASETS_FIELD]
def get_dataset_documents_info(self, dataset_id, top: int = None, skip: int = None, count: bool = None) -> dict:
"""
Gets the information about all the documents in a specific dataset
:param dataset_id: The ID of an existing dataset
:param top: Pagination: number of documents to be fetched in the current request
:param skip: Pagination: number of documents to skip for the current request
:param count: Flag to show count of number of documents in the dataset
:return: Object that contains an array of the documents
"""
params = {}
if top is not None:
params[API_PAGINATION_TOP_PARAM] = top
if skip is not None:
params[API_PAGINATION_SKIP_PARAM] = skip
if count is not None:
params[API_PAGINATION_COUNT_PARAM] = count
response = self.get(DATASET_DOCUMENTS_ENDPOINT(dataset_id=dataset_id), params=params,
log_msg_before=f'Getting information about the documents in the dataset {dataset_id}',
log_msg_after=f'Successfully got the information about the documents in the '
f'dataset {dataset_id}')
return response.json()
def get_classification_documents_info(self, model_name, model_version) -> List[dict]:
"""
Gets the information about recently classified documents
:param model_name: The name of the model against which the documents were classified
:param model_version: The version of the model against which the documents were classified
:return: An array of document information corresponding to the 'results' part of the json response. Each document
information includes its reference ID and the classification status.
"""
response = self.get(DOCUMENTS_ENDPOINT(modelName=model_name, modelVersion=model_version),
log_msg_before=f'Getting information about documents that were recently classified against '
f'the model {model_name} with version {model_version}',
log_msg_after=f'Successfully got the information about the documents that were recently '
f'classified against the model {model_name} with version {model_version}')
return response.json()[API_RESULTS_FIELD]
def upload_document_to_dataset(self, dataset_id, document_path: str, ground_truth: Union[str, dict],
document_id=None, mime_type=None, stratification_set=None) -> dict:
"""
Uploads a single document and its ground truth to a specific dataset
:param dataset_id: The ID of the dataset
:param document_path: The path to the PDF document
:param ground_truth: Path to the ground truth JSON file or an object representing the ground truth
:param document_id: The reference ID of the document
:param mime_type: The file type of the document
:param stratification_set: Defines a custom stratification set (training/validation/test)
:return: Object containing information about the uploaded document
"""
ground_truth_json = get_ground_truth_json(ground_truth)
data = {API_GROUND_TRUTH_FIELD: ground_truth_json}
if document_id is not None:
data[API_DOCUMENT_ID_FIELD] = document_id
if mime_type is not None:
data[API_MIME_TYPE_FIELD] = mime_type
if stratification_set is not None:
data[API_STRATIFICATION_SET_FIELD] = stratification_set
with open(document_path, 'rb') as file:
response = self.post(DATASET_DOCUMENTS_ENDPOINT(dataset_id=dataset_id),
files={API_DOCUMENT_FIELD: file},
data={API_PARAMETERS_FIELD: json.dumps(data)},
log_msg_before=f'Uploading the document {document_path} with ground truth '
f'{str(ground_truth_json)} to the dataset {dataset_id}',
log_msg_after=f'Successfully uploaded the document {document_path} with ground truth '
f'{str(ground_truth_json)} to the dataset {dataset_id}, waiting for the document '
f'processing')
return self._poll_for_url(DATASET_DOCUMENT_ENDPOINT(dataset_id=dataset_id,
document_id=response.json()[API_DOCUMENT_ID_FIELD]),
wait_status=409).json()
def _upload_document_to_dataset_wrap_errors(self, dataset_id, document_path, ground_truth, document_id=None):
result = function_wrap_errors(self.upload_document_to_dataset, dataset_id, document_path, ground_truth,
document_id)
if isinstance(result, Exception):
result.document_path = document_path
else:
result['document_path'] = document_path
return result
def upload_documents_directory_to_dataset(self, dataset_id, path, file_extension='.pdf'):
"""
:param dataset_id: The ID of the dataset to upload the documents to
:param path: Path to a folder containing the document data files and the ground truth JSON files with corresponding names
:param file_extension: The file format of the documents to be uploaded. Default is '.pdf'
:return: An iterator with the upload results
"""
files = self._find_files(path, file_extension)
files_id = [os.path.splitext(os.path.basename(f))[0] for f in files]
assert len(files_id) > 0, 'No training data found'
ground_truth_files = [os.path.join(path, f + '.json') for f in files_id]
assert len(files_id) == len(ground_truth_files), 'The folder has a different number of documents and ' \
'ground truths'
return self.upload_documents_to_dataset(dataset_id=dataset_id,
documents_paths=files,
ground_truths_paths=ground_truth_files)
def upload_documents_to_dataset(self, dataset_id, documents_paths: List[str],
ground_truths_paths: List[Union[str, dict]]) -> Iterator[dict]:
"""
:param dataset_id: The ID of the dataset to upload the documents to
:param documents_paths: The paths of the PDF files
:param ground_truths_paths: The paths of the JSON files containing the ground truths
:return: An iterator with the upload results
"""
number_of_documents = len(documents_paths)
assert number_of_documents > 0, 'Please supply at least one document'
self.logger.debug(f'Started uploading of {number_of_documents} documents to the dataset {dataset_id} in '
f'parallel using {self.polling_threads} threads')
with ThreadPoolExecutor(min(self.polling_threads, number_of_documents)) as pool:
results = pool.map(self._upload_document_to_dataset_wrap_errors, [dataset_id] * number_of_documents,
documents_paths, ground_truths_paths)
self.logger.info(f'Finished uploading of {number_of_documents} documents to the dataset {dataset_id}')
return self._create_result_iterator(results)
# Model training and management
def train_model(self, model_name, dataset_id) -> dict:
"""
Trigger the process to train a new model version for documents classification, based on the documents in the
specific dataset and wait until this process is finished. The process may take significant time to complete
depending on the size of the dataset.
:param model_name: The name of the new model to train
:param dataset_id: The ID of an existing dataset containing enough documents for training
:return: Object containing the statistical data about the trained model, including accuracy, recall and
precision
"""
response = self.post(MODEL_TRAINING_JOBS_ENDPOINT(modelName=model_name), json={API_DATASET_ID_FIELD: dataset_id},
validate=False,
log_msg_before=f'Triggering training of the model {model_name} from the dataset {dataset_id}')
if response.status_code == 409:
time.sleep(self.polling_long_sleep)
return self.train_model(model_name, dataset_id)
self.raise_for_status_with_logging(response)
response_json = response.json()
return self._poll_for_url(TRAINED_MODEL_ENDPOINT(model_name=response_json[API_MODEL_NAME_FIELD],
model_version=response_json[API_MODEL_VERSION_FIELD]),
log_msg_before=f'Triggered training of the model {model_name} from the dataset '
f'{dataset_id}, waiting for the training to complete',
wait_status=409,
sleep_interval=self.polling_long_sleep).json()
def delete_trained_model(self, model_name, model_version) -> {}:
"""
Deletes an existing trained model
:param model_name: Name of the existing model to delete
:param model_version: Version of the existing model to delete
:return: An empty object
"""
response = self.delete(TRAINED_MODEL_ENDPOINT(model_name=model_name, model_version=model_version),
log_msg_before=f'Triggering deletion of the model {model_name} with version {model_version}',
log_msg_after=f'Successfully deleted the model {model_name} with version {model_version}')
return response.json()
def get_trained_models_info(self) -> List[dict]:
"""
Gets information about all trained models
:return: An array of trained models corresponding to the 'models' part of the json response. Each model
information contains training status and training accuracy data.
"""
response = self.get(TRAINED_MODELS_ENDPOINT,
log_msg_before='Getting information about all trained models',
log_msg_after='Successfully got information about all trained models')
return response.json()[API_MODELS_FIELD]
def get_trained_model_info(self, model_name, model_version) -> dict:
"""
Gets information about a specific trained model
:param model_name: The name of the model
:param model_version: The version of the model
:return: Object containing the training status and training accuracy data
"""
response = self.get(TRAINED_MODEL_ENDPOINT(model_name=model_name, model_version=model_version),
log_msg_before=f'Getting information about the model {model_name} with version {model_version}',
log_msg_after=f'Successfully got the information about the model {model_name} with version {model_version}')
return response.json()
# Model deployment
def deploy_model(self, model_name, model_version) -> dict:
"""
Deploys a trained model to be available for inference
:param model_name: The name of the trained model
:param model_version: The version of the trained model
:return: Object containing information about the deployed model serving
"""
response = self.post(DEPLOYMENTS_ENDPOINT, validate=False,
json={
API_MODEL_VERSION_FIELD: model_version,
API_MODEL_NAME_FIELD: model_name
},
log_msg_before=f'Triggering the deployment of the model {model_name} with version {model_version}')
if response.status_code == 409:
# TODO: Change the API to differ between the 409 codes, see: DIGITALCONTENTPROCESSING-709
if 'model is already deployed' in response.text:
self.raise_for_status_with_logging(response)
time.sleep(self.polling_long_sleep)
return self.deploy_model(model_name, model_version)
self.raise_for_status_with_logging(response)
return self._poll_for_url(MODEL_DEPLOYMENT_ENDPOINT(deployment_id=response.json()[API_DEPLOYMENT_ID_FIELD]),
log_msg_before=f'Successfully triggered the deployment of the model {model_name} '
f'with version {model_version}, waiting for the deployment completion',
wait_status=409,
sleep_interval=self.polling_long_sleep).json()
def get_deployed_models_info(self) -> List[dict]:
"""
Gets information about all deployed model servings
:return: An array of all deployed model servings corresponding to the 'deployments' part of the json response
"""
response = self.get(DEPLOYMENTS_ENDPOINT,
log_msg_before='Getting information about all deployed models',
log_msg_after='Successfully got information about all deployed models')
return response.json()[API_DEPLOYMENTS_FIELD]
def get_deployed_model_info(self, model_name_or_deployment_id, model_version=None) -> dict:
"""
Gets information about a specific deployed model serving. This method can be called either with the ID of the
deployed model or with the model name and version
:param model_name_or_deployment_id: The ID of the deployed model or the model name; if the model name is provided,
the version has to be provided as well
:param model_version: The version of the deployed model
:return: Object containing the information about the deployed model serving
"""
if model_version:
deployed_models = self.get_deployed_models_info()
models = [
model for model in deployed_models
if model[API_MODEL_NAME_FIELD] == model_name_or_deployment_id and model[API_MODEL_VERSION_FIELD] == model_version
]
assert len(models) == 1, f"Model with name {model_name_or_deployment_id} and version {model_version} " \
f"does not exist, or more than one deployment exists for given name and version"
self.logger.info(f'Successfully got information about the deployment of the model '
f'{model_name_or_deployment_id} with version {model_version}')
return models[0]
else:
response = self.get(MODEL_DEPLOYMENT_ENDPOINT(deployment_id=model_name_or_deployment_id),
log_msg_before=f'Getting the deployment of the model with ID {model_name_or_deployment_id}',
log_msg_after=f'Successfully got information about the deployment of the model with '
f'ID {model_name_or_deployment_id}')
return response.json()
def undeploy_model(self, model_name_or_deployment_id, model_version=None) -> {}:
"""
Removes a deployment of the specific model serving. This method can be called either with the ID of the
deployed model or with the model name and version
:param model_name_or_deployment_id: The ID of the deployed model or the model name; if the model name is provided,
the version has to be provided as well
:param model_version: The version of the deployed model
:return: An empty object
"""
if model_version:
model_name_or_deployment_id = self.get_deployed_model_info(model_name_or_deployment_id,
model_version)[API_DEPLOYMENT_ID_FIELD]
self.delete(MODEL_DEPLOYMENT_ENDPOINT(deployment_id=model_name_or_deployment_id),
log_msg_before=f'Triggering the removal of the model deployment with ID {model_name_or_deployment_id}',
log_msg_after=f'Successfully triggered the removal of the model deployment with ID '
f'{model_name_or_deployment_id}, waiting for the deployment completion')
return self._poll_for_url(MODEL_DEPLOYMENT_ENDPOINT(deployment_id=model_name_or_deployment_id), False, 404,
200).json()
@staticmethod
def _find_files(directory, file_extension):
return [os.path.join(directory, name) for name in os.listdir(directory)
if name.lower().endswith(file_extension)]
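# Hypothetical usage sketch (not part of the original module): the credential values are
# placeholders taken from a Document Classification service key, and the model name,
# model version and file path are illustrative only.
def _demo_classify():  # pragma: no cover
    client = DCApiClient(base_url='<url>',
                         client_id='<uaa.clientid>',
                         client_secret='<uaa.clientsecret>',
                         uaa_url='<uaa.url>')
    return client.classify_document('invoice.pdf', model_name='my_model', model_version=1)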
|
/sap_business_document_processing-0.3.2-py3-none-any.whl/sap_business_document_processing/document_classification_client/dc_api_client.py
| 0.77373 | 0.169372 |
dc_api_client.py
|
pypi
|
import mimetypes
from .constants import API_FIELD_CLIENT_ID, API_FIELD_DOCUMENT_TYPE, API_FIELD_ENRICHMENT, API_FIELD_TEMPLATE_ID, \
API_FIELD_EXTRACTED_HEADER_FIELDS, API_FIELD_EXTRACTED_LINE_ITEM_FIELDS, API_REQUEST_FIELD_EXTRACTED_FIELDS, \
API_FIELD_FILE_TYPE, API_REQUEST_FIELD_RECEIVED_DATE
def create_document_options(client_id, document_type, header_fields=None, line_item_fields=None, template_id=None,
received_date=None, enrichment=None):
options = {
API_FIELD_CLIENT_ID: client_id,
API_FIELD_DOCUMENT_TYPE: document_type,
API_REQUEST_FIELD_EXTRACTED_FIELDS: {}
}
if header_fields is None:
header_fields = []
elif isinstance(header_fields, str):
header_fields = [s.strip() for s in header_fields.split(',')]
elif not isinstance(header_fields, list):
raise TypeError(f'Input variable \'header_fields\' has wrong type: {type(header_fields)}. Should be a string '
f'of comma separated values or a list of strings')
options[API_REQUEST_FIELD_EXTRACTED_FIELDS][API_FIELD_EXTRACTED_HEADER_FIELDS] = header_fields
if line_item_fields is None:
line_item_fields = []
elif isinstance(line_item_fields, str):
line_item_fields = [s.strip() for s in line_item_fields.split(',')]
elif not isinstance(line_item_fields, list):
raise TypeError(f'Input variable \'line_item_fields\' has wrong type: {type(line_item_fields)}. Should be a '
f'string of comma separated values or a list of strings')
options[API_REQUEST_FIELD_EXTRACTED_FIELDS][API_FIELD_EXTRACTED_LINE_ITEM_FIELDS] = line_item_fields
if template_id is not None:
options[API_FIELD_TEMPLATE_ID] = template_id
if received_date is not None:
options[API_REQUEST_FIELD_RECEIVED_DATE] = received_date
if enrichment is not None:
options[API_FIELD_ENRICHMENT] = enrichment
return options
def create_capability_mapping_options(document_type, file_type, header_fields=None, line_item_fields=None):
options = {
API_FIELD_DOCUMENT_TYPE: document_type,
API_FIELD_FILE_TYPE: file_type
}
if header_fields is None:
header_fields = []
elif isinstance(header_fields, str):
header_fields = [s.strip() for s in header_fields.split(',')]
elif not isinstance(header_fields, list):
raise TypeError(f'Input variable \'header_fields\' has wrong type: {type(header_fields)}. Should be a string '
f'of comma separated values or a list of strings')
options[API_FIELD_EXTRACTED_HEADER_FIELDS] = header_fields
if line_item_fields is None:
line_item_fields = []
elif isinstance(line_item_fields, str):
line_item_fields = [s.strip() for s in line_item_fields.split(',')]
elif not isinstance(line_item_fields, list):
raise TypeError(f'Input variable \'line_item_fields\' has wrong type: {type(line_item_fields)}. Should be a '
f'string of comma separated values or a list of strings')
options[API_FIELD_EXTRACTED_LINE_ITEM_FIELDS] = line_item_fields
return options
def get_mimetype(filename: str) -> str:
return mimetypes.guess_type(filename)[0]
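# Hypothetical usage sketch (not part of the original module): the client id and field
# names are placeholders; header fields may be given as a comma separated string or a list.
def _demo_options():  # pragma: no cover
    return create_document_options(
        client_id='c_00',
        document_type='invoice',
        header_fields='documentNumber,grossAmount',
        line_item_fields=['description', 'quantity'])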
|
/sap_business_document_processing-0.3.2-py3-none-any.whl/sap_business_document_processing/document_information_extraction_client/helpers.py
| 0.52683 | 0.169406 |
helpers.py
|
pypi
|
import logging
from .constants import DATASETS_ENDPOINT, DATASET_BY_ID_ENDPOINT, \
DATASET_DOCUMENTS_ENDPOINT, DATASET_DOCUMENT_BY_ID_ENDPOINT, TRAINING_JOBS_ENDPOINT, TRAINING_JOB_BY_ID_ENDPOINT, \
MODELS_ENDPOINT, MODEL_BY_NAME_ENDPOINT, MODEL_BY_VERSION_ENDPOINT, \
DEPLOYMENTS_ENDPOINT, DEPLOYMENT_BY_ID_ENDPOINT, INFERENCE_JOBS_ENDPOINT, INFERENCE_JOB_BY_ID_ENDPOINT, \
BATCH_INFERENCE_JOB_BY_ID_ENDPOINT
from .http_client_base import CommonClient
class BER_API_Client(CommonClient):
"""
This class provides an interface to access SAP Business Entity Recognition REST API from a Python application.
Structure of values returned by all the methods is documented in Swagger. See Swagger UI by adding:
/api/v1 to your Business Entity Recognition service key URL value (from outside the uaa section).
:param base_url: The service URL taken from the service key (key 'url' in service key JSON)
:param client_id: The client ID taken from the service key (key 'uaa.clientid' in service key JSON)
:param client_secret: The client secret taken from the service key (key 'uaa.clientsecret' in service key JSON)
:param uaa_url: The XSUAA URL taken from the service key (key 'uaa.url' in service key JSON)
:param logging_level: INFO level will log the operations' progress; the default level WARNING should not
produce any logs
"""
def __init__(self,
base_url,
client_id,
client_secret,
uaa_url,
logging_level=logging.WARNING):
logger = logging.getLogger('BERApiClient')
logger.setLevel(logging_level)
CommonClient.__init__(self,
base_url=base_url,
client_id=client_id,
client_secret=client_secret,
uaa_url=uaa_url,
url_path_prefix='api/v1/',
logging_level=logging_level)
self.logger = logger
# Datasets
def get_datasets(self):
"""
Gets summary information about the existing datasets
:return: Object containing an array of datasets
"""
self.logger.debug('Getting information about datasets')
response = self.session.get(self.path_to_url(DATASETS_ENDPOINT))
response.raise_for_status()
self.logger.info('Successfully got the information about the datasets')
return response
# Dataset
def create_dataset(self, dataset_type="training"):
"""
Creates an empty dataset
:return: Object containing the dataset id
"""
self.logger.debug('Creating a new dataset')
response = self.session.post(self.path_to_url(DATASETS_ENDPOINT),
json={"datasetType": dataset_type})
response.raise_for_status()
self.logger.info('Successfully created a new dataset')
return response
def get_dataset(self, dataset_id):
"""
Gets statistical information about a dataset with a given ID
:param dataset_id: The ID of the dataset
:return: Summary information about the dataset that includes the number of documents in different processing
stages
"""
self.logger.debug('Getting information about the dataset {}'.format(dataset_id))
response = self.session.get(self.path_to_url(DATASET_BY_ID_ENDPOINT(dataset_id=dataset_id)))
response.raise_for_status()
self.logger.info('Successfully got the information about the dataset {}'.format(dataset_id))
return response
def delete_dataset(self, dataset_id):
"""
Deletes a dataset with a given ID
:param dataset_id: The ID of the dataset to delete
:return: Object containing the ID of the deleted dataset and the number of documents deleted
"""
self.logger.debug('Deleting the dataset {}'.format(dataset_id))
response = self.session.delete(self.path_to_url(DATASET_BY_ID_ENDPOINT(dataset_id=dataset_id)))
response.raise_for_status()
self.logger.info('Successfully deleted the dataset {}'.format(dataset_id))
return response
# Documents
def get_dataset_documents(self, dataset_id):
"""
Gets the information about all the documents in a specific dataset
:param dataset_id: The ID of an existing dataset
:return: Object that contains array of the documents
"""
self.logger.debug('Getting information about the documents in the dataset {}'.format(dataset_id))
response = self.session.get(self.path_to_url(DATASET_DOCUMENTS_ENDPOINT(dataset_id=dataset_id)))
response.raise_for_status()
self.logger.info('Successfully got the information about the documents in the dataset {}'.format(dataset_id))
return response
# Document
def upload_document_to_dataset(self, dataset_id, document_path):
"""
Uploads a single document and its ground truth to a specific dataset
:param dataset_id: The ID of the dataset
:param document_path: The path to the document
:return: Object containing information about the uploaded document
"""
self.logger.debug('Uploading the document {} to the dataset {}'.format(
document_path, dataset_id))
document_name = document_path.split("/")[-1]
response = self.session.post(self.path_to_url(DATASET_DOCUMENTS_ENDPOINT(dataset_id=dataset_id)),
files={'document': (document_name, open(document_path, 'rb'), "application/json")})
response.raise_for_status()
self.logger.debug('Successfully uploaded the document {} to the dataset {}, waiting for '
'the document processing'.format(document_path, dataset_id))
return response
def get_dataset_document(self, dataset_id, document_id):
"""
Gets the information about all the documents in a specific dataset
:param dataset_id: The ID of an existing dataset
:param document_id: The reference ID of the document
:return: Object that contains array of the documents
"""
self.logger.debug('Getting information about the document {} in the dataset {}'.format(document_id, dataset_id))
response = self.session.get(self.path_to_url(DATASET_DOCUMENT_BY_ID_ENDPOINT(dataset_id=dataset_id,
document_id=document_id)))
response.raise_for_status()
self.logger.info('Successfully got the information about the document {} in the dataset {}'.format(document_id,
dataset_id))
return response
def delete_dataset_document(self, dataset_id, document_id):
"""
Deletes a training document from a dataset
:param dataset_id: The ID of the dataset where the document is located
:param document_id: The reference ID of the document
:return: response
"""
self.logger.debug('Deleting the document {} from the dataset {}'.format(document_id, dataset_id))
response = self.session.delete(
self.path_to_url(DATASET_DOCUMENT_BY_ID_ENDPOINT(dataset_id=dataset_id, document_id=document_id)))
response.raise_for_status()
self.logger.info('Successfully deleted the document {} from the dataset {}'.format(document_id, dataset_id))
return response
# Training
def train_model(self, model_name, dataset_id):
"""
Trigger the process to train a new model for BER, based on the documents in the
specific dataset and wait until this process is finished. The process may take significant time to complete
depending on the size of the dataset.
:param model_name: The name of the new model to train
:param dataset_id: The ID of an existing dataset containing enough documents for training
:return: Object containing success or error message
"""
self.logger.debug('Triggering training of the model {} from the dataset {}'.format(model_name, dataset_id))
response = self.session.post(self.path_to_url(TRAINING_JOBS_ENDPOINT),
json={"datasetId": dataset_id,
"modelName": model_name})
response.raise_for_status()
self.logger.info('Triggered training of the model {} from the dataset {}, waiting for the training to complete'
.format(model_name, dataset_id))
return response
def get_training_status(self, job_id):
"""
Get status detail of an ongoing training
:param job_id: The job id of training job
:return: Object containing the status detail the training
"""
self.logger.debug('Getting status of training job {}'.format(job_id))
response = self.session.get(self.path_to_url(TRAINING_JOB_BY_ID_ENDPOINT(job_id=job_id)))
response.raise_for_status()
return response
def delete_ongoing_training(self, job_id):
"""
Deletes an ongoing training
:param job_id: The job id of training job
:return: Object containing the success or error message
"""
self.logger.debug('Deleting training job {}'.format(job_id))
response = self.session.delete(self.path_to_url(TRAINING_JOB_BY_ID_ENDPOINT(job_id=job_id)))
response.raise_for_status()
return response
def get_recently_submitted_training_jobs_list(self):
"""
Fetches the list of recently submitted training jobs (~12 Hour interval)
:return: Object containing the list of training jobs
"""
self.logger.debug('getting list of recently submitted training jobs')
response = self.session.get(self.path_to_url(TRAINING_JOBS_ENDPOINT))
response.raise_for_status()
return response
# Models
def get_trained_models(self):
"""
Gets information about all trained models
:return: Object containing the array of all trained models, each model information contains training status and
training accuracy data
"""
self.logger.debug('Getting information about all trained models')
response = self.session.get(self.path_to_url(MODELS_ENDPOINT))
response.raise_for_status()
self.logger.info('Successfully got information about all trained models')
return response
# Model Versions
def get_trained_model_versions(self, model_name):
"""
Gets information about a specific trained model
:param model_name: The name of the model
:return: Object containing all versions of the model with the training status and training accuracy data
"""
self.logger.debug('Getting information about the model {}'.format(model_name))
response = self.session.get(self.path_to_url(MODEL_BY_NAME_ENDPOINT(model_name=model_name)))
response.raise_for_status()
self.logger.info('Successfully got the information about the model {}'.format(
model_name))
return response
# Model Version
def get_trained_model_version(self, model_name, model_version):
"""
Gets information about a specific trained model
:param model_name: The name of the model
:param model_version: The version of the model
:return: Object containing the training status and training accuracy data
"""
self.logger.debug('Getting information about the model {} with version {}'.format(model_name, model_version))
response = self.session.get(self.path_to_url(MODEL_BY_VERSION_ENDPOINT(model_name=model_name,
model_version=model_version)))
response.raise_for_status()
self.logger.info('Successfully got the information about the model {} with version {}'.format(model_name,
model_version))
return response
def delete_trained_model_version(self, model_name, model_version):
"""
Deletes an existing trained model
:param model_name: Name of the existing model to delete
:param model_version: Version of the existing model to delete
:return: Object containing the message of success or error
"""
self.logger.debug('Triggering deletion of the model {} with version {}'.format(model_name, model_version))
response = self.session.delete(
self.path_to_url(MODEL_BY_VERSION_ENDPOINT(model_name=model_name, model_version=model_version)))
response.raise_for_status()
self.logger.info('Successfully deleted the model {} with version {}'.format(model_name, model_version))
return response
# Deployment
def deploy_model(self, model_name, model_version):
"""
Deploys a trained model to be available for inference
:param model_name: The name of the trained model
:param model_version: The version of the trained model
:return: Object containing information about the deployed model serving
"""
self.logger.debug('Triggering the deployment of the model {} with version {}'.format(model_name, model_version))
response = self.session.post(self.path_to_url(DEPLOYMENTS_ENDPOINT), json={
"modelName": model_name,
"modelVersion": model_version
})
response.raise_for_status()
self.logger.info('Successfully triggered the deployment of the model {} with version {}, waiting for '
'the deployment completion'.format(model_name, model_version))
return response
def get_deployed_model(self, deployment_id):
"""
Gets information about a specific deployed model.
:param deployment_id: ID of the deployed model
:return: Object containing the information about the deployed model serving
"""
# todo: code can be changed to fetch the version directly
self.logger.debug('Getting the deployment of the deployment id {}'.format(
deployment_id))
response = self.session.get(self.path_to_url(DEPLOYMENT_BY_ID_ENDPOINT(deployment_id=deployment_id)))
self.logger.info('Successfully got information about the deployment of the deployment id {}'.format(
deployment_id))
return response
def undeploy_model(self, deployment_id):
"""
Removes a deployment of the specific model.
:param deployment_id: ID of the deployed model
:return: response with details of undeployed model
"""
self.logger.debug(
'Triggering the removal of the model deployment with ID {}'.format(deployment_id))
response = self.session.delete(self.path_to_url(DEPLOYMENT_BY_ID_ENDPOINT(deployment_id=deployment_id)))
# response.raise_for_status()
self.logger.info('Successfully triggered the removal of the model deployment with ID {}, waiting for '
'the deployment completion'.format(deployment_id))
return response
def get_deployments(self):
"""
See information about all deployed models.
:return: Object containing information about the deployed models
"""
self.logger.debug('Triggering the get deployments')
response = self.session.get(self.path_to_url(DEPLOYMENTS_ENDPOINT))
response.raise_for_status()
self.logger.info('Successfully received the deployment details')
return response
# Inference
def post_inference_job(self, text, model_name, model_version):
"""
Triggers inference job
:param text: The text to run inference on
:param model_name: The name of existing model
:param model_version: The version of existing model
:return: Object containing the job details
"""
self.logger.debug('Submitting inference job')
response = self.session.post(self.path_to_url(INFERENCE_JOBS_ENDPOINT),
json={"text": text,
"modelName": model_name,
"modelVersion": model_version})
response.raise_for_status()
self.logger.info('Submitted inference job successfully')
return response
def get_inference_job(self, job_id):
"""
Gets information about inference job
:param job_id: Inference Job ID
:return: Object containing the predicted result
"""
self.logger.debug('Getting inference job information')
response = self.session.get(self.path_to_url(INFERENCE_JOB_BY_ID_ENDPOINT(job_id=job_id)))
response.raise_for_status()
self.logger.info('Received inference job details successfully')
return response
def post_batch_inference_job(self, dataset_id, model_name, model_version):
"""
Triggers batch inference job
:param dataset_id: Id of the inference dataset
:param model_name: The name of existing model
:param model_version: The version of existing model
:return: Object containing the job details
"""
self.logger.debug('Submitting batch inference job')
response = self.session.post(self.path_to_url(INFERENCE_JOBS_ENDPOINT),
json={"datasetId": dataset_id,
"modelName": model_name,
"modelVersion": model_version})
response.raise_for_status()
self.logger.info('Submitted batch inference job successfully')
return response
def get_batch_inference_job_result(self, job_id):
"""
Gets results of batch inference job
:param job_id: Inference Job ID
:return: Object containing the predicted result
"""
self.logger.debug('Getting inference job information')
response = self.session.get(self.path_to_url(BATCH_INFERENCE_JOB_BY_ID_ENDPOINT(job_id=job_id)))
response.raise_for_status()
self.logger.info('Received batch inference job details successfully')
return response
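# Hypothetical usage sketch (not part of the original module): the credential values are
# placeholders from a Business Entity Recognition service key, and the text, model name
# and version are illustrative only.
def _demo_inference():  # pragma: no cover
    client = BER_API_Client(base_url='<url>',
                            client_id='<uaa.clientid>',
                            client_secret='<uaa.clientsecret>',
                            uaa_url='<uaa.url>')
    response = client.post_inference_job('Barack Obama visited Berlin.', 'my_model', 1)
    return response.json()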
|
/sap_business_entity_recognition_client_library-1.4-py3-none-any.whl/sap_ber_client/ber_api_client.py
| 0.690037 | 0.266 |
ber_api_client.py
|
pypi
|
import numpy as np
import argparse
import os
from PIL import Image
class BackofficeIconConverter:
"""
Icon creator to convert an input file into the correct format needed by
the SAP Commerce Backoffice framework to be used as icon in the
explorer-tree.
"""
#: Side length of a single image (height = width)
SINGLE_IMAGE_SIDE_LENGTH = 16
#: The backoffice icon file must be 16x80 as RGBA
BACKOFFICE_ICON_SIZE = (80, 16, 4)
def __init__(self, inputFilename: str) -> None:
"""
Initialises a new BackofficeIconConverter that can be used to convert a
single icon.
:param inputFilename: Input file used as source for the conversion.
"""
self.sourceFilename = inputFilename
def getDefaultTargetPath(self) -> str:
""" Create the default output path from the input name. """
head, tail = os.path.split(self.sourceFilename)
outFilename = f"backoffice-{tail}"
return os.path.join(head, outFilename)
def convertToDefault(self) -> None:
"""Convert the current file and place it under the default
output location. If the target file already exists, it will be
overridden.
"""
self.convertTo(self.getDefaultTargetPath())
def convertTo(self, targetPath: str) -> None:
"""Convert the current file and place it under the targetPath.
If there is already a file in the given location, it will be
overridden.
:param targetPath: The path including filename.
"""
sourceImage = Image.open(self.sourceFilename).convert('RGBA')
width, height = sourceImage.size
if (width != self.SINGLE_IMAGE_SIDE_LENGTH or
height != self.SINGLE_IMAGE_SIDE_LENGTH):
raise ValueError("input image must be {}x{} but {} was actually \
{}x{}!".format(
self.SINGLE_IMAGE_SIDE_LENGTH,
self.SINGLE_IMAGE_SIDE_LENGTH,
self.sourceFilename,
width,
height
))
sourceImg = sourceImage.load()
outImg = np.zeros(self.BACKOFFICE_ICON_SIZE, dtype=np.uint8)
self.__fillImageWithColor(outImg, sourceImg, 0, (127, 144, 165, 255))
self.__fillImageWithColor(outImg, sourceImg, 1, (85, 102, 122, 255))
self.__fillImageWithColor(outImg, sourceImg, 2, (4, 134, 224, 255))
self.__fillImageWithColor(outImg, sourceImg, 3, (4, 134, 224, 255))
self.__fillImageWithColor(outImg, sourceImg, 4, (190, 196, 209, 255))
outImage = Image.fromarray(outImg)
outImage.save(targetPath)
def __fillImageWithColor(
self,
outImg: np.ndarray,
sourceImg,
heightDisplace: int,
color: tuple
) -> None:
heightDisplaceInPixel = heightDisplace * self.SINGLE_IMAGE_SIDE_LENGTH
for y in range(self.SINGLE_IMAGE_SIDE_LENGTH):
for x in range(self.SINGLE_IMAGE_SIDE_LENGTH):
sourcePx = sourceImg[x, y]
# keep the RGB channels from the target color and take the alpha value from the source pixel
pixel = (color[0], color[1], color[2], sourcePx[3])
outImg[heightDisplaceInPixel + y, x] = pixel
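# Hypothetical usage sketch (not part of the original module): 'icon.png' is a
# placeholder for a 16x16 RGBA source sprite.
def _demo_convert():  # pragma: no cover
    converter = BackofficeIconConverter('icon.png')
    converter.convertToDefault()   # writes 'backoffice-icon.png' next to the source file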
def main():
parser = argparse.ArgumentParser(
description="Convert simple icons to the SAP Commerce Backoffice \
explorer tree icon format. The icon must be a sprite consist of 5 \
different color shades of the icon itself."
)
parser.add_argument("inputIcons", nargs="+", help="files to convert")
parser.add_argument("-o", "--output", dest="output")
args = parser.parse_args()
for file in args.inputIcons:
print(f"Process icon {file}...")
iconConverter = BackofficeIconConverter(file)
if args.output:
# combine the current filename with the out folder name
head, tail = os.path.split(file)
targetPath = os.path.join(args.output, tail)
iconConverter.convertTo(targetPath)
print(f"{file} => {targetPath}")
else:
iconConverter.convertToDefault()
print(f"{file} => {iconConverter.getDefaultTargetPath()}")
if __name__ == "__main__":
main()
|
/sap_commerce_backoffice_icons-1.0.0.tar.gz/sap_commerce_backoffice_icons-1.0.0/sap_commerce_backoffice_icons/backofficeIconConverter.py
| 0.623835 | 0.254596 |
backofficeIconConverter.py
|
pypi
|
triplet distance learning."""
from typing import Dict, List, Union, Tuple
from collections.abc import Iterable as IsIterable
from detectron2.structures import ImageList
from detectron2.config import CfgNode
import torch
from torch import nn
from .backbones import build_backbone
class ProjectionLayer(nn.Module):
"""Layers used to build the projection head.
It is fully connected layers with optional dropout and configurable
activation function.
"""
def __init__(self,
in_size: int,
out_size: int,
dropout: Union[None, float]=None,
activation: Union[None, str, 'nn.Module']=None):
"""Create fully connected layer with dropout and activation.
Parameters
----------
in_size: int
Number of input features
out_size: int
Number of neurons/output features
dropout: None or float
Dropout probability.
If None dropout is disabled.
activation: str, nn.Module or None
Activation function applied to the output.
If str it has to match the name of an activation function from torch.nn.
If None no activation function will be used.
"""
super().__init__()
self.fc = nn.Linear(in_size, out_size)
if isinstance(activation, str):
activation = getattr(nn, activation)
elif isinstance(activation, nn.Module) or activation is None:
pass
else:
raise ValueError('`activation` has to be either None, the name of the activation or an nn.Module!')
if activation is not None:
self.activation = activation()
else:
self.activation = nn.Identity()
if isinstance(dropout, float):
self.dropout = nn.Dropout(p=dropout)
else:
self.dropout = nn.Identity()
def forward(self, features: 'torch.Tensor') -> 'torch.Tensor':
"""Forward step of the layers.
Parameter
---------
features: torch.Tensor
Input features.
Returns
-------
torch.Tensor
Layer output
"""
return self.dropout(self.activation(self.fc(features)))
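# Hypothetical usage sketch (not part of the original module): ProjectionLayer can be
# exercised on its own with a random batch of feature vectors.
def _demo_projection_layer():  # pragma: no cover
    layer = ProjectionLayer(in_size=128, out_size=64, dropout=0.1, activation='ReLU')
    features = torch.randn(8, 128)
    return layer(features)   # tensor of shape (8, 64)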
class BaseModel(nn.Module): # pylint: disable=R0902
"""This model is the basis for the ImageClassification and TripletDistancerLerner
models.
"""
def __init__(self, # pylint: disable=R0914, R0912, R0915
*,
backbone: 'nn.Module',
pixel_mean: 'torch.Tensor',
pixel_std: 'torch.Tensor',
out_dim: Union[None, int]=None,
dropout: Union[None, float]=0.5,
image_size: Union[None, int, Tuple[int, int]]=None,
intermediate_sizes: Union[None, int, List[int]]=None,
input_feats: Union[None, List[str]]=None,
pooling: Union[bool, str]=True,
normalize_output: bool=False,
freeze_backbone: bool=False,
activation_projection_layer: Union[str, None]='ReLU'):
"""The model uses a backbone model and appends a projection head.
It supports different options for the projection head.
- using multiple outputs from the backbone e.g. different levels from a FPN
- pooling of backbone features
- normalization of the final output
- configurable number of fc layers with different activation functions and optional dropout
- supports backbones from the timm library.
Parameters
----------
backbone: nn.Module
Backbone model. Supports all backbones from detectron2 and TimmBackbones.
See sap_computer_vision.modelling.backbones.timm_backbones for details.
pixel_mean: torch.Tensor
Mean pixel values
pixel_std: torch.Tensor
Std of the pixel values
out_dim: None or int, optional, default=None
Dimensionality of the final layer.
If None the last layer is nn.Identity
dropout: float, optional, default=0.5
Dropout probability between projection head layers.
image_size: None, int or Tuple[int, int], optional, default=None
If None, input images of different sizes are supported.
Beware when not using pooling or for backbones that output flat
feature vectors: in that case the input images need a fixed size and image_size
cannot be None. If int, the height and width of the input image have
to be equal. For non-square images, image_size has to be a tuple
(height, width).
intermediate_sizes: None, int or List[int], optional, default=None
Size of layers between backbone and final output layer.
If None no intermediate layer is used.
        input_feats: None or List[str], optional, default=None
Name of the output features used from the backbone.
If None the model tries to figure out which features are provided by the
backbone and uses all.
pooling: bool or str, optional, default=True
Pooling of backbone output. Available pooling methods are 'max' and
'average'. If True the default method 'max' will be used.
        normalize_output: bool, optional, default=False
            Normalize the final model output.
        freeze_backbone: bool, optional, default=False
            If True the parameters of the backbone are frozen (requires_grad=False).
activation_projection_layer: None or str, optional, default='ReLU'
Name of the activation function used for the intermediate layers.
            Has to match the name of an activation function from torch.nn.
"""
super().__init__()
self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
self.image_size = image_size
self.backbone = backbone
if freeze_backbone:
for param in self.backbone.parameters():
param.requires_grad = False
backbone_output = self.backbone.output_shape()
if input_feats is None:
input_feats = backbone_output.keys()
self.used_backbone_feats = input_feats
self.pooling = pooling
self.normalize_output = normalize_output
if isinstance(self.pooling, bool) and self.pooling:
self.pooling = 'max'
if isinstance(self.pooling, bool) and not self.pooling:
self.pooling = None
elif isinstance(self.pooling, str) and self.pooling in ['max', 'average']:
pass
else:
raise ValueError('`pooling` has to be either bool or str (options: `max`, `average`).')
feat_size = 0
if self.pooling is not None:
for feat in self.used_backbone_feats:
feat = backbone_output[feat]
channels = feat.channels
feat_size += channels
if self.pooling == 'max':
self.pooler = nn.AdaptiveMaxPool2d((1,1))
elif self.pooling == 'average':
self.pooler = nn.AdaptiveAvgPool2d((1,1))
else:
raise ValueError('`pooling` has to be either bool or str (options: `max`, `average`).')
else:
if self.image_size is None:
raise ValueError('When not using `pooling` the input image_size '
'has to be fixed and provided to the model.')
if isinstance(self.image_size, int):
height, width = self.image_size, self.image_size
elif isinstance(self.image_size, IsIterable):
height, width = self.image_size
for feat in self.used_backbone_feats:
feat = backbone_output[feat]
channels = feat.channels
stride = feat.stride
if stride is not None:
if not (height % stride == 0 and width % stride == 0):
                        raise ValueError(f'`image_size=({height}, {width})` has to be a multiple of {stride}!')
feat_size += channels * (height // stride) * (width // stride)
else:
feat_size += feat.channels
if isinstance(intermediate_sizes, int):
intermediate_sizes = [intermediate_sizes]
elif intermediate_sizes is None:
intermediate_sizes = []
input_size = feat_size
self.projection_layers = len(intermediate_sizes) if intermediate_sizes is not None else 0
for i, intermediate_size in enumerate(intermediate_sizes):
output_size = min(intermediate_size, input_size)
setattr(self, f'projection_layer_{i+1}', ProjectionLayer(input_size,
output_size,
activation=activation_projection_layer,
dropout=dropout))
input_size = output_size
self.out = nn.Identity() if out_dim is None else nn.Linear(input_size, out_dim)
self.out_dim = input_size if out_dim is None else out_dim
def forward(self,
batched_inputs: List[Dict[str, 'torch.Tensor']],
return_pooled_features: bool=False) -> 'torch.Tensor':
"""This class is not a complete model.
Use subclasses like ImageClassifier or TripletDistanceLearner or create new
subclass."""
raise NotImplementedError
@classmethod
def from_config(cls, cfg: 'CfgNode') -> Dict:
"""Classmethod to create an instance based on the config.
Check detectron configs mechanism.
Parameters
----------
cfg: CfgNode
Config
Returns
-------
dict
Dict with the relevant kwargs. This dict can be consumed by the
__init__ function.
"""
backbone, pixel_mean, pixel_std = build_backbone(cfg)
if cfg.MODEL.get('FEATURE_EXTRACTION', None) is None:
raise KeyError('The config is missing the cfg.MODEL.FEATURE_EXTRACTION node. '
'This is most likely because the config was created for an older version of the package. '
'Try converting the config using the function '
'\'sap_computer_vision.modelling.base.adjust_model_configs_base_'
'architecture_to_feature_extraction_node\'.')
kwargs_base = {
"backbone": backbone,
"pixel_mean": pixel_mean,
"pixel_std": pixel_std,
"intermediate_sizes": cfg.MODEL.FEATURE_EXTRACTION.INTERMEDIATE_SIZE,
"dropout": cfg.MODEL.FEATURE_EXTRACTION.DROPOUT_FC,
"pooling": cfg.MODEL.FEATURE_EXTRACTION.POOL_BACKBONE_FEATURES,
"normalize_output": cfg.MODEL.FEATURE_EXTRACTION.NORMALIZE_OUTPUT,
"out_dim": cfg.MODEL.FEATURE_EXTRACTION.PROJECTION_SIZE,
"input_feats": cfg.MODEL.FEATURE_EXTRACTION.IN_FEATURES,
"image_size": cfg.INPUT.FIXED_IMAGE_SIZE,
"freeze_backbone": cfg.MODEL.FEATURE_EXTRACTION.FREEZE_BACKBONE
}
return kwargs_base
@property
def device(self):
return self.pixel_mean.device
def preprocess_image(self, batched_inputs: List[Dict[str, torch.Tensor]], image_key: str='image') -> 'ImageList':
"""
Normalize, pad and batch the input images.
Parameters
----------
batched_inputs: list of dicts
List of input dicts
        image_key: str, optional, default='image'
Key of the images in the input dict
Returns
-------
ImageList
Returns instance of detectron2.ImageList
"""
images = [x[image_key].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
return images
def inference(self,
batched_inputs: List[Dict[str, torch.Tensor]],
return_pooled_features: bool=False,
image_key: str='image') -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
"""Forward step of the model.
Parameters
----------
batched_inputs: list of dicts
Input batch
        return_pooled_features: bool, optional, default=False
If only the embeddings (False) or the embeddings and the
pooled/unpooled backbone features should be returned.
The backbone features can be used to visualize
similarities as seen by the model between images.
Check sap_computer_vision.utils.deep_similarity.
Returns
-------
torch.Tensor or Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
Returns the embedding tensor and if return_pooled_features=True
            pooled and unpooled backbone features.
"""
images = self.preprocess_image(batched_inputs, image_key)
features = self.backbone(images.tensor)
returns = []
features = tuple(features[f] for f in self.used_backbone_feats)
if self.pooling:
if return_pooled_features:
returns.append(features)
features = tuple(torch.flatten(self.pooler(f), start_dim=1) for f in features)
if return_pooled_features:
returns.append(features)
else:
features = tuple((torch.flatten(f, start_dim=1) for f in features))
features = torch.cat(features, 1)
for i in range(self.projection_layers):
l = getattr(self, f'projection_layer_{i+1}')
features = l(features)
features = self.out(features)
if self.normalize_output:
features = nn.functional.normalize(features, p=2.0, dim=1)
if return_pooled_features:
if not self.pooling:
raise AttributeError('The model is initialized with `pooling=False`, '
'therefore returning pooled features is not possible')
returns.append(features)
return returns[::-1]
else:
return features
def adjust_model_configs_base_architecture_to_feature_extraction_node(cfg: CfgNode):
"""Function to transform old img clf and triplet learner configs to new structure.
Previously the architecture of the feature extraction part was defined in the nodes
cfg.MODEL.IMAGE_CLASSIFICATION/cfg.MODEL.TRIPLET_DISTANCE_LEARNER.
For more consistency most options were moved to the config node cfg.MODEL.FEATURE_EXTRACTION.
"""
fe_node = cfg.MODEL.get('FEATURE_EXTRACTION', CfgNode({}))
if cfg.MODEL.META_ARCHITECTURE == 'ImageClassifier':
model_node = cfg.MODEL.get('IMAGE_CLASSIFIER')
defaults = {
'PROJECTION_SIZE': model_node.NUM_CLASSES,
'INTERMEDIATE_SIZE': 500,
'DROPOUT_FC': 0.5,
'IN_FEATURES': None,
'NORMALIZE_OUTPUT': False,
'POOL_BACKBONE_FEATURES': False,
}
model_node_values = {
'NUM_CLASSES': None
}
elif cfg.MODEL.META_ARCHITECTURE == 'TripletDistanceLearner':
model_node = cfg.MODEL.get('TRIPLET_DISTANCE_LEARNER')
defaults = {
'PROJECTION_SIZE': 500,
'INTERMEDIATE_SIZE': None,
'DROPOUT_FC': 0.5,
'IN_FEATURES': None,
'NORMALIZE_OUTPUT': True,
'POOL_BACKBONE_FEATURES': False,
'FREEZE_BACKBONE': False,
}
model_node_values = {}
else:
return cfg
for k, v in defaults.items():
fe_node[k] = model_node.get(k, v)
for k, v in model_node_values.items():
model_node[k] = v
cfg.MODEL['FEATURE_EXTRACTION'] = fe_node
return cfg
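

# --- Illustrative sketch (added for documentation; all keys and values are examples only) ---
# Converts a minimal "old style" image-classification config to the new
# cfg.MODEL.FEATURE_EXTRACTION layout; a real config would contain many more nodes.
def _example_convert_old_config() -> 'CfgNode':
    cfg = CfgNode({'MODEL': CfgNode({
        'META_ARCHITECTURE': 'ImageClassifier',
        'IMAGE_CLASSIFIER': CfgNode({'NUM_CLASSES': 10}),
    })})
    cfg = adjust_model_configs_base_architecture_to_feature_extraction_node(cfg)
    # cfg.MODEL.FEATURE_EXTRACTION.PROJECTION_SIZE is now 10 (taken from NUM_CLASSES)
    return cfg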
|
/sap_computer_vision_package-1.1.7-py3-none-any.whl/sap_computer_vision/modelling/base.py
| 0.977586 | 0.597138 |
base.py
|
pypi
|
from typing import Dict, List, Tuple, Union
from detectron2.config import configurable
from detectron2.modeling import META_ARCH_REGISTRY
import torch
from torch import nn
from .base import BaseModel
@META_ARCH_REGISTRY.register()
class ImageClassifier(BaseModel):
"""Model for image classification.
The model architecture is taken from sap_computer_vision.modelling.base.BaseModel.
    Right now BCEWithLogitsLoss, CrossEntropyLoss and MultiLabelSoftMarginLoss
    are the available loss functions.
"""
@configurable
def __init__(self,
*,
backbone: 'nn.Module',
pixel_mean: 'torch.Tensor',
pixel_std: 'torch.Tensor',
out_dim: Union[None, int]=None,
n_classes: Union[None, int]=None,
dropout: Union[None, float]=0.5,
image_size: Union[None, int, Tuple[int, int]]=None,
intermediate_sizes: Union[None, int, List[int]]=None,
input_feats: Union[None, List[str]]=None,
pooling: Union[bool, str]=True,
multi_label: bool=False,
normalize_output: bool=False,
freeze_backbone: bool=False,
activation_projection_layer: Union[str, None]='ReLU'):
"""Create model instances.
Parameters not documented here are documented for the BaseModel.
Parameters
----------
        n_classes: int or None
            Number of classes/labels.
        multi_label: bool, optional, default=False
            Multi-label use cases.
            Beware: This should be seen as a placeholder, since multi-label problems
            are not yet supported by this repository.
"""
super().__init__(
backbone=backbone,
out_dim=out_dim,
intermediate_sizes=intermediate_sizes,
dropout=dropout,
image_size=image_size,
pixel_mean=pixel_mean,
pixel_std=pixel_std,
input_feats=input_feats,
pooling=pooling,
normalize_output=normalize_output,
freeze_backbone=freeze_backbone,
activation_projection_layer=activation_projection_layer)
self.binary_classifier = n_classes == 2 and not multi_label
self.clf_layer = nn.Identity() if n_classes is None else nn.Linear(self.out_dim, 1 if self.binary_classifier else n_classes)
self.eval_norm = nn.Sigmoid() if self.binary_classifier else nn.Softmax(dim=1)
if multi_label:
self.multi_label = True
self.criterion = nn.MultiLabelSoftMarginLoss()
elif self.binary_classifier:
self.multi_label = False
self.criterion = nn.BCEWithLogitsLoss()
else:
self.multi_label = False
self.criterion = nn.CrossEntropyLoss()
@classmethod
def from_config(cls, cfg) -> Dict:
"""Classmethod to create an instance based on the config.
Check detectron configs mechanism.
Parameters
----------
cfg: CfgNode
Config
Returns
-------
dict
Dict with the relevant kwargs. This dict can be consumed by the
__init__ function.
"""
kwargs_base = BaseModel.from_config(cfg)
kwargs_clf = {
"n_classes": cfg.MODEL.IMAGE_CLASSIFIER.NUM_CLASSES,
"multi_label": cfg.MODEL.IMAGE_CLASSIFIER.MULTI_LABEL
}
return {**kwargs_base, **kwargs_clf}
def forward(self,
batched_inputs: List[Dict[str, 'torch.Tensor']],
return_pooled_features: bool=False) -> 'torch.Tensor':
"""Forward step of the model.
Parameters
----------
batched_inputs: list of dicts
Input batch
        return_pooled_features: bool, optional, default=False
If only the embeddings (False) or the embeddings and the
pooled/unpooled backbone features should be returned.
The backbone features can be used to visualize
similarities as seen by the model between images.
Check sap_computer_vision.utils.deep_similarity.
Returns
-------
torch.Tensor or Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
Returns the embedding tensor and if return_pooled_features=True
            pooled and unpooled backbone features.
"""
if self.training:
fw_results = self.inference(batched_inputs, return_pooled_features=False)
fw_results = self.clf_layer(fw_results)
if self.multi_label:
raise NotImplementedError
elif self.binary_classifier:
fw_results = fw_results.squeeze()
labels = torch.tensor([i['class_id'] for i in batched_inputs], dtype=torch.float).to(self.device)
else:
labels = torch.tensor([i['class_id'] for i in batched_inputs], dtype=torch.long).to(self.device)
return self.criterion(fw_results, labels)
if return_pooled_features:
fw_results, pooled, unpool = self.inference(batched_inputs,
return_pooled_features=return_pooled_features)
else:
fw_results = self.inference(batched_inputs)
fw_results = self.clf_layer(fw_results)
if self.binary_classifier:
fw_results = fw_results.squeeze()
fw_results = self.eval_norm(fw_results)
if return_pooled_features:
return fw_results, pooled, unpool
else:
return fw_results
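

# --- Illustrative usage sketch (added for documentation, not part of the original API) ---
# Builds an ImageClassifier directly around a timm backbone and classifies one random image.
# 'resnet18', n_classes=5 and the image size are example values; requires the optional `timm` dependency.
def _example_build_image_classifier() -> 'torch.Tensor':
    from .backbones.timm_backbones import TimmBackbone
    backbone = TimmBackbone('resnet18', pretrained=False)
    model = ImageClassifier(backbone=backbone,
                            pixel_mean=backbone.pixel_mean,
                            pixel_std=backbone.pixel_std,
                            n_classes=5)
    model.eval()
    with torch.no_grad():
        return model([{'image': torch.rand(3, 224, 224)}])  # softmax scores of shape (1, 5)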
|
/sap_computer_vision_package-1.1.7-py3-none-any.whl/sap_computer_vision/modelling/image_classifier.py
| 0.94743 | 0.329661 |
image_classifier.py
|
pypi
|
from typing import Callable, Dict, List, Tuple, Union
import logging
from detectron2.config import configurable, CfgNode
from detectron2.modeling import META_ARCH_REGISTRY
import torch
from torch import nn
from sap_computer_vision.data.triplet_sampling_utils import create_triplets_from_pk_sample, build_triplet_strategy
from .base import BaseModel
logger = logging.getLogger(__name__)
class SelectiveContrastiveTripletNCALoss(nn.Module):
"""'Selectively Contrastive Triplet Loss' as described
in https://www.ecva.net/papers/eccv_2020/papers_ECCV/papers/123590120.pdf.
It is a standard Neighborhood Component Analysis Loss (NCA) with an
    additional switch case. For triplets in which the similarity between
    the anchor and the negative example is bigger than the similarity
    between the anchor and the positive example, the loss is lambda*Similarity(a, n).
    Lambda is a tunable parameter.
"""
def __init__(self,
lambda_: float=1.0,
swap: bool=False,
selective: bool=True,
reduction: Union[None, str]='mean',
dim: int=1,
eps: float=1e-08):
"""Create instance of Selectively Contrastive Triplet Loss'
Parameters
----------
        lambda_: float, optional, default=1.0
            Lambda-parameter of the 'Selectively Contrastive Triplet Loss'.
        swap: bool, optional, default=False
            The distance swap is described in detail in the paper 'Learning
            shallow convolutional feature descriptors with triplet losses'
            by V. Balntas, E. Riba et al.
        selective: bool, optional, default=True
            Enables/disables the additional condition from the
            'Selectively Contrastive Triplet Loss'. If False the loss
            is a standard NCA loss.
        reduction: None or str, optional, default='mean'
            Specifies the reduction to apply to the output: None/'none' | 'mean' | 'sum'.
        dim: int, optional, default=1
            'dim'-parameter of the nn.CosineSimilarity function.
eps: float, optional, default=1e-08
'eps'-parameter of the nn.CosineSimilarity function.
"""
super().__init__()
self.sim = nn.CosineSimilarity(dim=dim, eps=eps)
self.swap = swap
self.lambda_ = lambda_
self.reduction = reduction
self.selective = selective
def forward(self, a: 'torch.Tensor', p: 'torch.Tensor', n: 'torch.Tensor') -> 'torch.Tensor':
"""Forward step of the step.
Parameters
----------
a: torch.Tensor
Embedding of the anchors
p: torch.Tensor
Embedding of the positive examples
n: torch.Tensor
Embedding of the negative examples
Returns
-------
torch.Tensor
Loss
"""
s_ap = self.sim(a, p)
s_an = self.sim(a, n)
if self.swap:
s_pn = self.sim(p, n)
mask = s_pn > s_an
s_an[mask] = s_pn[mask]
loss = torch.log(torch.exp(s_ap)+torch.exp(s_an)) - s_ap
if self.selective:
mask = s_an > s_ap
loss[mask] = self.lambda_ * s_an[mask]
if self.reduction == 'mean':
return torch.mean(loss)
elif self.reduction == 'sum':
return torch.sum(loss)
else:
return loss
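

# --- Illustrative sanity check (added for documentation, not part of the original API) ---
# Computes the NCA-style triplet loss on random embeddings; batch and feature sizes are example values.
def _example_nca_loss() -> 'torch.Tensor':
    anchors = torch.rand(8, 128)
    positives = torch.rand(8, 128)
    negatives = torch.rand(8, 128)
    return SelectiveContrastiveTripletNCALoss(lambda_=1.0, selective=True)(anchors, positives, negatives)
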
class SelectContrastiveTripletMarginLoss(nn.Module):
"""'Selectively Contrastive Triplet Loss' as described
in https://www.ecva.net/papers/eccv_2020/papers_ECCV/papers/123590120.pdf.
It is a standard TripletMarginLoss with an
    additional switch case. For triplets in which the distance between
    the anchor and the negative example is smaller than the distance
    between the anchor and the positive example, the loss is margin - dist(a, n).
"""
def __init__(self, margin=0.1, p=2, swap=False, selective=True, reduction='mean'):
"""Triplet Margin Loss with addtion from "Hard negative examples are hard, but useful"
Xuan et al.
All parameters execept for the 'selective' parameter are identical to
torch.nn.TripletMarginLoss. See its documentation for details.
        Parameters
        ----------
        margin: float, optional, default=0.1
            Margin used in the loss.
        p: int, optional, default=2
            The norm degree for pairwise distance.
        swap: bool, optional, default=False
            The distance swap is described in detail in the paper 'Learning
            shallow convolutional feature descriptors with triplet losses'
            by V. Balntas, E. Riba et al.
        selective: bool, optional, default=True
            Enables/disables the additional condition from the
            'Selectively Contrastive Triplet Loss'. If False the loss
            is a standard TripletMarginLoss.
        reduction: None or str, optional, default='mean'
            Specifies the reduction to apply to the output: None/'none' | 'mean' | 'sum'.
"""
super().__init__()
self.swap = swap
self.p = p
self.margin = margin
self.reduction = reduction
self.selective = selective
def forward(self, a, p, n):
"""Forward step of the step.
Parameters
----------
a: torch.Tensor
Embedding of the anchors
p: torch.Tensor
Embedding of the positive examples
n: torch.Tensor
Embedding of the negative examples
Returns
-------
torch.Tensor
Loss
"""
d_ap = torch.norm(a-p, dim=1, p=self.p)
d_an = torch.norm(a-n, dim=1, p=self.p)
if self.swap:
d_pn = torch.norm(p-n, dim=1, p=self.p)
d_an = torch.where(d_pn < d_an, d_pn, d_an)
loss = d_ap - d_an + self.margin
if self.selective:
alt_loss = self.margin - d_an
loss = torch.where(d_ap > d_an, alt_loss, loss)
loss = torch.max(torch.zeros_like(loss), loss)
if self.reduction == 'mean':
return torch.mean(loss)
elif self.reduction == 'sum':
return torch.sum(loss)
else:
return loss
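

# --- Illustrative sanity check (added for documentation, not part of the original API) ---
# Computes the selective triplet margin loss on random embeddings; sizes and margin are example values.
def _example_selective_margin_loss() -> 'torch.Tensor':
    anchors = torch.rand(8, 128)
    positives = torch.rand(8, 128)
    negatives = torch.rand(8, 128)
    return SelectContrastiveTripletMarginLoss(margin=0.1, selective=True)(anchors, positives, negatives)
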
class CircleLoss(nn.Module):
"""Implementation of CircleLoss taken from:
https://github.com/TinyZeaMays/CircleLoss/blob/master/circle_loss.py
    with minor adaptations. CircleLoss is introduced in the paper
"Circle Loss: A Unified Perspective of Pair Similarity Optimization"
Sun et al.
The basic idea is to build all possible triplets and weight them
according to their "difficulty".
"""
def __init__(self, m: float=0.5, gamma: float=256.) -> None:
"""
Parameters
----------
        m: float, optional, default=0.5
            Margin
        gamma: float, optional, default=256.
Scale factor, check paper for details.
"""
super(CircleLoss, self).__init__()
self.m = m
self.gamma = gamma
self.soft_plus = nn.Softplus()
def forward(self, normed_feature: torch.Tensor, label: torch.Tensor) -> torch.Tensor:
"""Forward step of the loss.
Parameters
----------
normed_feature: torch.Tensor
Embeddings
label: torch.Tensor
Class ids
Return
------
torch.Tensor
Loss
"""
inp_sp, inp_sn = self._convert_label_to_similarity(normed_feature, label)
return self._loss_from_similarity_tensors(inp_sp, inp_sn)
def _loss_from_similarity_tensors(self, sp: torch.Tensor, sn: torch.Tensor) -> torch.Tensor:
ap = torch.clamp_min(- sp.detach() + 1 + self.m, min=0.)
an = torch.clamp_min(sn.detach() + self.m, min=0.)
delta_p = 1 - self.m
delta_n = self.m
logit_p = - ap * (sp - delta_p) * self.gamma
logit_n = an * (sn - delta_n) * self.gamma
loss = self.soft_plus(torch.logsumexp(logit_n, dim=0) + torch.logsumexp(logit_p, dim=0))
return loss
@staticmethod
def _convert_label_to_similarity(normed_feature: torch.Tensor, label: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
similarity_matrix = normed_feature @ normed_feature.transpose(1, 0)
label_matrix = label.unsqueeze(1) == label.unsqueeze(0)
positive_matrix = label_matrix.triu(diagonal=1)
negative_matrix = label_matrix.logical_not().triu(diagonal=1)
similarity_matrix = similarity_matrix.view(-1)
positive_matrix = positive_matrix.view(-1)
negative_matrix = negative_matrix.view(-1)
return similarity_matrix[positive_matrix], similarity_matrix[negative_matrix]
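

# --- Illustrative sanity check (added for documentation, not part of the original API) ---
# CircleLoss consumes normalized embeddings plus class labels and builds the pairs internally.
# Batch size, feature dimension and the number of classes are example values.
def _example_circle_loss() -> 'torch.Tensor':
    feats = nn.functional.normalize(torch.rand(16, 64), dim=1)
    labels = torch.randint(0, 4, (16,))
    return CircleLoss(m=0.25, gamma=256.)(feats, labels)
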
@META_ARCH_REGISTRY.register()
class TripletDistanceLearner(BaseModel):
"""Model for distance metric learning using triplets.
The model architecture is taken from sap_computer_vision.modelling.base.BaseModel.
    The BaseModel is extended to support different loss functions and different sampling
    mechanisms/strategies for triplets. Check sap_computer_vision.data.samplers for
    sampler options.
"""
@configurable
def __init__(self,
*,
backbone: 'nn.Module',
loss: Callable,
out_dim: Union[None, int]=None,
dropout: Union[None, float]=0.5,
pixel_mean: 'torch.Tensor',
pixel_std: 'torch.Tensor',
image_size: Union[None, int, Tuple[int, int]]=None,
                 intermediate_sizes: Union[None, List[int]]=None,
input_feats: Union[None, List[str]]=None,
triplet_strategy: Union[None, Tuple[str, str]]=None,
pooling: Union[bool, str]=True,
normalize_output: bool=True,
freeze_backbone: bool=False,
dist_norm: float=2.,
activation_projection_layer: Union[str, None]='ReLU'):
"""Create model instances.
Parameters not documented here are documented for the BaseModel.
Parameters
----------
loss: nn.Module
Loss function.
out_dim: int or None
Dimensionality of the feature vectors.
If None the last layer is nn.Identity.
        triplet_strategy: None or Tuple(str/float, str/float), optional, default=None
When using PKSampler the triplets are created within the model.
If the used sampler already creates triplets the strategy should
be None. The strategy should also be None when using CircleLoss
(CircleLoss expects PKSampler, but the loss function creates the
triplets). To specify a triplet (pos_strat, neg_strat) is expected.
Options are 'rand'/'min'/'max' or a float between 0 and 1.
            - None: No strategy.
- rand: random
- min: minimal distance to the anchor
- max: maximal distance to the anchor
- float: loc of triplet_sampling_utils.SkedNormalSampler
        dist_norm: float, optional, default=2.
The norm degree for pairwise distance calculated for
most triplet strategies.
"""
super().__init__(
backbone=backbone,
out_dim=out_dim,
intermediate_sizes=intermediate_sizes,
dropout=dropout,
image_size=image_size,
pixel_mean=pixel_mean,
pixel_std=pixel_std,
input_feats=input_feats,
pooling=pooling,
normalize_output=normalize_output,
freeze_backbone=freeze_backbone,
activation_projection_layer=activation_projection_layer)
self.criterion = loss
self.dist_norm = dist_norm
self.triplet_strategy = self.build_triplet_strategy(triplet_strategy, self.criterion)
@staticmethod
def build_triplet_strategy(strategy: Union[None, Tuple[Union[str, float], Union[str, float]]],
criterion: Union[None, 'nn.Module']=None) -> Tuple[Union[str, Callable], Union[str, Callable]]:
"""Function used to check triplet strategy and create callable if needed.
        If strategy is not None and criterion is not CircleLoss,
sap_computer_vision.data.triplet_sampling_utils.build_triplet_strategy is called.
Parameters
----------
strategy
Strategy for triplet sampling.
Check sap_computer_vision.data.triplet_sampling_utils.build_triplet_strategy for details.
Returns
-------
Tuple(str/callable, str/callable)
Tuple (pos_strat, neg_strat).
"""
if strategy is None or isinstance(criterion, CircleLoss):
if not (isinstance(strategy, str) and strategy.lower() == 'none') and strategy is not None:
logger.warning('When using CircleLoss no triplet strategy is used!')
return None
return build_triplet_strategy(strategy)
@classmethod
def from_config(cls, cfg: CfgNode) -> Dict:
"""Classmethod to create an instance based on the config.
Check detectron configs mechanism.
Parameters
----------
cfg: CfgNode
Config
Returns
-------
dict
Dict with the relevant kwargs. This dict can be consumed by the
__init__ function.
"""
kwargs_base = BaseModel.from_config(cfg)
loss, dist_norm = cls.build_loss(cfg)
kwargs_clf = {
"triplet_strategy": cfg.MODEL.TRIPLET_DISTANCE_LEARNER.TRIPLET_STRATEGY,
"dist_norm": dist_norm,
"loss": loss
}
return {**kwargs_base, **kwargs_clf}
@staticmethod
def build_loss(cfg: CfgNode, name: Union[None, str]=None) -> 'nn.Module':
"""Build loss functions
Parameters
----------
cfg: CfgNode
Configuration
        name: str or None, optional, default=None
Name of the loss available options are:
- MARGIN_LOSS
- NCA_LOSS
- CIRCLE_LOSS
If None cfg.MODEL.TRIPLET_DISTANCE_LEARNER.LOSS is used.
Returns
-------
nn.Module
Loss function
"""
if name is None:
name = cfg.MODEL.TRIPLET_DISTANCE_LEARNER.LOSS.lower()
if name.upper() == 'CIRCLE_LOSS':
if cfg.DATALOADER.SAMPLER_TRAIN != 'PKSampler':
                raise ValueError('`CIRCLE_LOSS` can only be used with `PKSampler` as the sampler for training.')
criterion = CircleLoss(m=cfg.MODEL.TRIPLET_DISTANCE_LEARNER.CIRCLE_LOSS.MARGIN,
gamma=cfg.MODEL.TRIPLET_DISTANCE_LEARNER.CIRCLE_LOSS.GAMMA)
dist_norm = None
elif name.upper() == 'MARGIN_LOSS':
kwargs = {
'margin': cfg.MODEL.TRIPLET_DISTANCE_LEARNER.MARGIN_LOSS.MARGIN,
'p': cfg.MODEL.TRIPLET_DISTANCE_LEARNER.MARGIN_LOSS.NORM,
'swap': cfg.MODEL.TRIPLET_DISTANCE_LEARNER.MARGIN_LOSS.SWAP,
'reduction': 'mean'
}
if cfg.MODEL.TRIPLET_DISTANCE_LEARNER.MARGIN_LOSS.SELECTIVE:
criterion = SelectContrastiveTripletMarginLoss(selective=True, **kwargs)
else:
criterion = nn.TripletMarginLoss(**kwargs)
dist_norm = kwargs['p']
elif name.upper() == 'NCA_LOSS':
kwargs = {
'lambda_': cfg.MODEL.TRIPLET_DISTANCE_LEARNER.NCA_LOSS.LAMBDA,
'selective': cfg.MODEL.TRIPLET_DISTANCE_LEARNER.NCA_LOSS.SELECTIVE,
'swap': cfg.MODEL.TRIPLET_DISTANCE_LEARNER.NCA_LOSS.SWAP
}
criterion = SelectiveContrastiveTripletNCALoss(dim=1, eps=1e-08, reduction='mean', **kwargs)
dist_norm = 'cosine'
else:
raise ValueError(f'{name} is not a supported loss function.')
return criterion, dist_norm
def forward(self,
batched_inputs: List[Dict[str, 'torch.Tensor']],
return_pooled_features: bool=False) -> 'torch.Tensor':
"""Forward step of the model.
Parameters
----------
batched_inputs: list of dicts
Input batch
        return_pooled_features: bool, optional, default=False
If only the embeddings (False) or the embeddings and the
pooled/unpooled backbone features should be returned.
The backbone features can be used to visualize
similarities as seen by the model between images.
Check sap_computer_vision.utils.deep_similarity.
Returns
-------
torch.Tensor or Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
Returns the embedding tensor and if return_pooled_features=True
            pooled and unpooled backbone features.
"""
if self.training:
if isinstance(self.criterion,CircleLoss):
normalized_features = self.inference(batched_inputs)
labels = torch.tensor([i['class_id'] for i in batched_inputs], dtype=torch.long).to(self.device)
return self.criterion(normalized_features, labels)
else:
anchors, pos, neg = self.forward_for_triplets(batched_inputs)
return self.criterion(anchors, pos, neg)
else:
return self.inference(batched_inputs, return_pooled_features=return_pooled_features)
def forward_for_triplets(self, batched_inputs: List[Dict[str, 'torch.Tensor']]) -> Tuple['torch.Tensor', 'torch.Tensor', 'torch.Tensor']:
"""Forward step during Training if loss is not CircleLoss.
The input data are triplets or triplets have to be sampled from the batch.
Parameters
----------
batched_inputs: list of dicts
Input batch
Returns
-------
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
Returns embeddings for (anchors, positive examples, negative examples).
"""
        if self.triplet_strategy is None:  # Input is expected to already contain triplet dicts
anchor = self.inference([trip['anchor'] for trip in batched_inputs])
pos = self.inference([trip['pos'] for trip in batched_inputs])
neg = self.inference([trip['neg'] for trip in batched_inputs])
return anchor, pos, neg
else:
try:
labels = torch.tensor([i['class_id'] for i in batched_inputs], dtype=torch.long).to(self.device)
except KeyError as err:
raise KeyError('`batched_input` has no key `class_id`. This is either because a dataset without labels or with triplets was used as the train dataset. '
'To train with a dataset of triplets set `MODEL.TRIPLET_DISTANCE_LEARNER.TRIPLET_STRATEGY=EXTERNAL`.') from err
fw_results = self.inference(batched_inputs)
a, p, n = create_triplets_from_pk_sample(fw_results,
labels,
strategy_pos=self.triplet_strategy[0],
strategy_neg=self.triplet_strategy[1],
dist_norm=self.dist_norm,
anchor_pos_combinations_unique=getattr(self.criterion, 'swap', False))
return fw_results[a], fw_results[p], fw_results[n]
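

# --- Illustrative usage sketch (added for documentation, not part of the original API) ---
# Builds a TripletDistanceLearner around a timm backbone and embeds one random image.
# 'resnet18', out_dim=128 and the image size are example values; requires the optional `timm` dependency.
def _example_distance_learner() -> 'torch.Tensor':
    from .backbones.timm_backbones import TimmBackbone
    backbone = TimmBackbone('resnet18', pretrained=False)
    model = TripletDistanceLearner(backbone=backbone,
                                   pixel_mean=backbone.pixel_mean,
                                   pixel_std=backbone.pixel_std,
                                   loss=nn.TripletMarginLoss(),
                                   out_dim=128)
    model.eval()
    with torch.no_grad():
        return model([{'image': torch.rand(3, 224, 224)}])  # L2-normalized embedding of shape (1, 128)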
|
/sap_computer_vision_package-1.1.7-py3-none-any.whl/sap_computer_vision/modelling/distance_metric_learner.py
| 0.972663 | 0.481759 |
distance_metric_learner.py
|
pypi
|
import logging
from typing import Union, List, Dict
import torch
from detectron2.modeling import BACKBONE_REGISTRY, Backbone
from detectron2.layers import ShapeSpec
from detectron2.modeling.backbone.fpn import LastLevelMaxPool, FPN
try:
import timm
_timm_available = True
except ImportError:
_timm_available = False
logger = logging.getLogger(__name__)
class TimmBackbone(Backbone):
"""Backbone class to wrap timm models as backbones for detectron2.
"""
def __init__(self,
model_name: str,
pretrained: bool=True,
out_features: Union[None, List[str]]=None,
features_only: bool=True):
"""Create backbone based on a timm model.
Normal detectron backbone weights are initialized using e.g. the
trainer checkpointer or trainer.resume_or_load. timm-Models are
        initialized during creation. The 'pretrained' parameter enables/disables
        the loading of pretrained weights.
Parameters
----------
model_name: str
Name of the model. Check timm documentation for options.
        pretrained: bool, optional, default=True
Load pretrained weights.
        out_features: None or list, optional, default=None
            Names of the backbone layers included in the backbone output.
            If None the default layers specified by the timm library
            are used.
features_only: bool, optional, default=True
To use the models as backbones the head used during the pretraining
            should be removed. Many models from the timm library can be
explicitly created as feature extractors using the
'features_only' parameter during model creation.
All other models are created as classifiers and the classification
head is chopped off after creation. This is automatically done
            during init of the class if features_only=False. As a general rule,
            always try features_only=True first. If model creation crashes,
            try features_only=False. Check:
https://rwightman.github.io/pytorch-image-models/feature_extraction/
for more details.
"""
if not _timm_available:
raise ImportError('Missing dependency. Install \'timm\' to use a TimmBackbone (`pip install timm`).')
super(TimmBackbone, self).__init__()
self.model = timm.create_model(model_name,
pretrained=pretrained,
features_only=features_only,
out_indices=out_features)
self.pixel_mean = [x*255 for x in self.model.default_cfg['mean']]
self.pixel_std = [x*255 for x in self.model.default_cfg['std']]
self.features_only = features_only
if features_only:
self._output_names = []
self._output_shapes = {}
for info in self.model.feature_info.info:
name = info['module']
self._output_names.append(name)
self._output_shapes[name] = ShapeSpec(channels=info['num_chs'], stride=info['reduction'])
else:
self.model.reset_classifier(0)
self._output_names = ['forward_features']
self._output_shapes = {'forward_features': ShapeSpec(channels=self.model.num_features)}
def forward(self, x: 'torch.Tensor') -> Dict[str, 'torch.Tensor']:
"""Forward step of the model
        Parameters
        ----------
        x: torch.Tensor
            Image tensor
Returns
-------
Dict of torch.Tensors
Image features
"""
model_output = self.model(x)
if not self.features_only:
model_output = (model_output, )
return {k: o_i for k, o_i in zip(self._output_names, model_output)}
def output_shape(self) -> 'ShapeSpec':
"""
Returns
-------
ShapeSpec
ShapeSpec of the model output
"""
return self._output_shapes
def output_names(self) -> List['str']:
"""
Returns
-------
list of str
Names of the image features.
The names are used as keys in the dict returned by the forward step.
"""
return self._output_names
@BACKBONE_REGISTRY.register()
def build_timm_backbone(cfg, *args, **kwargs) -> 'Backbone':
"""Registered backbone function to create a plain TimmBackbone.
All options for the Backbone are specified in the config.
The relevant CfgNodes are cfg.MODEL.TIMM.
    Parameters
    ----------
    cfg: CfgNode
        Config
    Returns
    -------
Backbone
"""
model_args = {
'model_name': cfg.MODEL.TIMM.NAME,
'pretrained': cfg.MODEL.TIMM.PRETRAINED,
'out_features': cfg.MODEL.TIMM.get('OUT_INDICES', None),
'features_only': cfg.MODEL.TIMM.get('FEATURES_ONLY', True)
}
if cfg.INPUT.FORMAT.upper() != 'RGB':
        raise ValueError('Models from `timm` expect images in RGB channel ordering!')
model = TimmBackbone(**model_args)
fixed_input = model.model.default_cfg.get('fixed_input_size', False)
cfg_fixed_input = cfg.INPUT.get('FIXED_IMAGE_SIZE', None)
if cfg_fixed_input is None and fixed_input:
        logger.warning(f'The backbone expects a fixed input of shape {fixed_input}. `cfg.INPUT.FIXED_IMAGE_SIZE` not set!')
return model
@BACKBONE_REGISTRY.register()
def build_timm_fpn_backbone(cfg, *args, **kwargs) -> 'Backbone':
"""Registered backbone function to create a TimmBackbone wrapped in a FPN.
This most probably only works for CNN based models.
All options for the Backbone are specified in the config.
The relevant CfgNodes are cfg.MODEL.TIMM and cfg.MODEL.FPN
    Parameters
    ----------
    cfg: CfgNode
        Config
    Returns
    -------
Backbone
"""
if not cfg.MODEL.TIMM.get('FEATURES_ONLY', True):
raise ValueError('To build a FPN with a timm cnn model as a backbone `MODEL.TIMM.FEATURES_ONLY` '
'has to be `True`')
bottom_up = build_timm_backbone(cfg)
in_features = bottom_up.output_names()
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
backbone = FPN(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
top_block=LastLevelMaxPool(),
fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
return backbone
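

# --- Illustrative usage sketch (added for documentation, not part of the original API) ---
# Builds a TimmBackbone directly and runs it on one random image; 'resnet18' and the
# image size are example values, and the optional `timm` dependency must be installed.
def _example_timm_backbone() -> Dict[str, 'torch.Tensor']:
    backbone = TimmBackbone('resnet18', pretrained=False)
    # Returns a dict mapping timm feature names to feature maps of decreasing spatial size.
    return backbone(torch.rand(1, 3, 224, 224))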
|
/sap_computer_vision_package-1.1.7-py3-none-any.whl/sap_computer_vision/modelling/backbones/timm_backbones.py
| 0.912651 | 0.394697 |
timm_backbones.py
|
pypi
|
from typing import Dict, Union, List
import itertools
import logging
import torch
import numpy as np
from detectron2.config import configurable, CfgNode
import detectron2.utils.comm as comm
logger = logging.getLogger(__name__)
class ImageClassificationEvaluator:
"""Detectron2 compatible evaluator to get metrics for image classification use cases
The metrics are calculated using function get_metrics.
See its documentation for details.
"""
@configurable
def __init__(self, n_classes, beta=1.0, multi_label=False, distributed=False, class_names=None):
"""Create Evaluator instance. This class is intended to be used by the trainer.
To manually determine metrics it is easier to use the get_metrics function directly.
Parameters
----------
n_classes: int
Number of classes.
beta: float
F1 score beta.
        multi_label: bool, optional, default=False
            This is just a placeholder. Evaluation of multi-label predictions
            is not yet implemented!
        class_names: None or list of str, optional, default=None
Class id to str mapping. If provided the results
will be 'accuracy:<class_name>' instead of
'accuracy:<class_id>'.
distributed: bool, optional, default=False
WARNING: not tested
            In principle this evaluator can be used in a distributed training
scenario.
"""
self._n_classes = n_classes
self._beta = beta
self._distributed = distributed
self._multi_label = multi_label
self._class_names = class_names
self._binary_classifier = self._n_classes == 2 and not self._multi_label
self.reset()
@classmethod
def from_config(cls, cfg: 'CfgNode') -> Dict:
"""Classmethod to create an instance based on the config.
Check detectron configs mechanism.
Parameters
----------
cfg: CfgNode
Config
Returns
-------
dict
Dict with the relevant kwargs. This dict can be consumed by the
__init__ function.
"""
return {'beta': cfg.EVAL.IMAGE_CLASSIFICATION.BETA,
'distributed': cfg.SOLVER.REFERENCE_WORLD_SIZE > 0,
'multi_label': cfg.MODEL.IMAGE_CLASSIFIER.MULTI_LABEL,
'n_classes': cfg.MODEL.IMAGE_CLASSIFIER.NUM_CLASSES,
'class_names': cfg.get('TRAINING_INFO', {}).get('THING_CLASSES', None)
}
def process(self, inputs, outputs):
"""Function called by the trainer after each prediction step.
This functions stores all relevant results.
Parameters
----------
inputs: dict
Model input dict
outputs: torch.Tensor
Model output (logits)
"""
        for inp, out in zip(inputs, outputs):
            if self._multi_label:
                raise NotImplementedError
            truth = inp['class_id']
            if self._binary_classifier:
                predicted = int(out > 0.5)
            else:
                predicted = int(torch.argmax(out))
self._predictions.append(predicted)
self._truth.append(truth)
def reset(self):
"""Reset all stored results."""
self._predictions = []
self._truth = []
def evaluate(self) -> Dict[str, float]:
"""Evaluate based on stored results.
Returns
-------
dict
Dict containing metrics.
"""
if self._distributed:
comm.synchronize()
predictions = comm.gather(self._predictions, dst=0)
predictions = list(itertools.chain(*predictions))
truth = comm.gather(self._truth, dst=0)
truth = list(itertools.chain(*truth))
if not comm.is_main_process():
return
else:
predictions = self._predictions
truth = self._truth
if self._multi_label:
raise NotImplementedError
else:
return get_metrics(predictions,
truth,
beta=self._beta,
class_names=self._class_names,
return_confusion_matrix=False)
def get_metrics(predictions: 'np.array',
truth,
beta: float=1.0,
return_confusion_matrix: bool=True,
class_names: Union[None, List[str]]=None):
""""Function to calculate metrics for image classification.
For giving predictions and true labels vales accuracies, recall values and F1 scores
are calculated for every class individually and all classes combined.
Parameters
----------
    predictions: np.array(n) of ints
        Predicted class ids.
    truth: np.array(n) of ints
        True class ids.
beta: float, optional, default=1.0
F1 score beta.
    return_confusion_matrix: bool, optional, default=True
Return confusion matrix.
    class_names: None or list of str, optional, default=None
Class id to str mapping. If provided the results
will be 'accuracy:<class_name>' instead of
'accuracy:<class_id>'.
Returns
----------
dict(str, float)
Dictionary with the different metrics
"""
if class_names is None:
n_classes = len(np.unique(truth))
class_names = [str(i) for i in range(n_classes)]
else:
n_classes = len(class_names)
confusions = np.zeros((n_classes, n_classes), dtype=np.int32)
for t, p in zip(truth, predictions):
confusions[t, p] += 1
results = {}
results['accuracy'] = np.sum(np.diag(confusions)) / np.sum(confusions)
    for i, n in enumerate(class_names):
        # confusions[true, predicted]: column sums give the number of predictions per class,
        # row sums the number of true examples per class.
        precision = confusions[i, i] / np.sum(confusions[:, i])
        recall = confusions[i, i] / np.sum(confusions[i, :])
        results[f'accuracy:{n}'] = precision
        results[f'recall:{n}'] = recall
        results[f'f1score:{n}'] = (1 + beta**2) * (precision * recall) / ((beta**2 * precision) + recall)
if return_confusion_matrix:
results['confusions'] = confusions.copy()
return results
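

# --- Illustrative toy call (added for documentation, not part of the original API) ---
# Predictions and labels below are made-up example values for three classes.
def _example_classification_metrics() -> dict:
    predictions = [0, 1, 2, 2, 1, 0]
    truth = [0, 1, 2, 1, 1, 0]
    return get_metrics(predictions, truth, beta=1.0, return_confusion_matrix=False)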
|
/sap_computer_vision_package-1.1.7-py3-none-any.whl/sap_computer_vision/evaluators/image_classification.py
| 0.935243 | 0.331539 |
image_classification.py
|
pypi
|
import logging
from collections import defaultdict, namedtuple
import itertools
from typing import Dict, Iterable, List, Any, Union
import numpy as np
import torch
from detectron2.data import DatasetCatalog
import detectron2.utils.comm as comm
from detectron2.structures import Boxes, pairwise_iou
from detectron2.modeling.matcher import Matcher
from detectron2.config import configurable, CfgNode
from detectron2.structures import Instances
DetectionBox = namedtuple('DetectionBox', ['image_id', 'score', 'cls', 'xmin', 'ymin', 'xmax', 'ymax'])
class ObjectDetectionEvaluator:
"""Detectron2 compatible evaluator to get metrics for image classification use cases
The metrics are calculated using function get_metrics.
See its documentation for details.
"""
@configurable
def __init__(self,
dataset_names: Iterable[str],
thresholds=None,
min_iou_for_hit=0.5,
box_overlap_cut=None,
box_overlap_keep='biggest',
ignore_labels_for_overall_performance=False,
class_names=None,
distributed: bool=False):
"""Create Evaluator instance. This class is intended to be used by the trainer.
To manually determine metrics it is easier to use the get_metrics function directly.
Parameters
----------
        dataset_names: iterable of str
            Names of the datasets used to look up the ground truth
            annotations via DatasetCatalog.
thresholds: None or np.array of floats
Confidence cuts used to calculate the mAP.
If None the default thresholds are used.
min_iou_for_hit: float, optional, default=0.5
Min iou (intersection over union) for a predicted bbox and a true bbox to be
considered a hit.
box_overlap_cut: None or float, optional, default=None
Optional cut to reject overlapping predictions.
If 'None' no cut is applied.
box_overlap_keep: str, optional, default='biggest'
            When applying a cut for overlapping boxes either the 'biggest'
            bbox or the bbox with the 'highest_score'
            will be kept.
ignore_labels_for_overall_performance: bool, optional, default=False
            If True the labels of the bboxes are ignored when calculating
            the overall performance.
        class_names: None or list of str, optional, default=None
Class id to str mapping. If provided the results
will be 'mAP:<class_name>' instead of
'mAP:<class_id>'.
distributed: bool, optional, default=False
WARNING: not tested
            In principle this evaluator can be used in a distributed training
scenario.
"""
self._min_iou_for_hit = min_iou_for_hit
self._box_overlap_cut = box_overlap_cut
self._box_overlap_keep = box_overlap_keep
self._ignore_labels_for_overall_performance = ignore_labels_for_overall_performance
self._thresholds = thresholds
self._distributed = distributed
self._cpu_device = torch.device("cpu")
self._logger = logging.getLogger(__name__)
self._ground_truth = {}
self._class_names = class_names
for dataset_name in dataset_names:
self._dataset_name = dataset_name
self._ground_truth = {**self._ground_truth, **{v['image_id']: v for v in DatasetCatalog.get(dataset_name)}}
self.reset()
@classmethod
def from_config(cls, cfg: 'CfgNode') -> Dict:
"""Classmethod to create an instance based on the config.
Check detectron configs mechanism.
Parameters
----------
cfg: CfgNode
Config
Returns
-------
dict
Dict with the relevant kwargs. This dict can be consumed by the
__init__ function.
"""
attr = {'dataset_names': cfg.DATASETS.TEST,
'distributed': cfg.SOLVER.REFERENCE_WORLD_SIZE > 0,
'thresholds': cfg.EVAL.OBJECT_DETECTION.THRESHOLDS,
'min_iou_for_hit': cfg.EVAL.OBJECT_DETECTION.MIN_IOU_FOR_HIT,
'box_overlap_cut': cfg.EVAL.OBJECT_DETECTION.BOX_OVERLAP_CUT,
'box_overlap_keep': cfg.EVAL.OBJECT_DETECTION.BOX_OVERLAP_KEEP,
'ignore_labels_for_overall_performance': cfg.EVAL.OBJECT_DETECTION.IGNORE_CLASS_LABELS_FOR_OVERALL_PERFORMANCE,
'class_names': cfg.get('TRAINING_INFO', {}).get('THING_CLASSES', None)}
return attr
def reset(self):
"""Reset all stored results."""
self._predictions = {}
def process(self, inputs, outputs):
"""Function called by the trainer after each prediction step.
This functions stores all relevant results.
Parameters
----------
inputs: dict
Model input dict
outputs: torch.Tensor
Model output (logits)
"""
for input, output in zip(inputs, outputs):
image_id = input["image_id"]
if "instances" not in output.keys():
raise ValueError('`output` does not contain `instances`. Probably the model is not an '
'object detection model outputting bounding boxes.')
instances = output["instances"].to(self._cpu_device)
if image_id in self._predictions:
raise RuntimeError('`image_id` appeared twice!')
self._predictions[image_id] = instances
def evaluate(self):
"""Evaluate based on stored results.
Returns
-------
dict
Dict containing metrics.
"""
if self._distributed:
comm.synchronize()
_predictions = comm.gather(self._predictions, dst=0)
_predictions = list(itertools.chain(*_predictions))
_ground_truth = comm.gather(self._ground_truth, dst=0)
_ground_truth = list(itertools.chain(*_ground_truth))
if not comm.is_main_process():
return
else:
_predictions = self._predictions
_ground_truth = self._ground_truth
ground_truth = []
predictions = []
for img_id, prediction in _predictions.items():
ground_truth.append(self._ground_truth[img_id])
predictions.append(prediction)
results = evaluate_box_proposals(predictions,
ground_truth,
thresholds=self._thresholds,
min_iou_for_hit=self._min_iou_for_hit,
pred_box_overlap_threshold=self._box_overlap_cut,
keep_overlapping_box=self._box_overlap_keep,
ignore_class_label_for_overall_performance=self._ignore_labels_for_overall_performance,
class_names=self._class_names)
return results
def evaluate_box_proposals(predictions: List[Instances],
ground_truth: List[Dict[str, Any]],
thresholds: Union[None, 'np.array']=None,
min_iou_for_hit: float=0.5,
pred_box_overlap_threshold: Union[None, float]=None,
keep_overlapping_box: str='biggest',
ignore_class_label_for_overall_performance: bool=False,
add_curve_points: bool=False,
class_names: Union[None, List[str]]=None):
"""Create Evaluator instance. This class is intended to be used by the trainer.
To manually determine metrics it is easier to use the get_metrics function directly.
Parameters
----------
predictions: list of detectron2.structures.Instances
List of model predictions. The model predictions are of type
Instances. Those Instances should have values for
"pred_boxes", "pred_classes", "scores".
ground_truth: list of dicts
        List of ground truths. The dicts use detectron2's lightweight
        dataset dict format. It is the return format of
        DatasetCatalog.get(dataset_name).
thresholds: None or np.array of floats
Confidence cuts used to calculate the mAP.
If None the default thresholds are used [0.05, 0.1, ... 0.95].
min_iou_for_hit: float, optional, default=0.5
Min iou (intersection over union) for a predicted bbox and a true bbox to be
considered a hit.
pred_box_overlap_threshold: None or float, optional, default=None
Optional cut to reject overlapping predictions.
If 'None' no cut is applied.
keep_overlapping_box: str, optional, default='biggest'
        When applying a cut for overlapping boxes either the 'biggest'
        bbox or the bbox with the 'highest_score'
        will be kept.
ignore_class_label_for_overall_performance: bool, optional, default=False
        If True the labels of the bboxes are ignored when calculating
        the overall performance.
add_curve_points: bool, optional, default=False
If True the individual points from the mAP calculation are added to the
return dict.
    class_names: None or list of str, optional, default=None
Class id to str mapping. If provided the results
will be 'mAP:<class_name>' instead of
'mAP:<class_id>'.
"""
pred_box_overlap_threshold = None if not isinstance(pred_box_overlap_threshold, float) else pred_box_overlap_threshold
    if thresholds is None or len(thresholds) == 0:
        thresholds = np.arange(0.05, 0.95+1e-5, 0.05)
scores = defaultdict(lambda: defaultdict(lambda: {'tp': 0, 'fp': 0, 'fn': 0}))
for prediction, gt in zip(predictions, ground_truth):
gt_boxes = Boxes(torch.as_tensor([p["bbox"] for p in gt["annotations"]], dtype=float).reshape(-1, 4))
gt_classes = np.array([p["category_id"] for p in gt["annotations"]], dtype=int)
pred_boxes = prediction.pred_boxes
pred_classes = prediction.pred_classes.numpy().astype(int)
pred_scores = prediction.scores.numpy()
classes = set(gt_classes.tolist()).union(pred_classes.tolist())
if ignore_class_label_for_overall_performance:
classes.add(-1)
for cls in classes:
if cls == -1:
gt_mask_cls = [True] * len(gt_classes)
pred_mask_cls = [True] * len(pred_classes)
else:
gt_mask_cls = gt_classes == cls
pred_mask_cls = pred_classes == cls
gt_boxes_cls = Boxes(gt_boxes.tensor[gt_mask_cls])
pred_boxes_cls = Boxes(pred_boxes.tensor[pred_mask_cls])
pred_scores_cls = pred_scores[pred_mask_cls]
if pred_box_overlap_threshold:
                if keep_overlapping_box.lower() == 'biggest':
                    # Sort descending so that the biggest box is kept when boxes overlap.
                    order = np.argsort(pred_boxes_cls.area().numpy())[::-1]
                elif keep_overlapping_box.lower() == 'highest_score':
                    # Sort descending so that the highest scoring box is kept.
                    order = np.argsort(pred_scores_cls)[::-1]
else:
raise ValueError('Implemented options for `keep_overlapping_box` are `biggest` and `highest_score`.')
pred_boxes_cls.tensor = pred_boxes_cls.tensor[order]
pred_scores_cls = pred_scores_cls[order]
iou = pairwise_iou(pred_boxes_cls, pred_boxes_cls)
keep = np.ones_like(pred_scores_cls, dtype=bool)
for idx, box in enumerate(pred_boxes_cls):
if idx == 0:
continue
keep[idx] = all(iou[idx, :idx] < pred_box_overlap_threshold)
pred_boxes_cls = pred_boxes_cls[keep]
                pred_scores_cls = pred_scores_cls[keep]
iou = pairwise_iou(gt_boxes_cls, pred_boxes_cls)
matcher = Matcher([min_iou_for_hit], [0, 1])
matches, match_labels = matcher(iou)
match_labels = match_labels.numpy().astype(bool)
matches = matches.numpy()
for thresh in thresholds:
not_ignore = pred_scores_cls >= thresh
is_match = np.logical_and(not_ignore, match_labels)
matches_bool = np.zeros(len(gt_boxes_cls), dtype=bool)
matches_bool[np.unique(matches[is_match])] = True
tp_i = np.sum(matches_bool)
scores[thresh][cls]['tp'] += tp_i
scores[thresh][cls]['fn'] += len(gt_boxes_cls) - tp_i
scores[thresh][cls]['fp'] += np.sum(np.logical_and(not_ignore, np.logical_not(match_labels)))
def get_recall_precision_auc(thresholds, dict_f, sum_dict=None):
recall = np.zeros(len(thresholds), dtype=float)
precision = np.zeros(len(thresholds), dtype=float)
for i, t in enumerate(thresholds):
tp = dict_f(t)['tp']
fp = dict_f(t)['fp']
fn = dict_f(t)['fn']
recall[i] = tp / (tp + fn) if tp + fn > 0 else 0.
precision[i] = tp / (tp + fp) if tp + fp > 0 else 0.
if sum_dict is not None:
sum_dict[t]['tp'] += tp
sum_dict[t]['fp'] += fp
sum_dict[t]['fn'] += fn
if add_curve_points:
return {'cls-recall': recall.tolist(),
'cls-precision': precision.tolist(),
'cls-thresholds': thresholds.tolist(),
'mAP': _call_mAP(recall, precision)}
else:
return _call_mAP(recall, precision)
metrics = {}
all_classes = set([*itertools.chain(*[[*v.keys()] for v in scores.values()])])
values_all_classes = defaultdict(lambda: {'tp': 0, 'fp': 0, 'fn': 0})
for cls in all_classes:
values = get_recall_precision_auc(thresholds, lambda t: scores[t][cls], values_all_classes)
if cls == -1:
cls = 'allClasses'
else:
cls = str(cls) if class_names is None else class_names[int(cls)]
metrics[str(cls) if add_curve_points else f'mAP/{cls}'] = values
if not ignore_class_label_for_overall_performance:
metrics['allClasses' if add_curve_points else 'mAP/allClasses'] = get_recall_precision_auc(thresholds, lambda t: values_all_classes[t])
return metrics
def _call_mAP(recall, precision, points=np.arange(0., 1.0+1e-5, 0.1)):
order = np.argsort(recall)
recall = recall[order]
precision = precision[order]
return np.mean([np.max(precision[recall >= p], initial=0) for p in points])
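

# --- Illustrative toy call (added for documentation, not part of the original API) ---
# Mean average precision from a small recall/precision curve; all values are examples.
def _example_map_from_curve() -> float:
    recall = np.array([0.2, 0.5, 0.8])
    precision = np.array([0.9, 0.7, 0.4])
    return _call_mAP(recall, precision)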
|
/sap_computer_vision_package-1.1.7-py3-none-any.whl/sap_computer_vision/evaluators/object_detection_pascal_voc_style.py
| 0.877955 | 0.305361 |
object_detection_pascal_voc_style.py
|
pypi
|
from typing import NoReturn, Union, Iterable, Dict
import itertools
import torch
import numpy as np
from detectron2.config import configurable
from scipy.spatial.distance import pdist, cdist, squareform
import detectron2.utils.comm as comm
from detectron2.config import CfgNode
class ContrastiveEvaluator:
"""Detectron2 compatible evaluator to get metrics for triplet distance learning.
The metrics are calculated using function get_metrics.
See its documentation for details.
"""
@configurable
def __init__(self, ks: Union[None, Iterable[int]]=None, metric='euclidean', metric_kwargs={}, distributed=False):
"""Create Evaluator instance. This class is intended to be used by the trainer.
To manually determine metrics it is easier to use the get_metrics function directly.
Parameters
----------
ks: None or iterable of ints, optional, default=None
Different k values used in the evaluation.
The values have to be > 0.
        metric: str, optional, default='euclidean'
Metric used during evaluation to determine the distance between embeddings.
Check get_metrics for details.
metric_kwargs: dict, optional, default={}
            kwargs passed to scipy.spatial.distance.pdist/cdist
Check get_metrics for details.
distributed: bool, optional, default=False
WARNING: not tested
            In principle this evaluator can be used in a distributed training
scenario.
"""
if ks is None:
ks = [1, 2, 3]
self._ks = ks
self._metric = metric
self._metric_kwargs = {} if metric_kwargs is None else metric_kwargs
self._distributed = distributed
self._cpu_device = torch.device("cpu")
self.reset()
def process(self, inputs: Dict, outputs: 'torch.Tensor') -> NoReturn:
"""Function called by the trainer after each prediction step.
This functions stores all relevant results.
Parameters
----------
inputs: dict
Model input dict
outputs: torch.Tensor
Embeddings calculated by the model.
"""
self._labels.extend([inp['class_id'] for inp in inputs])
self._embeddings.extend(outputs.to(self._cpu_device).tolist())
@classmethod
def from_config(cls, cfg: 'CfgNode') -> Dict:
"""Classmethod to create an instance based on the config.
Check detectron configs mechanism.
Parameters
----------
cfg: CfgNode
Config
Returns
-------
dict
Dict with the relevant kwargs. This dict can be consumed by the
__init__ function.
"""
attr = {'ks': cfg.EVAL.CONTRASTIVE.TOP_KS,
'metric': cfg.EVAL.CONTRASTIVE.METRIC,
'metric_kwargs': cfg.EVAL.CONTRASTIVE.get('METRIC_KWARGS', {}),
'distributed': cfg.SOLVER.REFERENCE_WORLD_SIZE > 0,}
return attr
def reset(self) -> NoReturn:
"""Reset all stored results."""
self._embeddings = []
self._labels = []
def evaluate(self) -> Dict[str, float]:
"""Evaluate based on stored results.
Returns
-------
dict
Dict containing metrics.
"""
if self._distributed:
comm.synchronize()
embeddings = comm.gather(self._embeddings, dst=0)
embeddings = list(itertools.chain(*embeddings))
labels = comm.gather(self._labels, dst=0)
labels = list(itertools.chain(*labels))
if not comm.is_main_process():
return
else:
embeddings = self._embeddings
labels = self._labels
embeddings = np.array(embeddings)
labels = np.array(labels)
return get_metrics(embeddings, labels, ks=self._ks, metric=self._metric, **self._metric_kwargs)
def _get_mask(label_i: 'np.array', labels: 'np.array') -> 'np.array':
mask = labels == label_i
n = np.sum(mask)
return mask, n
def get_metrics(embeddings: 'np.array',
labels: 'np.array',
index_embeddings: Union[None, 'np.array']=None,
index_labels: Union[None, 'np.array']=None,
ks: Union[None, Iterable[int]]=None,
metric: str='euclidean',
**metric_kwargs) -> Dict[str, float]:
""""Function to calculate metrics for contrastive/distance learning applications.
For giving embeddings and labels the `top_k_accuracy`, `rank_rate_k` and `mean_dist_pos/neg` are calculated.
top_k_accuracy:
For every test case it is check of an example of the same class is within the top k results.
If it is the case this is counted as 1 and if not as 0. For the accuracy the counts are devided
by the number of tests.
rank_rate_k:
        For every test case the index/rank `r` of the nearest neighbor of the same class is determined.
        The ranks start at 0. If there is no example of the same class among the k closest neighbors,
        r=k. With those ranks the mean of (k-r) / k is calculated.
mean_dist_pos/mean_dist_neg:
        For every test case the mean distance to examples of the same class and to examples of all other
        classes is calculated. Those distances are averaged and returned.
    To calculate distance metrics scipy.spatial.distance.pdist/cdist are used.
Parameters
----------
embeddings: np.array(n, feat_dim) of floats
Embeddings vector for the test cases.
labels: np.array(n) of ints
Label ids to determine positive/negative examples.
index_embeddings: None or np.array(m, feat_dim) of floats, optional, default=None
Embeddings vector for the index cases.
        If None, for every test case the n-1 remaining test cases are used as the index.
index_labels: None or np.array(m) of ints, optional, default=None
Label ids to determine positive/negative examples.
ks: Iterable(int), optional, default=None
Different `k` values used to calculate the rank rate and the accuracy.
If `None` [1, 3, 5, 10] is used.
metric: str, optional, default='euclidean'
        Metric used to calculate the distance. For distance calculations
        scipy.spatial.distance.pdist/cdist are used. See the scipy documentation for
        available options.
**metric_kwargs
        All other keyword arguments are passed to the scipy.spatial.distance.pdist/cdist
        functions. See the scipy documentation for
        available options.
Returns
----------
dict(str, float)
Dictionary with the different metrics
Raises
------
ValueError
        If `index_embeddings` and `index_labels` have different lengths.
"""
if ks is None:
ks = [1, 3, 5, 10]
metrics = {**{f'top_{k}_acc': 0 for k in ks}, **{f'rank_rate_{k}': 0 for k in ks}}
if index_embeddings is None:
expanded_matrix = squareform(pdist(embeddings, metric=metric, **metric_kwargs))
index_labels = labels
min_n = 2
else:
expanded_matrix = cdist(embeddings, index_embeddings, metric=metric, **metric_kwargs)
if index_labels is None or len(index_labels) != len(index_embeddings):
raise ValueError("When using `index_embeddings` `index_labels` with the same length "
"must be provided to the function")
min_n = 1
cls_masks = {}
dists_pos = []
dists_neg = []
tested = 0
for i, l_i in enumerate(labels):
cls_mask, n = cls_masks.setdefault(l_i, _get_mask(l_i, index_labels))
if n < min_n:
continue
tested += 1
dists_neg.append(np.mean(expanded_matrix[i, np.logical_not(cls_mask)]))
dists_pos.append(np.mean(expanded_matrix[i, cls_mask]))
order = np.argsort(expanded_matrix[i, :])
if index_embeddings is None:
dists_pos[-1] *= n/(n-1)
order = np.delete(order, np.where(order == i)[0])
for k in ks:
metrics[f'top_{k}_acc'] += 1 if l_i in index_labels[order[:k]] else 0
positions = np.where(index_labels[order[:k]] == l_i)[0]
if len(positions) == 0:
metrics[f'rank_rate_{k}'] += 0.
else:
metrics[f'rank_rate_{k}'] += (k - positions[0]) / k
for k in ks:
metrics[f'top_{k}_acc'] /= tested
metrics[f'rank_rate_{k}'] /= tested
metrics['mean_dist_pos'] = np.mean(dists_pos)
metrics['mean_dist_neg'] = np.mean(dists_neg)
return metrics
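

# --- Illustrative toy call (added for documentation, not part of the original API) ---
# Evaluates random embeddings; the number of samples, feature dimension, class count
# and the k values are example values only.
def _example_contrastive_metrics() -> Dict[str, float]:
    rng = np.random.default_rng(0)
    embeddings = rng.normal(size=(20, 8))
    labels = rng.integers(0, 4, size=20)
    return get_metrics(embeddings, labels, ks=[1, 3])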
|
/sap_computer_vision_package-1.1.7-py3-none-any.whl/sap_computer_vision/evaluators/contrastive.py
| 0.962036 | 0.472075 |
contrastive.py
|
pypi
|