index | package | name | docstring | code | signature |
---|---|---|---|---|---|
6,797 | bottle | abort | Aborts execution and causes a HTTP error. | def abort(code=500, text='Unknown Error.'):
""" Aborts execution and causes a HTTP error. """
raise HTTPError(code, text)
| (code=500, text='Unknown Error.') |
6,798 | bottle | auth_basic | Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. | def auth_basic(check, realm="private", text="Access denied"):
''' Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. '''
def decorator(func):
@functools.wraps(func)
def wrapper(*a, **ka):
user, password = request.auth or (None, None)
if user is None or not check(user, password):
err = HTTPError(401, text)
err.add_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
return err
return func(*a, **ka)
return wrapper
return decorator
| (check, realm='private', text='Access denied') |
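Usage sketch for ``auth_basic`` (not part of the dataset row), assuming bottle is installed; ``check_credentials`` is a hypothetical callback:
from bottle import route, auth_basic

def check_credentials(user, password):
    # Hypothetical check; replace with a real credential lookup.
    return user == 'admin' and password == 'secret'

@route('/admin')
@auth_basic(check_credentials, realm='private')
def admin_page():
    return 'Only authenticated users see this.'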
6,801 | bottle | cached_property | A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property. | class cached_property(object):
''' A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property. '''
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None: return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
| (func) |
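A short usage sketch for ``cached_property`` as defined above (assumes bottle is installed; ``Report`` is a hypothetical class):
from bottle import cached_property

class Report:
    @cached_property
    def data(self):
        print('computing...')          # runs only on the first access
        return [1, 2, 3]

r = Report()
r.data        # prints 'computing...' and stores the result in r.__dict__
r.data        # now an ordinary instance attribute; no recomputation
del r.data    # deleting the attribute resets the property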
6,802 | bottle | __get__ | null | def __get__(self, obj, cls):
if obj is None: return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
| (self, obj, cls) |
6,803 | bottle | __init__ | null | def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
| (self, func) |
6,804 | bottle | <lambda> | null | callable = lambda x: hasattr(x, '__call__')
| (x) |
6,806 | bottle | cookie_decode | Verify and decode an encoded string. Return an object or None. | def cookie_decode(data, key):
''' Verify and decode an encoded string. Return an object or None.'''
data = tob(data)
if cookie_is_encoded(data):
sig, msg = data.split(tob('?'), 1)
if _lscmp(sig[1:], base64.b64encode(hmac.new(tob(key), msg, digestmod=hashlib.md5).digest())):
return pickle.loads(base64.b64decode(msg))
return None
| (data, key) |
6,807 | bottle | cookie_encode | Encode and sign a pickle-able object. Return a (byte) string | def cookie_encode(data, key):
''' Encode and sign a pickle-able object. Return a (byte) string '''
msg = base64.b64encode(pickle.dumps(data, -1))
sig = base64.b64encode(hmac.new(tob(key), msg, digestmod=hashlib.md5).digest())
return tob('!') + sig + tob('?') + msg
| (data, key) |
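Round-trip sketch for the cookie helpers above (0.12-era bottle API; the key is an arbitrary example value):
from bottle import cookie_encode, cookie_decode, cookie_is_encoded

secret = 'signing-key'
token = cookie_encode({'user': 'alice'}, secret)         # b'!<signature>?<payload>'
assert cookie_is_encoded(token)
assert cookie_decode(token, secret) == {'user': 'alice'}
assert cookie_decode(token, 'wrong-key') is None         # bad signature -> None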
6,808 | bottle | cookie_is_encoded | Return True if the argument looks like an encoded cookie. | def cookie_is_encoded(data):
''' Return True if the argument looks like an encoded cookie.'''
return bool(data.startswith(tob('!')) and tob('?') in data)
| (data) |
6,809 | datetime | date | date(year, month, day) --> date object | class date:
"""Concrete date type.
Constructors:
__new__()
fromtimestamp()
today()
fromordinal()
Operators:
__repr__, __str__
__eq__, __le__, __lt__, __ge__, __gt__, __hash__
__add__, __radd__, __sub__ (add/radd only with timedelta arg)
Methods:
timetuple()
toordinal()
weekday()
isoweekday(), isocalendar(), isoformat()
ctime()
strftime()
Properties (readonly):
year, month, day
"""
__slots__ = '_year', '_month', '_day', '_hashcode'
def __new__(cls, year, month=None, day=None):
"""Constructor.
Arguments:
year, month, day (required, base 1)
"""
if (month is None and
isinstance(year, (bytes, str)) and len(year) == 4 and
1 <= ord(year[2:3]) <= 12):
# Pickle support
if isinstance(year, str):
try:
year = year.encode('latin1')
except UnicodeEncodeError:
# More informative error message.
raise ValueError(
"Failed to encode latin1 string when unpickling "
"a date object. "
"pickle.load(data, encoding='latin1') is assumed.")
self = object.__new__(cls)
self.__setstate(year)
self._hashcode = -1
return self
year, month, day = _check_date_fields(year, month, day)
self = object.__new__(cls)
self._year = year
self._month = month
self._day = day
self._hashcode = -1
return self
# Additional constructors
@classmethod
def fromtimestamp(cls, t):
"Construct a date from a POSIX timestamp (like time.time())."
y, m, d, hh, mm, ss, weekday, jday, dst = _time.localtime(t)
return cls(y, m, d)
@classmethod
def today(cls):
"Construct a date from time.time()."
t = _time.time()
return cls.fromtimestamp(t)
@classmethod
def fromordinal(cls, n):
"""Construct a date from a proleptic Gregorian ordinal.
January 1 of year 1 is day 1. Only the year, month and day are
non-zero in the result.
"""
y, m, d = _ord2ymd(n)
return cls(y, m, d)
@classmethod
def fromisoformat(cls, date_string):
"""Construct a date from the output of date.isoformat()."""
if not isinstance(date_string, str):
raise TypeError('fromisoformat: argument must be str')
try:
assert len(date_string) == 10
return cls(*_parse_isoformat_date(date_string))
except Exception:
raise ValueError(f'Invalid isoformat string: {date_string!r}')
@classmethod
def fromisocalendar(cls, year, week, day):
"""Construct a date from the ISO year, week number and weekday.
This is the inverse of the date.isocalendar() function"""
# Year is bounded this way because 9999-12-31 is (9999, 52, 5)
if not MINYEAR <= year <= MAXYEAR:
raise ValueError(f"Year is out of range: {year}")
if not 0 < week < 53:
out_of_range = True
if week == 53:
# ISO years have 53 weeks in them on years starting with a
# Thursday and leap years starting on a Wednesday
first_weekday = _ymd2ord(year, 1, 1) % 7
if (first_weekday == 4 or (first_weekday == 3 and
_is_leap(year))):
out_of_range = False
if out_of_range:
raise ValueError(f"Invalid week: {week}")
if not 0 < day < 8:
raise ValueError(f"Invalid weekday: {day} (range is [1, 7])")
# Now compute the offset from (Y, 1, 1) in days:
day_offset = (week - 1) * 7 + (day - 1)
# Calculate the ordinal day for monday, week 1
day_1 = _isoweek1monday(year)
ord_day = day_1 + day_offset
return cls(*_ord2ymd(ord_day))
# Conversions to string
def __repr__(self):
"""Convert to formal string, for repr().
>>> dt = datetime(2010, 1, 1)
>>> repr(dt)
'datetime.datetime(2010, 1, 1, 0, 0)'
>>> dt = datetime(2010, 1, 1, tzinfo=timezone.utc)
>>> repr(dt)
'datetime.datetime(2010, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)'
"""
return "%s.%s(%d, %d, %d)" % (self.__class__.__module__,
self.__class__.__qualname__,
self._year,
self._month,
self._day)
# XXX These shouldn't depend on time.localtime(), because that
# clips the usable dates to [1970 .. 2038). At least ctime() is
# easily done without using strftime() -- that's better too because
# strftime("%c", ...) is locale specific.
def ctime(self):
"Return ctime() style string."
weekday = self.toordinal() % 7 or 7
return "%s %s %2d 00:00:00 %04d" % (
_DAYNAMES[weekday],
_MONTHNAMES[self._month],
self._day, self._year)
def strftime(self, fmt):
"Format using strftime()."
return _wrap_strftime(self, fmt, self.timetuple())
def __format__(self, fmt):
if not isinstance(fmt, str):
raise TypeError("must be str, not %s" % type(fmt).__name__)
if len(fmt) != 0:
return self.strftime(fmt)
return str(self)
def isoformat(self):
"""Return the date formatted according to ISO.
This is 'YYYY-MM-DD'.
References:
- http://www.w3.org/TR/NOTE-datetime
- http://www.cl.cam.ac.uk/~mgk25/iso-time.html
"""
return "%04d-%02d-%02d" % (self._year, self._month, self._day)
__str__ = isoformat
# Read-only field accessors
@property
def year(self):
"""year (1-9999)"""
return self._year
@property
def month(self):
"""month (1-12)"""
return self._month
@property
def day(self):
"""day (1-31)"""
return self._day
# Standard conversions, __eq__, __le__, __lt__, __ge__, __gt__,
# __hash__ (and helpers)
def timetuple(self):
"Return local time tuple compatible with time.localtime()."
return _build_struct_time(self._year, self._month, self._day,
0, 0, 0, -1)
def toordinal(self):
"""Return proleptic Gregorian ordinal for the year, month and day.
January 1 of year 1 is day 1. Only the year, month and day values
contribute to the result.
"""
return _ymd2ord(self._year, self._month, self._day)
def replace(self, year=None, month=None, day=None):
"""Return a new date with new values for the specified fields."""
if year is None:
year = self._year
if month is None:
month = self._month
if day is None:
day = self._day
return type(self)(year, month, day)
# Comparisons of date objects with other.
def __eq__(self, other):
if isinstance(other, date):
return self._cmp(other) == 0
return NotImplemented
def __le__(self, other):
if isinstance(other, date):
return self._cmp(other) <= 0
return NotImplemented
def __lt__(self, other):
if isinstance(other, date):
return self._cmp(other) < 0
return NotImplemented
def __ge__(self, other):
if isinstance(other, date):
return self._cmp(other) >= 0
return NotImplemented
def __gt__(self, other):
if isinstance(other, date):
return self._cmp(other) > 0
return NotImplemented
def _cmp(self, other):
assert isinstance(other, date)
y, m, d = self._year, self._month, self._day
y2, m2, d2 = other._year, other._month, other._day
return _cmp((y, m, d), (y2, m2, d2))
def __hash__(self):
"Hash."
if self._hashcode == -1:
self._hashcode = hash(self._getstate())
return self._hashcode
# Computations
def __add__(self, other):
"Add a date to a timedelta."
if isinstance(other, timedelta):
o = self.toordinal() + other.days
if 0 < o <= _MAXORDINAL:
return type(self).fromordinal(o)
raise OverflowError("result out of range")
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
"""Subtract two dates, or a date and a timedelta."""
if isinstance(other, timedelta):
return self + timedelta(-other.days)
if isinstance(other, date):
days1 = self.toordinal()
days2 = other.toordinal()
return timedelta(days1 - days2)
return NotImplemented
def weekday(self):
"Return day of the week, where Monday == 0 ... Sunday == 6."
return (self.toordinal() + 6) % 7
# Day-of-the-week and week-of-the-year, according to ISO
def isoweekday(self):
"Return day of the week, where Monday == 1 ... Sunday == 7."
# 1-Jan-0001 is a Monday
return self.toordinal() % 7 or 7
def isocalendar(self):
"""Return a named tuple containing ISO year, week number, and weekday.
The first ISO week of the year is the (Mon-Sun) week
containing the year's first Thursday; everything else derives
from that.
The first week is 1; Monday is 1 ... Sunday is 7.
ISO calendar algorithm taken from
http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
(used with permission)
"""
year = self._year
week1monday = _isoweek1monday(year)
today = _ymd2ord(self._year, self._month, self._day)
# Internally, week and day have origin 0
week, day = divmod(today - week1monday, 7)
if week < 0:
year -= 1
week1monday = _isoweek1monday(year)
week, day = divmod(today - week1monday, 7)
elif week >= 52:
if today >= _isoweek1monday(year+1):
year += 1
week = 0
return _IsoCalendarDate(year, week+1, day+1)
# Pickle support.
def _getstate(self):
yhi, ylo = divmod(self._year, 256)
return bytes([yhi, ylo, self._month, self._day]),
def __setstate(self, string):
yhi, ylo, self._month, self._day = string
self._year = yhi * 256 + ylo
def __reduce__(self):
return (self.__class__, self._getstate())
| null |
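A few sanity checks for the date API above (fromisoformat needs Python 3.7+, fromisocalendar needs 3.8+):
from datetime import date

d = date(2010, 1, 1)
d.isoformat()                             # '2010-01-01'
d.isocalendar()                           # ISO (year, week, weekday) == (2009, 53, 5)
date.fromisocalendar(2009, 53, 5) == d    # True: inverse of isocalendar()
date.fromordinal(d.toordinal()) == d      # True: inverse of toordinal()
date.fromisoformat('2010-01-01') == d     # True: inverse of isoformat()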
6,812 | bottle | delete | Equals :meth:`route` with a ``DELETE`` method parameter. | def make_default_app_wrapper(name):
''' Return a callable that relays calls to the current default app. '''
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
| (self, path=None, method='DELETE', **options) |
6,813 | bottle | depr | null | def depr(message, hard=False):
warnings.warn(message, DeprecationWarning, stacklevel=3)
| (message, hard=False) |
6,815 | bottle | error | Decorator: Register an output handler for a HTTP error code | def make_default_app_wrapper(name):
''' Return a callable that relays calls to the current default app. '''
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
| (self, code=500) |
6,817 | traceback | format_exc | Like print_exc() but return a string. | def format_exc(limit=None, chain=True):
"""Like print_exc() but return a string."""
return "".join(format_exception(*sys.exc_info(), limit=limit, chain=chain))
| (limit=None, chain=True) |
6,819 | bottle | get | Equals :meth:`route`. | def make_default_app_wrapper(name):
''' Return a callable that relays calls to the current default app. '''
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
| (self, path=None, method='GET', **options) |
6,820 | bottle | getargspec | null | def getargspec(func):
spec = getfullargspec(func)
kwargs = makelist(spec[0]) + makelist(spec.kwonlyargs)
return kwargs, spec[1], spec[2], spec[3]
| (func) |
6,821 | inspect | getfullargspec | Get the names and default values of a callable object's parameters.
A tuple of seven things is returned:
(args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations).
'args' is a list of the parameter names.
'varargs' and 'varkw' are the names of the * and ** parameters or None.
'defaults' is an n-tuple of the default values of the last n parameters.
'kwonlyargs' is a list of keyword-only parameter names.
'kwonlydefaults' is a dictionary mapping names from kwonlyargs to defaults.
'annotations' is a dictionary mapping parameter names to annotations.
Notable differences from inspect.signature():
- the "self" parameter is always reported, even for bound methods
- wrapper chains defined by __wrapped__ *not* unwrapped automatically
| def getfullargspec(func):
"""Get the names and default values of a callable object's parameters.
A tuple of seven things is returned:
(args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations).
'args' is a list of the parameter names.
'varargs' and 'varkw' are the names of the * and ** parameters or None.
'defaults' is an n-tuple of the default values of the last n parameters.
'kwonlyargs' is a list of keyword-only parameter names.
'kwonlydefaults' is a dictionary mapping names from kwonlyargs to defaults.
'annotations' is a dictionary mapping parameter names to annotations.
Notable differences from inspect.signature():
- the "self" parameter is always reported, even for bound methods
- wrapper chains defined by __wrapped__ *not* unwrapped automatically
"""
try:
# Re: `skip_bound_arg=False`
#
# There is a notable difference in behaviour between getfullargspec
# and Signature: the former always returns 'self' parameter for bound
# methods, whereas the Signature always shows the actual calling
# signature of the passed object.
#
# To simulate this behaviour, we "unbind" bound methods, to trick
# inspect.signature to always return their first parameter ("self",
# usually)
# Re: `follow_wrapper_chains=False`
#
# getfullargspec() historically ignored __wrapped__ attributes,
# so we ensure that remains the case in 3.3+
sig = _signature_from_callable(func,
follow_wrapper_chains=False,
skip_bound_arg=False,
sigcls=Signature,
eval_str=False)
except Exception as ex:
# Most of the times 'signature' will raise ValueError.
# But, it can also raise AttributeError, and, maybe something
# else. So to be fully backwards compatible, we catch all
# possible exceptions here, and reraise a TypeError.
raise TypeError('unsupported callable') from ex
args = []
varargs = None
varkw = None
posonlyargs = []
kwonlyargs = []
annotations = {}
defaults = ()
kwdefaults = {}
if sig.return_annotation is not sig.empty:
annotations['return'] = sig.return_annotation
for param in sig.parameters.values():
kind = param.kind
name = param.name
if kind is _POSITIONAL_ONLY:
posonlyargs.append(name)
if param.default is not param.empty:
defaults += (param.default,)
elif kind is _POSITIONAL_OR_KEYWORD:
args.append(name)
if param.default is not param.empty:
defaults += (param.default,)
elif kind is _VAR_POSITIONAL:
varargs = name
elif kind is _KEYWORD_ONLY:
kwonlyargs.append(name)
if param.default is not param.empty:
kwdefaults[name] = param.default
elif kind is _VAR_KEYWORD:
varkw = name
if param.annotation is not param.empty:
annotations[name] = param.annotation
if not kwdefaults:
# compatibility with 'func.__kwdefaults__'
kwdefaults = None
if not defaults:
# compatibility with 'func.__defaults__'
defaults = None
return FullArgSpec(posonlyargs + args, varargs, varkw, defaults,
kwonlyargs, kwdefaults, annotations)
| (func) |
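Illustrative call (``f`` is a hypothetical function) showing how the FullArgSpec fields map to parameter kinds:
import inspect

def f(a, b=2, *args, c, d=4, **kwargs) -> int:
    return a

spec = inspect.getfullargspec(f)
spec.args            # ['a', 'b']
spec.varargs         # 'args'
spec.varkw           # 'kwargs'
spec.defaults        # (2,)
spec.kwonlyargs      # ['c', 'd']
spec.kwonlydefaults  # {'d': 4}
spec.annotations     # {'return': <class 'int'>}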
6,824 | bottle | hook | Return a decorator that attaches a callback to a hook. See
:meth:`add_hook` for details. | def make_default_app_wrapper(name):
''' Return a callable that relays calls to the current default app. '''
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
| (self, name) |
6,825 | bottle | html_escape | Escape HTML special characters ``&<>`` and quotes ``'"``. | def html_escape(string):
''' Escape HTML special characters ``&<>`` and quotes ``'"``. '''
return string.replace('&','&amp;').replace('<','&lt;').replace('>','&gt;')\
.replace('"','&quot;').replace("'",'&#039;')
| (string) |
6,826 | bottle | html_quote | Escape and quote a string to be used as an HTTP attribute. | def html_quote(string):
''' Escape and quote a string to be used as an HTTP attribute.'''
return '"%s"' % html_escape(string).replace('\n','&#10;')\
.replace('\r','&#13;').replace('\t','&#9;')
| (string) |
6,827 | bottle | http_date | null | def http_date(value):
if isinstance(value, (datedate, datetime)):
value = value.utctimetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
if not isinstance(value, basestring):
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
return value
| (value) |
6,829 | builtins | map | map(func, *iterables) --> map object
Make an iterator that computes the function using arguments from
each of the iterables. Stops when the shortest iterable is exhausted. | from builtins import map
| null |
6,830 | bottle | install | Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
| def make_default_app_wrapper(name):
''' Return a callable that relays calls to the current default app. '''
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
| (self, plugin) |
6,832 | json | dumps | Serialize ``obj`` to a JSON formatted ``str``.
If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``int``, ``float``, ``bool``, ``None``) will be skipped
instead of raising a ``TypeError``.
If ``ensure_ascii`` is false, then the return value can contain non-ASCII
characters if they appear in strings contained in ``obj``. Otherwise, all
such characters are escaped in JSON strings.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in a ``RecursionError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and
object members will be pretty-printed with that indent level. An indent
level of 0 will only insert newlines. ``None`` is the most compact
representation.
If specified, ``separators`` should be an ``(item_separator, key_separator)``
tuple. The default is ``(', ', ': ')`` if *indent* is ``None`` and
``(',', ': ')`` otherwise. To get the most compact JSON representation,
you should specify ``(',', ':')`` to eliminate whitespace.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
If *sort_keys* is true (default: ``False``), then the output of
dictionaries will be sorted by key.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg; otherwise ``JSONEncoder`` is used.
| def dumps(obj, *, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
default=None, sort_keys=False, **kw):
"""Serialize ``obj`` to a JSON formatted ``str``.
If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``int``, ``float``, ``bool``, ``None``) will be skipped
instead of raising a ``TypeError``.
If ``ensure_ascii`` is false, then the return value can contain non-ASCII
characters if they appear in strings contained in ``obj``. Otherwise, all
such characters are escaped in JSON strings.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in a ``RecursionError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and
object members will be pretty-printed with that indent level. An indent
level of 0 will only insert newlines. ``None`` is the most compact
representation.
If specified, ``separators`` should be an ``(item_separator, key_separator)``
tuple. The default is ``(', ', ': ')`` if *indent* is ``None`` and
``(',', ': ')`` otherwise. To get the most compact JSON representation,
you should specify ``(',', ':')`` to eliminate whitespace.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
If *sort_keys* is true (default: ``False``), then the output of
dictionaries will be sorted by key.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg; otherwise ``JSONEncoder`` is used.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
default is None and not sort_keys and not kw):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, default=default, sort_keys=sort_keys,
**kw).encode(obj)
| (obj, *, skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, cls=None, indent=None, separators=None, default=None, sort_keys=False, **kw) |
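Quick examples of the keyword arguments described above (sample data only):
import json

data = {'b': 1, 'a': [1, 2], 'ok': True}
json.dumps(data, sort_keys=True)                           # '{"a": [1, 2], "b": 1, "ok": true}'
json.dumps(data, separators=(',', ':'), sort_keys=True)    # most compact form, no whitespace
json.dumps(data, indent=2, sort_keys=True)                 # pretty-printed, one member per line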
6,834 | bottle | <lambda> | null | json_loads = lambda s: json_lds(touni(s))
| (s) |
6,835 | bottle | lazy_attribute | A property that caches itself to the class object. | class lazy_attribute(object):
''' A property that caches itself to the class object. '''
def __init__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter = func
def __get__(self, obj, cls):
value = self.getter(cls)
setattr(cls, self.__name__, value)
return value
| (func) |
6,836 | bottle | __get__ | null | def __get__(self, obj, cls):
value = self.getter(cls)
setattr(cls, self.__name__, value)
return value
| (self, obj, cls) |
6,837 | bottle | __init__ | null | def __init__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter = func
| (self, func) |
6,838 | bottle | load | Import a module or fetch an object from a module.
* ``package.module`` returns `module` as a module object.
* ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
* ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.
The last form accepts not only function calls, but any type of
expression. Keyword arguments passed to this function are available as
local variables. Example: ``import_string('re:compile(x)', x='[a-z]')``
| def load(target, **namespace):
""" Import a module or fetch an object from a module.
* ``package.module`` returns `module` as a module object.
* ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
* ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.
The last form accepts not only function calls, but any type of
expression. Keyword arguments passed to this function are available as
local variables. Example: ``import_string('re:compile(x)', x='[a-z]')``
"""
module, target = target.split(":", 1) if ':' in target else (target, None)
if module not in sys.modules: __import__(module)
if not target: return sys.modules[module]
if target.isalnum(): return getattr(sys.modules[module], target)
package_name = module.split('.')[0]
namespace[package_name] = sys.modules[package_name]
return eval('%s.%s' % (module, target), namespace)
| (target, **namespace) |
6,839 | bottle | load_app | Load a bottle application from a module and make sure that the import
does not affect the current default application, but returns a separate
application object. See :func:`load` for the target parameter. | def load_app(target):
""" Load a bottle application from a module and make sure that the import
does not affect the current default application, but returns a separate
application object. See :func:`load` for the target parameter. """
global NORUN; NORUN, nr_old = True, NORUN
try:
tmp = default_app.push() # Create a new "default application"
rv = load(target) # Import the target module
return rv if callable(rv) else tmp
finally:
default_app.remove(tmp) # Remove the temporary added default application
NORUN = nr_old
| (target) |
6,840 | bottle | local_property | null | def local_property(name=None):
if name: depr('local_property() is deprecated and will be removed.') #0.12
ls = threading.local()
def fget(self):
try: return ls.var
except AttributeError:
raise RuntimeError("Request context not initialized.")
def fset(self, value): ls.var = value
def fdel(self): del ls.var
return property(fget, fset, fdel, 'Thread-local property')
| (name=None) |
6,841 | bottle | make_default_app_wrapper | Return a callable that relays calls to the current default app. | def make_default_app_wrapper(name):
''' Return a callable that relays calls to the current default app. '''
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
| (name) |
6,842 | bottle | makelist | null | def makelist(data): # This is just to handy
if isinstance(data, (tuple, list, set, dict)): return list(data)
elif data: return [data]
else: return []
| (data) |
6,844 | bottle | mount | Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
root_app.mount('/admin/', admin_app)
:param prefix: path prefix or `mount-point`. If it ends in a slash,
that slash is mandatory.
:param app: an instance of :class:`Bottle` or a WSGI application.
All other parameters are passed to the underlying :meth:`route` call.
| def make_default_app_wrapper(name):
''' Return a callable that relays calls to the current default app. '''
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
| (self, prefix, app, **options) |
6,845 | builtins | module | Create a module object.
The name must be a string; the optional doc argument can have any type. | from builtins import module
| (name, doc=None) |
6,847 | bottle | parse_auth | Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None | def parse_auth(header):
""" Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
try:
method, data = header.split(None, 1)
if method.lower() == 'basic':
user, pwd = touni(base64.b64decode(tob(data))).split(':',1)
return user, pwd
except (KeyError, ValueError):
return None
| (header) |
6,848 | bottle | parse_date | Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. | def parse_date(ims):
""" Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
try:
ts = email.utils.parsedate_tz(ims)
return time.mktime(ts[:8] + (0,)) - (ts[9] or 0) - time.timezone
except (TypeError, ValueError, IndexError, OverflowError):
return None
| (ims) |
6,849 | bottle | parse_range_header | Yield (start, end) ranges parsed from a HTTP Range header. Skip
unsatisfiable ranges. The end index is non-inclusive. | def parse_range_header(header, maxlen=0):
''' Yield (start, end) ranges parsed from a HTTP Range header. Skip
unsatisfiable ranges. The end index is non-inclusive.'''
if not header or header[:6] != 'bytes=': return
ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r]
for start, end in ranges:
try:
if not start: # bytes=-100 -> last 100 bytes
start, end = max(0, maxlen-int(end)), maxlen
elif not end: # bytes=100- -> all but the first 100 bytes
start, end = int(start), maxlen
else: # bytes=100-200 -> bytes 100-200 (inclusive)
start, end = int(start), min(int(end)+1, maxlen)
if 0 <= start < end <= maxlen:
yield start, end
except ValueError:
pass
| (header, maxlen=0) |
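Example ranges for a 1000-byte resource (end indices are non-inclusive), assuming bottle is installed:
from bottle import parse_range_header

list(parse_range_header('bytes=0-99,500-,-100', maxlen=1000))
# [(0, 100), (500, 1000), (900, 1000)]
list(parse_range_header('bytes=900-2000', maxlen=1000))
# [(900, 1000)]  -- clipped to the resource length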
6,850 | bottle | path_shift | Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:return: The modified paths.
:param script_name: The SCRIPT_NAME path.
:param path_info: The PATH_INFO path.
:param shift: The number of path fragments to shift. May be negative to
change the shift direction. (default: 1)
| def path_shift(script_name, path_info, shift=1):
''' Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:return: The modified paths.
:param script_name: The SCRIPT_NAME path.
:param path_info: The PATH_INFO path.
:param shift: The number of path fragments to shift. May be negative to
change the shift direction. (default: 1)
'''
if shift == 0: return script_name, path_info
pathlist = path_info.strip('/').split('/')
scriptlist = script_name.strip('/').split('/')
if pathlist and pathlist[0] == '': pathlist = []
if scriptlist and scriptlist[0] == '': scriptlist = []
if shift > 0 and shift <= len(pathlist):
moved = pathlist[:shift]
scriptlist = scriptlist + moved
pathlist = pathlist[shift:]
elif shift < 0 and shift >= -len(scriptlist):
moved = scriptlist[shift:]
pathlist = moved + pathlist
scriptlist = scriptlist[:shift]
else:
empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
raise AssertionError("Cannot shift. Nothing left from %s" % empty)
new_script_name = '/' + '/'.join(scriptlist)
new_path_info = '/' + '/'.join(pathlist)
if path_info.endswith('/') and pathlist: new_path_info += '/'
return new_script_name, new_path_info
| (script_name, path_info, shift=1) |
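A small round-trip sketch for path_shift (the paths are arbitrary examples):
from bottle import path_shift

path_shift('/app', '/admin/users', shift=1)    # ('/app/admin', '/users')
path_shift('/app/admin', '/users', shift=-1)   # ('/app', '/admin/users')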
6,852 | bottle | post | Equals :meth:`route` with a ``POST`` method parameter. | def make_default_app_wrapper(name):
''' Return a callable that relays calls to the current default app. '''
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
| (self, path=None, method='POST', **options) |
6,853 | traceback | print_exc | Shorthand for 'print_exception(*sys.exc_info(), limit, file)'. | def print_exc(limit=None, file=None, chain=True):
"""Shorthand for 'print_exception(*sys.exc_info(), limit, file)'."""
print_exception(*sys.exc_info(), limit=limit, file=file, chain=chain)
| (limit=None, file=None, chain=True) |
6,854 | bottle | put | Equals :meth:`route` with a ``PUT`` method parameter. | def make_default_app_wrapper(name):
''' Return a callable that relays calls to the current default app. '''
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
| (self, path=None, method='PUT', **options) |
6,856 | bottle | redirect | Aborts execution and causes a 303 or 302 redirect, depending on
the HTTP protocol version. | def redirect(url, code=None):
""" Aborts execution and causes a 303 or 302 redirect, depending on
the HTTP protocol version. """
if not code:
code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
res = response.copy(cls=HTTPResponse)
res.status = code
res.body = ""
res.set_header('Location', urljoin(request.url, url))
raise res
| (url, code=None) |
6,857 | bottle | route | A decorator to bind a function to a request URL. Example::
@app.route('/hello/:name')
def hello(name):
return 'Hello %s' % name
The ``:name`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
| def make_default_app_wrapper(name):
''' Return a callable that relays calls to the current default app. '''
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
| (self, path=None, method='GET', callback=None, name=None, apply=None, skip=None, **config) |
6,858 | bottle | run | Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`)
:param server: Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`)
:param host: Server address to bind to. Pass ``0.0.0.0`` to listen on
all interfaces including the external one. (default: 127.0.0.1)
:param port: Server port to bind to. Values below 1024 require root
privileges. (default: 8080)
:param reloader: Start auto-reloading server? (default: False)
:param interval: Auto-reloader interval in seconds (default: 1)
:param quiet: Suppress output to stdout and stderr? (default: False)
:param options: Options passed to the server adapter.
| def run(app=None, server='wsgiref', host='127.0.0.1', port=8080,
interval=1, reloader=False, quiet=False, plugins=None,
debug=None, **kargs):
""" Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`)
:param server: Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`)
:param host: Server address to bind to. Pass ``0.0.0.0`` to listen on
all interfaces including the external one. (default: 127.0.0.1)
:param port: Server port to bind to. Values below 1024 require root
privileges. (default: 8080)
:param reloader: Start auto-reloading server? (default: False)
:param interval: Auto-reloader interval in seconds (default: 1)
:param quiet: Suppress output to stdout and stderr? (default: False)
:param options: Options passed to the server adapter.
"""
if NORUN: return
if reloader and not os.environ.get('BOTTLE_CHILD'):
try:
lockfile = None
fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
os.close(fd) # We only need this file to exist. We never write to it
while os.path.exists(lockfile):
args = [sys.executable] + sys.argv
environ = os.environ.copy()
environ['BOTTLE_CHILD'] = 'true'
environ['BOTTLE_LOCKFILE'] = lockfile
p = subprocess.Popen(args, env=environ)
while p.poll() is None: # Busy wait...
os.utime(lockfile, None) # I am alive!
time.sleep(interval)
if p.poll() != 3:
if os.path.exists(lockfile): os.unlink(lockfile)
sys.exit(p.poll())
except KeyboardInterrupt:
pass
finally:
if os.path.exists(lockfile):
os.unlink(lockfile)
return
try:
if debug is not None: _debug(debug)
app = app or default_app()
if isinstance(app, basestring):
app = load_app(app)
if not callable(app):
raise ValueError("Application is not callable: %r" % app)
for plugin in plugins or []:
app.install(plugin)
if server in server_names:
server = server_names.get(server)
if isinstance(server, basestring):
server = load(server)
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise ValueError("Unknown or unsupported server: %r" % server)
server.quiet = server.quiet or quiet
if not server.quiet:
_stderr("Bottle v%s server starting up (using %s)...\n" % (__version__, repr(server)))
_stderr("Listening on http://%s:%d/\n" % (server.host, server.port))
_stderr("Hit Ctrl-C to quit.\n\n")
if reloader:
lockfile = os.environ.get('BOTTLE_LOCKFILE')
bgcheck = FileCheckerThread(lockfile, interval)
with bgcheck:
server.run(app)
if bgcheck.status == 'reload':
sys.exit(3)
else:
server.run(app)
except KeyboardInterrupt:
pass
except (SystemExit, MemoryError):
raise
except:
if not reloader: raise
if not getattr(server, 'quiet', quiet):
print_exc()
time.sleep(interval)
sys.exit(3)
| (app=None, server='wsgiref', host='127.0.0.1', port=8080, interval=1, reloader=False, quiet=False, plugins=None, debug=None, **kargs) |
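Minimal blocking server sketch using the defaults documented above (wsgiref on 127.0.0.1:8080):
from bottle import route, run

@route('/')
def index():
    return 'Hello!'

run(host='127.0.0.1', port=8080)   # blocks until the server terminates (Ctrl-C)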
6,859 | bottle | static_file | Open a file in a safe way and return :exc:`HTTPResponse` with status
code 200, 304, 403 or 404. The ``Content-Type``, ``Content-Encoding``,
``Content-Length`` and ``Last-Modified`` headers are set if possible.
Special support for ``If-Modified-Since``, ``Range`` and ``HEAD``
requests.
:param filename: Name or path of the file to send.
:param root: Root path for file lookups. Should be an absolute directory
path.
:param mimetype: Defines the content-type header (default: guess from
file extension)
:param download: If True, ask the browser to open a `Save as...` dialog
instead of opening the file with the associated program. You can
specify a custom filename as a string. If not specified, the
original filename is used (default: False).
:param charset: The charset to use for files with a ``text/*``
mime-type. (default: UTF-8)
| def static_file(filename, root, mimetype='auto', download=False, charset='UTF-8'):
""" Open a file in a safe way and return :exc:`HTTPResponse` with status
code 200, 304, 403 or 404. The ``Content-Type``, ``Content-Encoding``,
``Content-Length`` and ``Last-Modified`` headers are set if possible.
Special support for ``If-Modified-Since``, ``Range`` and ``HEAD``
requests.
:param filename: Name or path of the file to send.
:param root: Root path for file lookups. Should be an absolute directory
path.
:param mimetype: Defines the content-type header (default: guess from
file extension)
:param download: If True, ask the browser to open a `Save as...` dialog
instead of opening the file with the associated program. You can
specify a custom filename as a string. If not specified, the
original filename is used (default: False).
:param charset: The charset to use for files with a ``text/*``
mime-type. (default: UTF-8)
"""
root = os.path.abspath(root) + os.sep
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
headers = dict()
if not filename.startswith(root):
return HTTPError(403, "Access denied.")
if not os.path.exists(filename) or not os.path.isfile(filename):
return HTTPError(404, "File does not exist.")
if not os.access(filename, os.R_OK):
return HTTPError(403, "You do not have permission to access this file.")
if mimetype == 'auto':
mimetype, encoding = mimetypes.guess_type(filename)
if encoding: headers['Content-Encoding'] = encoding
if mimetype:
if mimetype[:5] == 'text/' and charset and 'charset' not in mimetype:
mimetype += '; charset=%s' % charset
headers['Content-Type'] = mimetype
if download:
download = os.path.basename(filename if download == True else download)
headers['Content-Disposition'] = 'attachment; filename="%s"' % download
stats = os.stat(filename)
headers['Content-Length'] = clen = stats.st_size
lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
headers['Last-Modified'] = lm
ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
if ims:
ims = parse_date(ims.split(";")[0].strip())
if ims is not None and ims >= int(stats.st_mtime):
headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
return HTTPResponse(status=304, **headers)
body = '' if request.method == 'HEAD' else open(filename, 'rb')
headers["Accept-Ranges"] = "bytes"
ranges = request.environ.get('HTTP_RANGE')
if 'HTTP_RANGE' in request.environ:
ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen))
if not ranges:
return HTTPError(416, "Requested Range Not Satisfiable")
offset, end = ranges[0]
headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end-1, clen)
headers["Content-Length"] = str(end-offset)
if body: body = _file_iter_range(body, offset, end-offset)
return HTTPResponse(body, status=206, **headers)
return HTTPResponse(body, **headers)
| (filename, root, mimetype='auto', download=False, charset='UTF-8') |
6,863 | bottle | template |
Get a rendered template as a string iterator.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
| def template(*args, **kwargs):
'''
Get a rendered template as a string iterator.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
'''
tpl = args[0] if args else None
adapter = kwargs.pop('template_adapter', SimpleTemplate)
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
tplid = (id(lookup), tpl)
if tplid not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
if isinstance(tpl, adapter):
TEMPLATES[tplid] = tpl
if settings: TEMPLATES[tplid].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tplid]:
abort(500, 'Template (%s) not found' % tpl)
for dictarg in args[1:]: kwargs.update(dictarg)
return TEMPLATES[tplid].render(kwargs)
| (*args, **kwargs) |
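Inline-template sketch (assumes bottle is installed); the '{' in the source string triggers the "template string" branch above:
from bottle import template

template('Hello {{name}}!', name='World')                                  # 'Hello World!'
template('{{greeting}}, {{name}}!', dict(greeting='Hi'), name='Bottle')    # 'Hi, Bottle!'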
6,868 | bottle | tob | null | def tob(s, enc='utf8'):
return s.encode(enc) if isinstance(s, unicode) else bytes(s)
| (s, enc='utf8') |
6,869 | bottle | touni | null | def touni(s, enc='utf8', err='strict'):
return s.decode(enc, err) if isinstance(s, bytes) else unicode(s)
| (s, enc='utf8', err='strict') |
6,872 | bottle | uninstall | Uninstall plugins. Pass an instance to remove a specific plugin, a type
object to remove all plugins that match that type, a string to remove
all plugins with a matching ``name`` attribute or ``True`` to remove all
plugins. Return the list of removed plugins. | def make_default_app_wrapper(name):
''' Return a callable that relays calls to the current default app. '''
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
| (self, plugin) |
6,873 | bottle | update_wrapper | null | def update_wrapper(wrapper, wrapped, *a, **ka):
try: functools.update_wrapper(wrapper, wrapped, *a, **ka)
except AttributeError: pass
| (wrapper, wrapped, *a, **ka) |
6,874 | bottle | get_url | Return a string that matches a named route | def make_default_app_wrapper(name):
''' Return a callable that relays calls to the current default app. '''
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
| (self, routename, **kargs) |
6,875 | urllib.parse | urlencode | Encode a dict or sequence of two-element tuples into a URL query string.
If any values in the query arg are sequences and doseq is true, each
sequence element is converted to a separate parameter.
If the query arg is a sequence of two-element tuples, the order of the
parameters in the output will match the order of parameters in the
input.
The components of a query arg may each be either a string or a bytes type.
The safe, encoding, and errors parameters are passed down to the function
specified by quote_via (encoding and errors only if a component is a str).
| def urlencode(query, doseq=False, safe='', encoding=None, errors=None,
quote_via=quote_plus):
"""Encode a dict or sequence of two-element tuples into a URL query string.
If any values in the query arg are sequences and doseq is true, each
sequence element is converted to a separate parameter.
If the query arg is a sequence of two-element tuples, the order of the
parameters in the output will match the order of parameters in the
input.
The components of a query arg may each be either a string or a bytes type.
The safe, encoding, and errors parameters are passed down to the function
specified by quote_via (encoding and errors only if a component is a str).
"""
if hasattr(query, "items"):
query = query.items()
else:
# It's a bother at times that strings and string-like objects are
# sequences.
try:
# non-sequence items should not work with len()
# non-empty strings will fail this
if len(query) and not isinstance(query[0], tuple):
raise TypeError
# Zero-length sequences of all types will get here and succeed,
# but that's a minor nit. Since the original implementation
# allowed empty dicts that type of behavior probably should be
# preserved for consistency
except TypeError:
ty, va, tb = sys.exc_info()
raise TypeError("not a valid non-string sequence "
"or mapping object").with_traceback(tb)
l = []
if not doseq:
for k, v in query:
if isinstance(k, bytes):
k = quote_via(k, safe)
else:
k = quote_via(str(k), safe, encoding, errors)
if isinstance(v, bytes):
v = quote_via(v, safe)
else:
v = quote_via(str(v), safe, encoding, errors)
l.append(k + '=' + v)
else:
for k, v in query:
if isinstance(k, bytes):
k = quote_via(k, safe)
else:
k = quote_via(str(k), safe, encoding, errors)
if isinstance(v, bytes):
v = quote_via(v, safe)
l.append(k + '=' + v)
elif isinstance(v, str):
v = quote_via(v, safe, encoding, errors)
l.append(k + '=' + v)
else:
try:
# Is this a sufficient test for sequence-ness?
x = len(v)
except TypeError:
# not a sequence
v = quote_via(str(v), safe, encoding, errors)
l.append(k + '=' + v)
else:
# loop over the sequence
for elt in v:
if isinstance(elt, bytes):
elt = quote_via(elt, safe)
else:
elt = quote_via(str(elt), safe, encoding, errors)
l.append(k + '=' + elt)
return '&'.join(l)
| (query, doseq=False, safe='', encoding=None, errors=None, quote_via=<function quote_plus at 0x7f45e6cf81f0>) |
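Examples of the doseq behaviour described above (sample query data):
from urllib.parse import urlencode

urlencode({'q': 'bottle web', 'page': 2})     # 'q=bottle+web&page=2'
urlencode([('tag', 'a'), ('tag', 'b')])       # 'tag=a&tag=b'
urlencode({'tag': ['a', 'b']}, doseq=True)    # 'tag=a&tag=b'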
6,876 | urllib.parse | urljoin | Join a base URL and a possibly relative URL to form an absolute
interpretation of the latter. | def urljoin(base, url, allow_fragments=True):
"""Join a base URL and a possibly relative URL to form an absolute
interpretation of the latter."""
if not base:
return url
if not url:
return base
base, url, _coerce_result = _coerce_args(base, url)
bscheme, bnetloc, bpath, bparams, bquery, bfragment = \
urlparse(base, '', allow_fragments)
scheme, netloc, path, params, query, fragment = \
urlparse(url, bscheme, allow_fragments)
if scheme != bscheme or scheme not in uses_relative:
return _coerce_result(url)
if scheme in uses_netloc:
if netloc:
return _coerce_result(urlunparse((scheme, netloc, path,
params, query, fragment)))
netloc = bnetloc
if not path and not params:
path = bpath
params = bparams
if not query:
query = bquery
return _coerce_result(urlunparse((scheme, netloc, path,
params, query, fragment)))
base_parts = bpath.split('/')
if base_parts[-1] != '':
# the last item is not a directory, so will not be taken into account
# in resolving the relative path
del base_parts[-1]
# for rfc3986, ignore all base path should the first character be root.
if path[:1] == '/':
segments = path.split('/')
else:
segments = base_parts + path.split('/')
# filter out elements that would cause redundant slashes on re-joining
# the resolved_path
segments[1:-1] = filter(None, segments[1:-1])
resolved_path = []
for seg in segments:
if seg == '..':
try:
resolved_path.pop()
except IndexError:
# ignore any .. segments that would otherwise cause an IndexError
# when popped from resolved_path if resolving for rfc3986
pass
elif seg == '.':
continue
else:
resolved_path.append(seg)
if segments[-1] in ('.', '..'):
# do some post-processing here. if the last segment was a relative dir,
# then we need to append the trailing '/'
resolved_path.append('')
return _coerce_result(urlunparse((scheme, netloc, '/'.join(
resolved_path) or '/', params, query, fragment)))
| (base, url, allow_fragments=True) |
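A few joins showing relative, absolute-path and '..' resolution (example URLs only):
from urllib.parse import urljoin

urljoin('http://example.com/app/page.html', 'other.html')   # 'http://example.com/app/other.html'
urljoin('http://example.com/app/page.html', '/root.html')   # 'http://example.com/root.html'
urljoin('http://example.com/app/', '../up.html')            # 'http://example.com/up.html'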
6,877 | urllib.parse | quote | quote('abc def') -> 'abc%20def'
Each part of a URL, e.g. the path info, the query, etc., has a
different set of reserved characters that must be quoted. The
quote function offers a cautious (not minimal) way to quote a
string for most of these parts.
RFC 3986 Uniform Resource Identifier (URI): Generic Syntax lists
the following (un)reserved characters.
unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
reserved = gen-delims / sub-delims
gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
/ "*" / "+" / "," / ";" / "="
Each of the reserved characters is reserved in some component of a URL,
but not necessarily in all of them.
The quote function %-escapes all characters that are neither in the
unreserved chars ("always safe") nor the additional chars set via the
safe arg.
The default for the safe arg is '/'. The character is reserved, but in
typical usage the quote function is being called on a path where the
existing slash characters are to be preserved.
Python 3.7 updates from using RFC 2396 to RFC 3986 to quote URL strings.
Now, "~" is included in the set of unreserved characters.
string and safe may be either str or bytes objects. encoding and errors
must not be specified if string is a bytes object.
The optional encoding and errors parameters specify how to deal with
non-ASCII characters, as accepted by the str.encode method.
By default, encoding='utf-8' (characters are encoded with UTF-8), and
errors='strict' (unsupported characters raise a UnicodeEncodeError).
| def quote(string, safe='/', encoding=None, errors=None):
"""quote('abc def') -> 'abc%20def'
Each part of a URL, e.g. the path info, the query, etc., has a
different set of reserved characters that must be quoted. The
quote function offers a cautious (not minimal) way to quote a
string for most of these parts.
RFC 3986 Uniform Resource Identifier (URI): Generic Syntax lists
the following (un)reserved characters.
unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
reserved = gen-delims / sub-delims
gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
/ "*" / "+" / "," / ";" / "="
Each of the reserved characters is reserved in some component of a URL,
but not necessarily in all of them.
The quote function %-escapes all characters that are neither in the
unreserved chars ("always safe") nor the additional chars set via the
safe arg.
The default for the safe arg is '/'. The character is reserved, but in
typical usage the quote function is being called on a path where the
existing slash characters are to be preserved.
Python 3.7 updates from using RFC 2396 to RFC 3986 to quote URL strings.
Now, "~" is included in the set of unreserved characters.
string and safe may be either str or bytes objects. encoding and errors
must not be specified if string is a bytes object.
The optional encoding and errors parameters specify how to deal with
non-ASCII characters, as accepted by the str.encode method.
By default, encoding='utf-8' (characters are encoded with UTF-8), and
errors='strict' (unsupported characters raise a UnicodeEncodeError).
"""
if isinstance(string, str):
if not string:
return string
if encoding is None:
encoding = 'utf-8'
if errors is None:
errors = 'strict'
string = string.encode(encoding, errors)
else:
if encoding is not None:
raise TypeError("quote() doesn't support 'encoding' for bytes")
if errors is not None:
raise TypeError("quote() doesn't support 'errors' for bytes")
return quote_from_bytes(string, safe)
| (string, safe='/', encoding=None, errors=None) |
6,878 | bottle | view | Decorator: renders a template for a handler.
The handler can control its behavior as follows:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters.
| def view(tpl_name, **defaults):
''' Decorator: renders a template for a handler.
The handler can control its behavior as follows:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters.
'''
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
elif result is None:
return template(tpl_name, **defaults)
return result
return wrapper
return decorator
| (tpl_name, **defaults) |
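Usage sketch for the view decorator (assumes bottle is installed); 'hello_template' is a hypothetical .tpl file on TEMPLATE_PATH:
from bottle import route, view

@route('/hello/<name>')
@view('hello_template')
def hello(name):
    # Returning a dict fills the template; any other return value is passed through unchanged.
    return dict(name=name)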
6,880 | bottle | yieldroutes | Return a generator for routes that match the signature (name, args)
of the func parameter. This may yield more than one route if the function
takes optional keyword arguments. The output is best described by example::
a() -> '/a'
b(x, y) -> '/b/<x>/<y>'
c(x, y=5) -> '/c/<x>' and '/c/<x>/<y>'
d(x=5, y=6) -> '/d' and '/d/<x>' and '/d/<x>/<y>'
| def yieldroutes(func):
""" Return a generator for routes that match the signature (name, args)
of the func parameter. This may yield more than one route if the function
takes optional keyword arguments. The output is best described by example::
a() -> '/a'
b(x, y) -> '/b/<x>/<y>'
c(x, y=5) -> '/c/<x>' and '/c/<x>/<y>'
d(x=5, y=6) -> '/d' and '/d/<x>' and '/d/<x>/<y>'
"""
path = '/' + func.__name__.replace('__','/').lstrip('/')
spec = getargspec(func)
argc = len(spec[0]) - len(spec[3] or [])
path += ('/<%s>' * argc) % tuple(spec[0][:argc])
yield path
for arg in spec[0][argc:]:
path += '/<%s>' % arg
yield path
| (func) |
6,881 | colorhash.colorhash | ColorHash |
Generate a color value and provide it in several format.
Args:
obj: the value.
lightness: a range of values, one of which will be picked for the
lightness component of the result. Can also be a single
number.
saturation: a range of values, one of which will be picked for the
saturation component of the result. Can also be a single
number.
min_h: if set, limit the hue component to this lower value.
max_h: if set, limit the hue component to this upper value.
Attributes:
hsl: HSL representation of the color value.
rgb: RGB representation of the color value.
hex: hex-formatted RGB color value.
| class ColorHash:
"""
Generate a color value and provide it in several format.
Args:
obj: the value.
lightness: a range of values, one of which will be picked for the
lightness component of the result. Can also be a single
number.
saturation: a range of values, one of which will be picked for the
saturation component of the result. Can also be a single
number.
min_h: if set, limit the hue component to this lower value.
max_h: if set, limit the hue component to this upper value.
Attributes:
hsl: HSL representation of the color value.
rgb: RGB representation of the color value.
hex: hex-formatted RGB color value.
"""
def __init__(
self,
obj: Any,
lightness: Sequence[float] = (0.35, 0.5, 0.65),
saturation: Sequence[float] = (0.35, 0.5, 0.65),
min_h: Optional[int] = None,
max_h: Optional[int] = None,
):
self.hsl: Tuple[float, float, float] = color_hash(
obj=obj,
lightness=lightness,
saturation=saturation,
min_h=min_h,
max_h=max_h,
)
@property
def rgb(self) -> Tuple[int, int, int]:
return hsl2rgb(self.hsl)
@property
def hex(self) -> str:
return rgb2hex(self.rgb)
| (obj: Any, lightness: Sequence[float] = (0.35, 0.5, 0.65), saturation: Sequence[float] = (0.35, 0.5, 0.65), min_h: Optional[int] = None, max_h: Optional[int] = None) |
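Usage sketch for ColorHash (assumes the colorhash package is installed); the color is deterministic for a given input value:
from colorhash import ColorHash

c = ColorHash('felix')
assert c.hex == ColorHash('felix').hex   # same value -> same color
print(c.hsl, c.rgb, c.hex)               # HSL tuple, RGB tuple, hex string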
6,882 | colorhash.colorhash | __init__ | null | def __init__(
self,
obj: Any,
lightness: Sequence[float] = (0.35, 0.5, 0.65),
saturation: Sequence[float] = (0.35, 0.5, 0.65),
min_h: Optional[int] = None,
max_h: Optional[int] = None,
):
self.hsl: Tuple[float, float, float] = color_hash(
obj=obj,
lightness=lightness,
saturation=saturation,
min_h=min_h,
max_h=max_h,
)
| (self, obj: Any, lightness: Sequence[float] = (0.35, 0.5, 0.65), saturation: Sequence[float] = (0.35, 0.5, 0.65), min_h: Optional[int] = None, max_h: Optional[int] = None) |
6,884 | colorhash | get_version |
Fast (dev time) way to get version.
| def get_version(_):
"""
Fast (dev time) way to get version.
"""
with open("pyproject.toml") as f:
for line in f.readlines():
if line.startswith("version = "):
ver = line.split("=")[1].strip().strip('"')
return ver
| (_) |
6,885 | importlib.metadata | version | Get the version string for the named package.
:param distribution_name: The name of the distribution package to query.
:return: The version string for the package as defined in the package's
"Version" metadata key.
| def version(distribution_name):
"""Get the version string for the named package.
:param distribution_name: The name of the distribution package to query.
:return: The version string for the package as defined in the package's
"Version" metadata key.
"""
return distribution(distribution_name).version
| (distribution_name) |
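A hedged usage sketch; the distribution name and the printed version depend on the environment:
```python
from importlib.metadata import version, PackageNotFoundError

try:
    print(version("pip"))          # e.g. '23.0.1', whatever is installed
except PackageNotFoundError:
    print("distribution not installed")
```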
6,886 | roa_checker.roa | ROA | null | class ROA(CIDRNode):
def __init__(self, *args, **kwargs):
"""Initializes the ROA node"""
super(ROA, self).__init__(*args, **kwargs)
# Origin max length pairs
self.origin_max_lengths: set[tuple[int, int]] = set()
# Mypy doesn't understand *args in super class
def add_data( # type: ignore
self,
prefix: IPv4Network | IPv6Network,
origin: int,
max_length: Optional[int] = None,
):
"""Adds data to the node"""
if max_length is None:
max_length = prefix.prefixlen
self.prefix = prefix
self.origin_max_lengths.add((origin, max_length))
def get_validity(
self, prefix: IPv4Network | IPv6Network, origin: int
) -> tuple[ROAValidity, ROARouted]:
"""Gets the ROA validity of a prefix origin pair
This gets pretty complicated because we need to calculate
both validity and routed, and there can be multiple ROAs
for the same announcement.
In other words, we need to calculate the best ROA for a given
announcement, and then use the validity of that ROA. I.e.
the "most valid" ROA is the one that should be used.
"""
assert isinstance(prefix, type(self.prefix))
# Mypy isn't getting that these types are the same
if not prefix.subnet_of(self.prefix): # type: ignore
return ROAValidity.UNKNOWN, ROARouted.UNKNOWN
else:
roa_validities = list()
for self_origin, max_length in self.origin_max_lengths:
routed = ROARouted.NON_ROUTED if self_origin == 0 else ROARouted.ROUTED
if prefix.prefixlen > max_length and origin != self_origin:
roa_validities.append(
ROAOutcome(ROAValidity.INVALID_LENGTH_AND_ORIGIN, routed)
)
elif prefix.prefixlen > max_length and origin == self_origin:
roa_validities.append(
ROAOutcome(ROAValidity.INVALID_LENGTH, routed)
)
elif prefix.prefixlen <= max_length and origin != self_origin:
roa_validities.append(
ROAOutcome(ROAValidity.INVALID_ORIGIN, routed)
)
elif prefix.prefixlen <= max_length and origin == self_origin:
roa_validities.append(ROAOutcome(ROAValidity.VALID, routed))
else:
raise NotImplementedError("This should never happen")
best_outcome = sorted(roa_validities)[0]
return best_outcome.validity, best_outcome.routed
| (*args, **kwargs) |
6,887 | roa_checker.roa | __init__ | Initializes the ROA node | def __init__(self, *args, **kwargs):
"""Initializes the ROA node"""
super(ROA, self).__init__(*args, **kwargs)
# Origin max length pairs
self.origin_max_lengths: set[tuple[int, int]] = set()
| (self, *args, **kwargs) |
6,888 | roa_checker.roa | add_data | Adds data to the node | def add_data( # type: ignore
self,
prefix: IPv4Network | IPv6Network,
origin: int,
max_length: Optional[int] = None,
):
"""Adds data to the node"""
if max_length is None:
max_length = prefix.prefixlen
self.prefix = prefix
self.origin_max_lengths.add((origin, max_length))
| (self, prefix: ipaddress.IPv4Network | ipaddress.IPv6Network, origin: int, max_length: Optional[int] = None) |
6,889 | roa_checker.roa | get_validity | Gets the ROA validity of a prefix origin pair
This gets pretty complicated because we need to calculate
both validity and routed, and there can be multiple ROAs
for the same announcement.
In other words, we need to calculate the best ROA for a given
announcement, and then use the validity of that ROA. I.e.
the "most valid" ROA is the one that should be used.
| def get_validity(
self, prefix: IPv4Network | IPv6Network, origin: int
) -> tuple[ROAValidity, ROARouted]:
"""Gets the ROA validity of a prefix origin pair
This gets pretty complicated because we need to calculate
both validity and routed, and there can be multiple ROAs
for the same announcement.
In other words, we need to calculate the best ROA for a given
announcement, and then use the validity of that ROA. I.e.
the "most valid" ROA is the one that should be used.
"""
assert isinstance(prefix, type(self.prefix))
# Mypy isn't getting that these types are the same
if not prefix.subnet_of(self.prefix): # type: ignore
return ROAValidity.UNKNOWN, ROARouted.UNKNOWN
else:
roa_validities = list()
for self_origin, max_length in self.origin_max_lengths:
routed = ROARouted.NON_ROUTED if self_origin == 0 else ROARouted.ROUTED
if prefix.prefixlen > max_length and origin != self_origin:
roa_validities.append(
ROAOutcome(ROAValidity.INVALID_LENGTH_AND_ORIGIN, routed)
)
elif prefix.prefixlen > max_length and origin == self_origin:
roa_validities.append(
ROAOutcome(ROAValidity.INVALID_LENGTH, routed)
)
elif prefix.prefixlen <= max_length and origin != self_origin:
roa_validities.append(
ROAOutcome(ROAValidity.INVALID_ORIGIN, routed)
)
elif prefix.prefixlen <= max_length and origin == self_origin:
roa_validities.append(ROAOutcome(ROAValidity.VALID, routed))
else:
raise NotImplementedError("This should never happen")
best_outcome = sorted(roa_validities)[0]
return best_outcome.validity, best_outcome.routed
| (self, prefix: ipaddress.IPv4Network | ipaddress.IPv6Network, origin: int) -> tuple[roa_checker.enums.ROAValidity, roa_checker.enums.ROARouted] |
6,890 | roa_checker.roa_checker | ROAChecker | Gets validity of prefix origin pairs against ROAs | class ROAChecker:
"""Gets validity of prefix origin pairs against ROAs"""
def __init__(self):
"""Initializes both ROA tries"""
self.ipv4_trie = IPv4ROATrie()
self.ipv6_trie = IPv6ROATrie()
def insert(
self, prefix: IPv4Network | IPv6Network, origin: int, max_length: Optional[int]
) -> None:
"""Inserts a prefix into the tries"""
trie = self.ipv4_trie if prefix.version == 4 else self.ipv6_trie
# mypy struggling with this
return trie.insert(prefix, origin, max_length) # type: ignore
def get_roa(self, prefix: IPv4Network | IPv6Network, *args) -> Optional[ROA]:
"""Gets the ROA covering prefix-origin pair"""
trie = self.ipv4_trie if prefix.version == 4 else self.ipv6_trie
assert isinstance(trie, CIDRTrie)
roa = trie.get_most_specific_trie_supernet(prefix)
assert roa is None or isinstance(roa, ROA), "for mypy"
return roa
def get_validity(
self, prefix: IPv4Network | IPv6Network, origin: int
) -> tuple[ROAValidity, ROARouted]:
"""Gets the validity of a prefix origin pair"""
trie = self.ipv4_trie if prefix.version == 4 else self.ipv6_trie
assert isinstance(trie, ROATrie), "for mypy"
return trie.get_validity(prefix, origin)
| () |
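A sketch of the insert/lookup flow shown above; the import path, prefixes, and ASNs are assumed for illustration:
```python
from ipaddress import ip_network
from roa_checker import ROAChecker, ROAValidity, ROARouted  # import path assumed

checker = ROAChecker()
# ROA for 1.2.0.0/16 with origin AS 1234 and max length 16
checker.insert(ip_network("1.2.0.0/16"), 1234, 16)

validity, routed = checker.get_validity(ip_network("1.2.0.0/16"), 1234)
print(validity, routed)   # expected: ROAValidity.VALID ROARouted.ROUTED

# A more-specific announcement than the max length should be invalid by length
validity, _ = checker.get_validity(ip_network("1.2.3.0/24"), 1234)
print(validity)           # expected: ROAValidity.INVALID_LENGTH
```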
6,891 | roa_checker.roa_checker | __init__ | Initializes both ROA tries | def __init__(self):
"""Initializes both ROA tries"""
self.ipv4_trie = IPv4ROATrie()
self.ipv6_trie = IPv6ROATrie()
| (self) |
6,892 | roa_checker.roa_checker | get_roa | Gets the ROA covering prefix-origin pair | def get_roa(self, prefix: IPv4Network | IPv6Network, *args) -> Optional[ROA]:
"""Gets the ROA covering prefix-origin pair"""
trie = self.ipv4_trie if prefix.version == 4 else self.ipv6_trie
assert isinstance(trie, CIDRTrie)
roa = trie.get_most_specific_trie_supernet(prefix)
assert roa is None or isinstance(roa, ROA), "for mypy"
return roa
| (self, prefix: ipaddress.IPv4Network | ipaddress.IPv6Network, *args) -> Optional[roa_checker.roa.ROA] |
6,893 | roa_checker.roa_checker | get_validity | Gets the validity of a prefix origin pair | def get_validity(
self, prefix: IPv4Network | IPv6Network, origin: int
) -> tuple[ROAValidity, ROARouted]:
"""Gets the validity of a prefix origin pair"""
trie = self.ipv4_trie if prefix.version == 4 else self.ipv6_trie
assert isinstance(trie, ROATrie), "for mypy"
return trie.get_validity(prefix, origin)
| (self, prefix: ipaddress.IPv4Network | ipaddress.IPv6Network, origin: int) -> tuple[roa_checker.enums.ROAValidity, roa_checker.enums.ROARouted] |
6,894 | roa_checker.roa_checker | insert | Inserts a prefix into the tries | def insert(
self, prefix: IPv4Network | IPv6Network, origin: int, max_length: Optional[int]
) -> None:
"""Inserts a prefix into the tries"""
trie = self.ipv4_trie if prefix.version == 4 else self.ipv6_trie
# mypy struggling with this
return trie.insert(prefix, origin, max_length) # type: ignore
| (self, prefix: ipaddress.IPv4Network | ipaddress.IPv6Network, origin: int, max_length: Optional[int]) -> NoneType |
6,895 | roa_checker.enums | ROARouted | An enumeration. | class ROARouted(Enum):
ROUTED = 0
UNKNOWN = 1
# A ROA is Non Routed if it is for an origin of ASN 0
# This means that the prefix for this ROA should never be announced
NON_ROUTED = 2
| (value, names=None, *, module=None, qualname=None, type=None, start=1) |
6,896 | roa_checker.enums | ROAValidity | An enumeration. | class ROAValidity(Enum):
# NOTE: These values double as "scores" for validity,
# so do NOT change the order
# (used in the ROA class)
VALID = 0
UNKNOWN = 1
# Note that we cannot differentiate between invalid by length
# or invalid by origin or invalid by both
# That is because for the same prefix you can have multiple
# max lengths or multiple origins
# And you select the most valid roa. So is invalid by length
# more valid than invalid by origin? No. So we just say invalid
# Nixing the comment above with the following methodology:
# NOTE: There can be multiple ROAs for the same prefix
# So if we say a ROA is invalid by length and origin
# it could potentially be invalid by length for one ROA
# and invalid by origin for another prefix
# If we say non routed, it's violating at least one non routed ROA
INVALID_LENGTH = 2
INVALID_ORIGIN = 3
INVALID_LENGTH_AND_ORIGIN = 4
@staticmethod
def is_valid(roa_validity: "ROAValidity") -> bool:
return roa_validity == ROAValidity.VALID
@staticmethod
def is_unknown(roa_validity: "ROAValidity") -> bool:
return roa_validity == ROAValidity.UNKNOWN
@staticmethod
def is_invalid(roa_validity: "ROAValidity") -> bool:
return roa_validity in (
ROAValidity.INVALID_LENGTH,
ROAValidity.INVALID_ORIGIN,
ROAValidity.INVALID_LENGTH_AND_ORIGIN,
)
| (value, names=None, *, module=None, qualname=None, type=None, start=1) |
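A small sketch of the helper predicates (import path assumed):
```python
from roa_checker import ROAValidity   # import path assumed

assert ROAValidity.is_valid(ROAValidity.VALID)
assert ROAValidity.is_invalid(ROAValidity.INVALID_LENGTH_AND_ORIGIN)
assert ROAValidity.is_unknown(ROAValidity.UNKNOWN)
# The enum values double as scores: lower means "more valid" when outcomes are sorted
assert ROAValidity.VALID.value < ROAValidity.INVALID_ORIGIN.value
```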
6,906 | getschema.impl | fix_type | Convert the fields into the proper object types.
e.g. {"number": "1.0"} -> {"number": 1.0}
- on_invalid_property: ["raise", "null", "force"]
What to do when the value cannot be converted.
- raise: Raise exception
- null: Impute with null
- force: Keep it as is (string)
| def fix_type(
obj,
schema,
dict_path=[],
on_invalid_property="raise",
lower=False,
replace_special=False,
snake_case=False,
date_to_datetime=False,
):
"""Convert the fields into the proper object types.
e.g. {"number": "1.0"} -> {"number": 1.0}
- on_invalid_property: ["raise", "null", "force"]
What to do when the value cannot be converted.
- raise: Raise exception
- null: Impute with null
- force: Keep it as is (string)
"""
kwargs = {
"on_invalid_property": on_invalid_property,
"lower": lower,
"replace_special": replace_special,
"snake_case": snake_case,
"date_to_datetime": date_to_datetime,
}
invalid_actions = ["raise", "null", "force"]
if on_invalid_property not in invalid_actions:
raise ValueError(
"on_invalid_property is not one of %s" % invalid_actions)
obj_type = _nested_get(schema, dict_path + ["type"])
obj_format = _nested_get(schema, dict_path + ["format"])
nullable = False
if obj_type is None:
if on_invalid_property == "raise":
raise ValueError("Unknown property found at: %s" % dict_path)
return None
if type(obj_type) is list:
if len(obj_type) > 2:
raise Exception("Sorry, getschema does not support multiple types")
nullable = ("null" in obj_type)
obj_type = obj_type[1] if obj_type[0] == "null" else obj_type[0]
if obj is None:
if not nullable:
if on_invalid_property == "raise":
raise ValueError("Null object given at %s" % dict_path)
return None
# Recurse if object or array types
if obj_type == "object":
if type(obj) is not dict:
raise KeyError("property type (object) Expected a dict object." +
"Got: %s %s at %s" % (type(obj), str(obj), str(dict_path)))
cleaned = dict()
keys = obj.keys()
for key in keys:
ret = fix_type(obj[key], schema, dict_path + ["properties", key],
**kwargs)
cleaned[key] = ret
new_key = _convert_key(key, lower, replace_special, snake_case)
if key != new_key:
cleaned[new_key] = cleaned.pop(key)
elif obj_type == "array":
assert(type(obj) is list)
cleaned = list()
for o in obj:
ret = fix_type(o, schema, dict_path + ["items"],
**kwargs)
if ret is not None:
cleaned.append(ret)
else:
if obj_type == "string":
if obj is None:
cleaned = None
else:
cleaned = str(obj)
if obj_format == "date-time":
# Just test parsing for now. Not converting to Python's
# datetime as re-JSONifying datetime is not straightforward
if not _is_datetime(cleaned):
cleaned = _on_invalid_property(
on_invalid_property,
dict_path, obj_type, cleaned,
err_msg="Not in a valid datetime format",
)
elif date_to_datetime and len(cleaned) == 10: # "2023-10-19"
cleaned += " 00:00:00.000"
elif obj_type == "number":
if obj is None:
cleaned = None
else:
try:
cleaned = float(obj)
except ValueError as e:
cleaned = _on_invalid_property(
on_invalid_property, dict_path, obj_type, obj,
err_msg=str(e))
elif obj_type == "integer":
if obj is None:
cleaned = None
else:
try:
cleaned = int(obj)
except ValueError as e:
cleaned = _on_invalid_property(
on_invalid_property, dict_path, obj_type, obj,
err_msg=str(e))
elif obj_type == "boolean":
if obj is None:
cleaned = None
elif str(obj).lower() == "true":
cleaned = True
elif str(obj).lower() == "false":
cleaned = False
else:
cleaned = _on_invalid_property(
on_invalid_property, dict_path, obj_type, obj,
err_msg=(str(obj) +
" is not a valid value for boolean type"))
else:
raise Exception("Invalid type in schema: %s" % obj_type)
return cleaned
| (obj, schema, dict_path=[], on_invalid_property='raise', lower=False, replace_special=False, snake_case=False, date_to_datetime=False) |
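A sketch of the conversion described in the docstring; the schema literal and import path below are assumed examples:
```python
from getschema import fix_type   # import path assumed

schema = {
    "type": "object",
    "properties": {
        "number": {"type": ["null", "number"]},
        "flag": {"type": ["null", "boolean"]},
    },
}
record = {"number": "1.0", "flag": "true"}
print(fix_type(record, schema))   # {'number': 1.0, 'flag': True}
```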
6,908 | getschema.impl | infer_from_csv_file | null | def infer_from_csv_file(filename, skip=0, lower=False, replace_special=False,
snake_case=False):
with open(filename) as f:
count = 0
while count < skip:
count = count + 1
f.readline()
reader = csv.DictReader(f)
data = [dict(row) for row in reader]
schema = infer_schema(data, lower=lower, replace_special=replace_special,
snake_case=snake_case)
return schema
| (filename, skip=0, lower=False, replace_special=False, snake_case=False) |
6,909 | getschema.impl | infer_from_file | null | def infer_from_file(filename, fmt="json", skip=0, lower=False,
replace_special=False, snake_case=False):
if fmt == "json":
schema = infer_from_json_file(
filename, skip, lower, replace_special, snake_case)
elif fmt == "yaml":
schema = infer_from_yaml_file(
filename, skip, lower, replace_special, snake_case)
elif fmt == "csv":
schema = infer_from_csv_file(
filename, skip, lower, replace_special, snake_case)
else:
raise KeyError("Unsupported format : " + fmt)
return schema
| (filename, fmt='json', skip=0, lower=False, replace_special=False, snake_case=False) |
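A hedged sketch of the file-based entry point; the filename is illustrative:
```python
import json
from getschema import infer_from_file   # import path assumed

# "records.csv" is an assumed file with a header row followed by data rows
schema = infer_from_file("records.csv", fmt="csv", skip=0, snake_case=True)
print(json.dumps(schema, indent=2))
```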
6,910 | getschema.impl | infer_from_json_file | null | def infer_from_json_file(filename, skip=0, lower=False, replace_special=False,
snake_case=False):
with open(filename, "r") as f:
content = f.read()
data = json.loads(content)
if type(data) is list:
data = data[skip:]
schema = infer_schema(data, lower=lower, replace_special=replace_special,
snake_case=snake_case)
return schema
| (filename, skip=0, lower=False, replace_special=False, snake_case=False) |
6,911 | getschema.impl | infer_from_yaml_file | null | def infer_from_yaml_file(filename, skip=0, lower=False, replace_special=False,
snake_case=False):
with open(filename, "r") as f:
content = f.read()
data = yaml.load(content, Loader=yaml.FullLoader)
if type(data) is list:
data = data[skip:]
schema = infer_schema(data, lower=lower, replace_special=replace_special,
snake_case=snake_case)
return schema
| (filename, skip=0, lower=False, replace_special=False, snake_case=False) |
6,912 | getschema.impl | infer_schema | Infer schema from a given object or a list of objects
- record_level:
- lower: Convert the key to all lower case
- replace_special: Replace letters to _ if not 0-9, A-Z, a-z, _ and -, or " "
- snake_case: Replace space to _
| def infer_schema(obj, record_level=None,
lower=False, replace_special=False, snake_case=False):
"""Infer schema from a given object or a list of objects
- record_level:
- lower: Convert the key to all lower case
- replace_special: Replace letters to _ if not 0-9, A-Z, a-z, _ and -, or " "
- snake_case: Replace space to _
"""
if type(obj) is not list:
obj = [obj]
if type(obj[0]) is not dict:
raise ValueError("Input must be a dict object.")
schema = None
# Go through the list of objects and find the most safe type assumption
for o in obj:
cur_schema = _do_infer_schema(
o, record_level, lower, replace_special, snake_case)
# Compare between currently the most conservative and the new record
# and keep the more conservative.
schema = _infer_from_two(schema, cur_schema)
schema["type"] = "object"
schema = _replace_null_type(schema)
return schema
| (obj, record_level=None, lower=False, replace_special=False, snake_case=False) |
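A short sketch of inferring a schema from in-memory records; the exact per-field type guesses depend on internal inference rules not shown here:
```python
import json
from getschema import infer_schema   # import path assumed

records = [
    {"id": "1", "price": "9.99"},
    {"id": "2", "price": None},
]
schema = infer_schema(records)
print(schema["type"])                # 'object'
print(json.dumps(schema, indent=2))  # per-key type guesses
```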
6,916 | getschema | main |
Entry point
| def main():
"""
Entry point
"""
parser = argparse.ArgumentParser(COMMAND)
parser.add_argument("data", type=str, help="json record file")
parser.add_argument("--indent", "-i", default=2, type=int,
help="Number of spaces for indentation")
parser.add_argument("--type", "-t", default="json", type=str,
help="Record format (json, yaml, csv)")
parser.add_argument("--skip", "-s", default=0, type=int,
help="Skip first n records. Don't skip the header row.")
parser.add_argument("--lower", "-l", default=False, action="store_true",
help="Convert the keys to lower case'")
parser.add_argument("--replace_special", "-r", default=None, type=str,
help="Replace special characters in the keys with the specified string")
parser.add_argument("--snakecase", "-n", default=False, action="store_true",
help="Convert the keys to 'snake_case'")
args = parser.parse_args()
schema = infer_from_file(args.data, args.type.lower(), args.skip,
args.lower, args.replace_special, args.snakecase)
print(json.dumps(schema, indent=args.indent))
| () |
6,920 | dateutil.tz.tz | tzoffset |
A simple class for representing a fixed offset from UTC.
:param name:
The timezone name, to be returned when ``tzname()`` is called.
:param offset:
The time zone offset in seconds, or (since version 2.6.0) a
:py:class:`datetime.timedelta` object.
| class tzoffset(datetime.tzinfo):
"""
A simple class for representing a fixed offset from UTC.
:param name:
The timezone name, to be returned when ``tzname()`` is called.
:param offset:
The time zone offset in seconds, or (since version 2.6.0) a
:py:class:`datetime.timedelta` object.
"""
def __init__(self, name, offset):
self._name = name
try:
# Allow a timedelta
offset = offset.total_seconds()
except (TypeError, AttributeError):
pass
self._offset = datetime.timedelta(seconds=_get_supported_offset(offset))
def utcoffset(self, dt):
return self._offset
def dst(self, dt):
return ZERO
@tzname_in_python2
def tzname(self, dt):
return self._name
@_validate_fromutc_inputs
def fromutc(self, dt):
return dt + self._offset
def is_ambiguous(self, dt):
"""
Whether or not the "wall time" of a given datetime is ambiguous in this
zone.
:param dt:
A :py:class:`datetime.datetime`, naive or time zone aware.
:return:
Returns ``True`` if ambiguous, ``False`` otherwise.
.. versionadded:: 2.6.0
"""
return False
def __eq__(self, other):
if not isinstance(other, tzoffset):
return NotImplemented
return self._offset == other._offset
__hash__ = None
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return "%s(%s, %s)" % (self.__class__.__name__,
repr(self._name),
int(self._offset.total_seconds()))
__reduce__ = object.__reduce__
| (name, offset) |
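A usage sketch for the fixed-offset zone; the zone name and times are illustrative:
```python
from datetime import datetime, timedelta
from dateutil.tz import tzoffset

ist = tzoffset("IST", timedelta(hours=5, minutes=30))   # +05:30 as a timedelta
dt = datetime(2024, 1, 1, 12, 0, tzinfo=ist)
print(dt.isoformat())     # 2024-01-01T12:00:00+05:30
print(dt.tzname())        # 'IST'
print(dt.utcoffset())     # 5:30:00
```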
6,921 | dateutil.tz.tz | __eq__ | null | def __eq__(self, other):
if not isinstance(other, tzoffset):
return NotImplemented
return self._offset == other._offset
| (self, other) |
6,922 | dateutil.tz.tz | __init__ | null | def __init__(self, name, offset):
self._name = name
try:
# Allow a timedelta
offset = offset.total_seconds()
except (TypeError, AttributeError):
pass
self._offset = datetime.timedelta(seconds=_get_supported_offset(offset))
| (self, name, offset) |
6,923 | dateutil.tz.tz | __ne__ | null | def __ne__(self, other):
return not (self == other)
| (self, other) |
6,924 | dateutil.tz.tz | __repr__ | null | def __repr__(self):
return "%s(%s, %s)" % (self.__class__.__name__,
repr(self._name),
int(self._offset.total_seconds()))
| (self) |
6,925 | dateutil.tz.tz | dst | null | def dst(self, dt):
return ZERO
| (self, dt) |
6,926 | dateutil.tz.tz | fromutc | null | @six.add_metaclass(_TzOffsetFactory)
class tzoffset(datetime.tzinfo):
"""
A simple class for representing a fixed offset from UTC.
:param name:
The timezone name, to be returned when ``tzname()`` is called.
:param offset:
The time zone offset in seconds, or (since version 2.6.0) a
:py:class:`datetime.timedelta` object.
"""
def __init__(self, name, offset):
self._name = name
try:
# Allow a timedelta
offset = offset.total_seconds()
except (TypeError, AttributeError):
pass
self._offset = datetime.timedelta(seconds=_get_supported_offset(offset))
def utcoffset(self, dt):
return self._offset
def dst(self, dt):
return ZERO
@tzname_in_python2
def tzname(self, dt):
return self._name
@_validate_fromutc_inputs
def fromutc(self, dt):
return dt + self._offset
def is_ambiguous(self, dt):
"""
Whether or not the "wall time" of a given datetime is ambiguous in this
zone.
:param dt:
A :py:class:`datetime.datetime`, naive or time zone aware.
:return:
Returns ``True`` if ambiguous, ``False`` otherwise.
.. versionadded:: 2.6.0
"""
return False
def __eq__(self, other):
if not isinstance(other, tzoffset):
return NotImplemented
return self._offset == other._offset
__hash__ = None
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return "%s(%s, %s)" % (self.__class__.__name__,
repr(self._name),
int(self._offset.total_seconds()))
__reduce__ = object.__reduce__
| (self, dt) |
6,927 | dateutil.tz.tz | is_ambiguous |
Whether or not the "wall time" of a given datetime is ambiguous in this
zone.
:param dt:
A :py:class:`datetime.datetime`, naive or time zone aware.
:return:
Returns ``True`` if ambiguous, ``False`` otherwise.
.. versionadded:: 2.6.0
| def is_ambiguous(self, dt):
"""
Whether or not the "wall time" of a given datetime is ambiguous in this
zone.
:param dt:
A :py:class:`datetime.datetime`, naive or time zone aware.
:return:
Returns ``True`` if ambiguous, ``False`` otherwise.
.. versionadded:: 2.6.0
"""
return False
| (self, dt) |
6,928 | dateutil.tz.tz | tzname | null | @tzname_in_python2
def tzname(self, dt):
return self._name
| (self, dt) |
6,929 | dateutil.tz.tz | utcoffset | null | def utcoffset(self, dt):
return self._offset
| (self, dt) |
6,931 | abc | ABC | Helper class that provides a standard way to create an ABC using
inheritance.
| class ABC(metaclass=ABCMeta):
"""Helper class that provides a standard way to create an ABC using
inheritance.
"""
__slots__ = ()
| () |
6,932 | growthbook | AbstractFeatureCache | null | class AbstractFeatureCache(ABC):
@abstractmethod
def get(self, key: str) -> Optional[Dict]:
pass
@abstractmethod
def set(self, key: str, value: Dict, ttl: int) -> None:
pass
def clear(self) -> None:
pass
| () |
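A minimal in-memory implementation sketch of the abstract cache above; it is not part of the growthbook source shown here:
```python
from time import time
from typing import Dict, Optional
from growthbook import AbstractFeatureCache

class InMemoryFeatureCache(AbstractFeatureCache):
    def __init__(self) -> None:
        self._store: Dict[str, tuple] = {}

    def get(self, key: str) -> Optional[Dict]:
        entry = self._store.get(key)
        if entry is None or entry[1] < time():
            return None                      # missing or expired
        return entry[0]

    def set(self, key: str, value: Dict, ttl: int) -> None:
        self._store[key] = (value, time() + ttl)

    def clear(self) -> None:
        self._store.clear()
```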
6,933 | growthbook | clear | null | def clear(self) -> None:
pass
| (self) -> NoneType |
6,934 | growthbook | get | null | @abstractmethod
def get(self, key: str) -> Optional[Dict]:
pass
| (self, key: str) -> Optional[Dict] |
6,935 | growthbook | set | null | @abstractmethod
def set(self, key: str, value: Dict, ttl: int) -> None:
pass
| (self, key: str, value: Dict, ttl: int) -> NoneType |
6,936 | growthbook | AbstractStickyBucketService | null | class AbstractStickyBucketService(ABC):
@abstractmethod
def get_assignments(self, attributeName: str, attributeValue: str) -> Optional[Dict]:
pass
@abstractmethod
def save_assignments(self, doc: Dict) -> None:
pass
def get_key(self, attributeName: str, attributeValue: str) -> str:
return f"{attributeName}||{attributeValue}"
# By default, just loop through all attributes and call get_assignments
# Override this method in subclasses to perform a multi-query instead
def get_all_assignments(self, attributes: Dict[str, str]) -> Dict[str, Dict]:
docs = {}
for attributeName, attributeValue in attributes.items():
doc = self.get_assignments(attributeName, attributeValue)
if doc:
docs[self.get_key(attributeName, attributeValue)] = doc
return docs
| () |
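A sketch of a subclass backed by a plain dict; the `attributeName`/`attributeValue` keys read from the saved doc are an assumption about its shape:
```python
from typing import Dict, Optional
from growthbook import AbstractStickyBucketService

class InMemoryStickyBucketService(AbstractStickyBucketService):
    def __init__(self) -> None:
        self.docs: Dict[str, Dict] = {}

    def get_assignments(self, attributeName: str, attributeValue: str) -> Optional[Dict]:
        return self.docs.get(self.get_key(attributeName, attributeValue))

    def save_assignments(self, doc: Dict) -> None:
        # Assumes the doc carries the attribute it was keyed on
        key = self.get_key(doc["attributeName"], doc["attributeValue"])
        self.docs[key] = doc
```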
6,937 | growthbook | get_all_assignments | null | def get_all_assignments(self, attributes: Dict[str, str]) -> Dict[str, Dict]:
docs = {}
for attributeName, attributeValue in attributes.items():
doc = self.get_assignments(attributeName, attributeValue)
if doc:
docs[self.get_key(attributeName, attributeValue)] = doc
return docs
| (self, attributes: Dict[str, str]) -> Dict[str, Dict] |
6,938 | growthbook | get_assignments | null | @abstractmethod
def get_assignments(self, attributeName: str, attributeValue: str) -> Optional[Dict]:
pass
| (self, attributeName: str, attributeValue: str) -> Optional[Dict] |
6,939 | growthbook | get_key | null | def get_key(self, attributeName: str, attributeValue: str) -> str:
return f"{attributeName}||{attributeValue}"
| (self, attributeName: str, attributeValue: str) -> str |
6,940 | growthbook | save_assignments | null | @abstractmethod
def save_assignments(self, doc: Dict) -> None:
pass
| (self, doc: Dict) -> NoneType |
6,941 | growthbook | CacheEntry | null | class CacheEntry(object):
def __init__(self, value: Dict, ttl: int) -> None:
self.value = value
self.ttl = ttl
self.expires = time() + ttl
def update(self, value: Dict):
self.value = value
self.expires = time() + self.ttl
| (value: Dict, ttl: int) -> None |
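A small sketch of how an entry's expiry window behaves; the payload is illustrative:
```python
from time import time
from growthbook import CacheEntry   # import path assumed

entry = CacheEntry({"feature": "on"}, ttl=60)
print(entry.expires > time())       # True for roughly the next 60 seconds
entry.update({"feature": "off"})    # replaces the value and refreshes the expiry
```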