repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (991 classes) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (15 classes)
---|---|---|---|---|---|
rdaton/ARS2015 | otroScript.py | 1 | 3473 | #!/usr/bin/python2
# -*- coding: utf-8 -*-
##try to import cElementTree, which is implemented in native code
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import csv
import os
import glob
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
"""
Creado 1 de Diciembre 2015
@author: R. Daton
"""
##la primera versión se hizo con lxml
##la diferencia con xml a secas, (versión actual)
##es que no tenemos que crear el diccionario
##de namespaces a mano.. pero eso da igual.
##porque aun con lxml hay referencias a nombres del xml, que son hardcoded
##visto en http://stackoverflow.com/questions/14853243/parsing-xml-with-namespace-in-python-via-elementtree
unNameSpaces={'dc': 'http://purl.org/dc/terms/',
'movie': 'http://data.linkedmdb.org/resource/movie/',
'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#',
}
def parseaPelicula (ficheroPeliculas,fCsvPeliculas):
##get a handle on the root of the tree
unArbol = ET.parse(ficheroPeliculas)
unaRaiz = unArbol.getroot()
#extract the film id
unIdPelicula= unaRaiz.find('.//movie:filmid',unNameSpaces).text
##extract the film title
unTituloPelicula= unaRaiz.find('.//dc:title',unNameSpaces).text
fCsvPeliculas.write(unIdPelicula+';'+unTituloPelicula+'\n')
##extract the actors that took part
unaListaActores=list();
for todoElemento in unaRaiz.iterfind('.//movie:actor',unNameSpaces):
##take the list of attribute keys and keep the first key name,
##which saves having to spell out the following monster in unaKey:
##{http://www.w3.org/1999/02/22-rdf-syntax-ns#}resource
unaKey=todoElemento.attrib.keys()[0]
unaCadenaActor=todoElemento.attrib.get(unaKey)
##unaCadenaActor contains a url of the form http://A/B/C/idActor
##extract idActor, splitting on "/" and taking position 5 (range 0-5)
unSeparador="/"
print unIdPelicula,',',unaCadenaActor.split(unSeparador)[5]
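##note: only the film id/title pair is written to the csv file here; the
##actor ids extracted above are just printed to stdout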
def abreFicheroRw (nombreFichero) :
f=open(nombreFichero,'w');
return f;
def cierraFicheroRw(f):
f.close()
def main():
##declaration block for input and output files,
##file indices, etc.
##declarations and initialisations for the film files
##pointer to the xml file currently being processed
directorioPelis = 'films'
fEntradaPelicula = ' '
fEntradaPeliculaConRuta=' '
formatoNFicheroXML='data.linkedmdb.org.data.film.*.xml'
##csv file of films
##create the output folder for the csv file
directorioPelisCsv='films_csv'
if not os.path.exists(directorioPelisCsv):
os.makedirs(directorioPelisCsv)
fSalidaPeliculaCsv = 'pelisCsv.csv'
fSalidaPeliculasCsvConRuta=os.path.join(directorioPelisCsv,fSalidaPeliculaCsv)
##open the csv file for writing
ficheroPelisCsv=abreFicheroRw(fSalidaPeliculasCsvConRuta)
##build the list of xml files in the films folder (enter the folder and then leave it)
dirAux=os.getcwd()
os.chdir(directorioPelis)
listaPelisXML=glob.glob(formatoNFicheroXML)
os.chdir(dirAux)
##generate the file of film nodes
for elem in listaPelisXML:
fEntradaPelicula = elem
fEntradaPeliculaConRuta=os.path.join(directorioPelis,fEntradaPelicula)
parseaPelicula(fEntradaPeliculaConRuta,ficheroPelisCsv)
cierraFicheroRw(ficheroPelisCsv)
if __name__ == "__main__":
sys.exit(main())
| gpl-3.0 |
lincolnloop/emailed-me | lib/werkzeug/routing.py | 25 | 55181 | # -*- coding: utf-8 -*-
"""
werkzeug.routing
~~~~~~~~~~~~~~~~
When it comes to combining multiple controller or view functions (however
you want to call them) you need a dispatcher. A simple way would be
applying regular expression tests on the ``PATH_INFO`` and calling
registered callback functions that then return the value.
This module implements a much more powerful system than simple regular
expression matching because it can also convert values in the URLs and
build URLs.
Here is a simple example that creates a URL map for an application with
two subdomains (www and kb) and some URL rules:
>>> m = Map([
... # Static URLs
... Rule('/', endpoint='static/index'),
... Rule('/about', endpoint='static/about'),
... Rule('/help', endpoint='static/help'),
... # Knowledge Base
... Subdomain('kb', [
... Rule('/', endpoint='kb/index'),
... Rule('/browse/', endpoint='kb/browse'),
... Rule('/browse/<int:id>/', endpoint='kb/browse'),
... Rule('/browse/<int:id>/<int:page>', endpoint='kb/browse')
... ])
... ], default_subdomain='www')
If the application doesn't use subdomains it's perfectly fine to not set
the default subdomain and not use the `Subdomain` rule factory. The endpoint
in the rules can be anything, for example import paths or unique
identifiers. The WSGI application can use those endpoints to get the
handler for that URL. It doesn't have to be a string at all but it's
recommended.
Now it's possible to create a URL adapter for one of the subdomains and
build URLs:
>>> c = m.bind('example.com')
>>> c.build("kb/browse", dict(id=42))
'http://kb.example.com/browse/42/'
>>> c.build("kb/browse", dict())
'http://kb.example.com/browse/'
>>> c.build("kb/browse", dict(id=42, page=3))
'http://kb.example.com/browse/42/3'
>>> c.build("static/about")
'/about'
>>> c.build("static/index", force_external=True)
'http://www.example.com/'
>>> c = m.bind('example.com', subdomain='kb')
>>> c.build("static/about")
'http://www.example.com/about'
The first argument to bind is the server name *without* the subdomain.
By default it will assume that the script is mounted on the root, but
often that's not the case so you can provide the real mount point as
second argument:
>>> c = m.bind('example.com', '/applications/example')
The third argument can be the subdomain, if not given the default
subdomain is used. For more details about binding have a look at the
documentation of the `MapAdapter`.
And here is how you can match URLs:
>>> c = m.bind('example.com')
>>> c.match("/")
('static/index', {})
>>> c.match("/about")
('static/about', {})
>>> c = m.bind('example.com', '/', 'kb')
>>> c.match("/")
('kb/index', {})
>>> c.match("/browse/42/23")
('kb/browse', {'id': 42, 'page': 23})
If matching fails you get a `NotFound` exception; if the rule thinks
it's a good idea to redirect (for example because the URL was defined
to have a slash at the end but the request was missing that slash) it
will raise a `RequestRedirect` exception. Both are subclasses of
`HTTPException` so you can use those errors as responses in the
application.
If matching succeeded but the URL rule was incompatible with the given
method (for example there were only rules for `GET` and `HEAD` and the
routing system tried to match a `POST` request) a `MethodNotAllowed`
exception is raised.
:copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
Thomas Johansson.
:license: BSD, see LICENSE for more details.
"""
import re
from pprint import pformat
from urlparse import urljoin
from itertools import izip
from werkzeug.urls import url_encode, url_quote
from werkzeug.utils import redirect, format_string
from werkzeug.exceptions import HTTPException, NotFound, MethodNotAllowed
from werkzeug._internal import _get_environ
_rule_re = re.compile(r'''
(?P<static>[^<]*) # static rule data
<
(?:
(?P<converter>[a-zA-Z_][a-zA-Z0-9_]*) # converter name
(?:\((?P<args>.*?)\))? # converter arguments
\: # variable delimiter
)?
(?P<variable>[a-zA-Z][a-zA-Z0-9_]*) # variable name
>
''', re.VERBOSE)
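# for example, matching the rule part '/pages/<int(fixed_digits=4):year>' yields
# static='/pages/', converter='int', args='fixed_digits=4', variable='year'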
_simple_rule_re = re.compile(r'<([^>]+)>')
def parse_rule(rule):
"""Parse a rule and return it as generator. Each iteration yields tuples
in the form ``(converter, arguments, variable)``. If the converter is
`None` it's a static url part, otherwise it's a dynamic one.
:internal:
"""
pos = 0
end = len(rule)
do_match = _rule_re.match
used_names = set()
while pos < end:
m = do_match(rule, pos)
if m is None:
break
data = m.groupdict()
if data['static']:
yield None, None, data['static']
variable = data['variable']
converter = data['converter'] or 'default'
if variable in used_names:
raise ValueError('variable name %r used twice.' % variable)
used_names.add(variable)
yield converter, data['args'] or None, variable
pos = m.end()
if pos < end:
remaining = rule[pos:]
if '>' in remaining or '<' in remaining:
raise ValueError('malformed url rule: %r' % rule)
yield None, None, remaining
def get_converter(map, name, args):
"""Create a new converter for the given arguments or raise
exception if the converter does not exist.
:internal:
"""
if name not in map.converters:
raise LookupError('the converter %r does not exist' % name)
if args:
storage = type('_Storage', (), {'__getitem__': lambda s, x: x})()
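# eval'ing the argument string with this object as the locals mapping makes any
# bare identifier resolve to its own name (e.g. `about` becomes 'about'), so the
# converter arguments can be parsed as ordinary positional/keyword arguments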
args, kwargs = eval(u'(lambda *a, **kw: (a, kw))(%s)' % args, {}, storage)
else:
args = ()
kwargs = {}
return map.converters[name](map, *args, **kwargs)
class RoutingException(Exception):
"""Special exceptions that require the application to redirect, notifying
about missing urls, etc.
:internal:
"""
class RequestRedirect(HTTPException, RoutingException):
"""Raise if the map requests a redirect. This is for example the case if
`strict_slashes` are activated and an url that requires a trailing slash.
The attribute `new_url` contains the absolute destination url.
"""
code = 301
def __init__(self, new_url):
RoutingException.__init__(self, new_url)
self.new_url = new_url
def get_response(self, environ):
return redirect(self.new_url, 301)
class RequestSlash(RoutingException):
"""Internal exception."""
class BuildError(RoutingException, LookupError):
"""Raised if the build system cannot find a URL for an endpoint with the
values provided.
"""
def __init__(self, endpoint, values, method):
LookupError.__init__(self, endpoint, values, method)
self.endpoint = endpoint
self.values = values
self.method = method
class ValidationError(ValueError):
"""Validation error. If a rule converter raises this exception the rule
does not match the current URL and the next URL is tried.
"""
class RuleFactory(object):
"""As soon as you have more complex URL setups it's a good idea to use rule
factories to avoid repetitive tasks. Some of them are builtin, others can
be added by subclassing `RuleFactory` and overriding `get_rules`.
"""
def get_rules(self, map):
"""Subclasses of `RuleFactory` have to override this method and return
an iterable of rules."""
raise NotImplementedError()
class Subdomain(RuleFactory):
"""All URLs provided by this factory have the subdomain set to a
specific domain. For example if you want to use the subdomain for
the current language this can be a good setup::
url_map = Map([
Rule('/', endpoint='#select_language'),
Subdomain('<string(length=2):lang_code>', [
Rule('/', endpoint='index'),
Rule('/about', endpoint='about'),
Rule('/help', endpoint='help')
])
])
All the rules except for the ``'#select_language'`` endpoint will now
listen on a two letter long subdomain that holds the language code
for the current request.
"""
def __init__(self, subdomain, rules):
self.subdomain = subdomain
self.rules = rules
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule = rule.empty()
rule.subdomain = self.subdomain
yield rule
class Submount(RuleFactory):
"""Like `Subdomain` but prefixes the URL rule with a given string::
url_map = Map([
Rule('/', endpoint='index'),
Submount('/blog', [
Rule('/', endpoint='blog/index'),
Rule('/entry/<entry_slug>', endpoint='blog/show')
])
])
Now the rule ``'blog/show'`` matches ``/blog/entry/<entry_slug>``.
"""
def __init__(self, path, rules):
self.path = path.rstrip('/')
self.rules = rules
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule = rule.empty()
rule.rule = self.path + rule.rule
yield rule
class EndpointPrefix(RuleFactory):
"""Prefixes all endpoints (which must be strings for this factory) with
another string. This can be useful for sub applications::
url_map = Map([
Rule('/', endpoint='index'),
EndpointPrefix('blog/', [Submount('/blog', [
Rule('/', endpoint='index'),
Rule('/entry/<entry_slug>', endpoint='show')
])])
])
"""
def __init__(self, prefix, rules):
self.prefix = prefix
self.rules = rules
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule = rule.empty()
rule.endpoint = self.prefix + rule.endpoint
yield rule
class RuleTemplate(object):
"""Returns copies of the rules wrapped and expands string templates in
the endpoint, rule, defaults or subdomain sections.
Here is a small example of such a rule template::
from werkzeug.routing import Map, Rule, RuleTemplate
resource = RuleTemplate([
Rule('/$name/', endpoint='$name.list'),
Rule('/$name/<int:id>', endpoint='$name.show')
])
url_map = Map([resource(name='user'), resource(name='page')])
When a rule template is called the keyword arguments are used to
replace the placeholders in all the string parameters.
"""
def __init__(self, rules):
self.rules = list(rules)
def __call__(self, *args, **kwargs):
return RuleTemplateFactory(self.rules, dict(*args, **kwargs))
class RuleTemplateFactory(RuleFactory):
"""A factory that fills in template variables into rules. Used by
`RuleTemplate` internally.
:internal:
"""
def __init__(self, rules, context):
self.rules = rules
self.context = context
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
new_defaults = subdomain = None
if rule.defaults is not None:
new_defaults = {}
for key, value in rule.defaults.iteritems():
if isinstance(value, basestring):
value = format_string(value, self.context)
new_defaults[key] = value
if rule.subdomain is not None:
subdomain = format_string(rule.subdomain, self.context)
new_endpoint = rule.endpoint
if isinstance(new_endpoint, basestring):
new_endpoint = format_string(new_endpoint, self.context)
yield Rule(
format_string(rule.rule, self.context),
new_defaults,
subdomain,
rule.methods,
rule.build_only,
new_endpoint,
rule.strict_slashes
)
class Rule(RuleFactory):
"""A Rule represents one URL pattern. There are some options for `Rule`
that change the way it behaves and are passed to the `Rule` constructor.
Note that besides the rule-string all arguments *must* be keyword arguments
in order to not break the application on Werkzeug upgrades.
`string`
Rule strings basically are just normal URL paths with placeholders in
the format ``<converter(arguments):name>`` where the converter and the
arguments are optional. If no converter is defined the `default`
converter is used which means `string` in the normal configuration.
URL rules that end with a slash are branch URLs, others are leaves.
If you have `strict_slashes` enabled (which is the default), all
branch URLs that are matched without a trailing slash will trigger a
redirect to the same URL with the missing slash appended.
The converters are defined on the `Map`.
`endpoint`
The endpoint for this rule. This can be anything. A reference to a
function, a string, a number etc. The preferred way is using a string
because the endpoint is used for URL generation.
`defaults`
An optional dict with defaults for other rules with the same endpoint.
This is a bit tricky but useful if you want to have unique URLs::
url_map = Map([
Rule('/all/', defaults={'page': 1}, endpoint='all_entries'),
Rule('/all/page/<int:page>', endpoint='all_entries')
])
If a user now visits ``http://example.com/all/page/1`` he will be
redirected to ``http://example.com/all/``. If `redirect_defaults` is
disabled on the `Map` instance this will only affect the URL
generation.
`subdomain`
The subdomain rule string for this rule. If not specified the rule
only matches for the `default_subdomain` of the map. If the map is
not bound to a subdomain this feature is disabled.
Can be useful if you want to have user profiles on different subdomains
and all subdomains are forwarded to your application::
url_map = Map([
Rule('/', subdomain='<username>', endpoint='user/homepage'),
Rule('/stats', subdomain='<username>', endpoint='user/stats')
])
`methods`
A sequence of http methods this rule applies to. If not specified, all
methods are allowed. For example this can be useful if you want different
endpoints for `POST` and `GET`. If methods are defined and the path
matches but the method matched against is not in this list or in the
list of another rule for that path the error raised is of the type
`MethodNotAllowed` rather than `NotFound`. If `GET` is present in the
list of methods and `HEAD` is not, `HEAD` is added automatically.
.. versionchanged:: 0.6.1
`HEAD` is now automatically added to the methods if `GET` is
present. The reason for this is that existing code often did not
work properly in servers not rewriting `HEAD` to `GET`
automatically and it was not documented how `HEAD` should be
treated. This was considered a bug in Werkzeug because of that.
`strict_slashes`
Override the `Map` setting for `strict_slashes` only for this rule. If
not specified the `Map` setting is used.
`build_only`
Set this to True and the rule will never match but will create a URL
that can be built. This is useful if you have resources on a subdomain
or folder that are not handled by the WSGI application (like static data).
`redirect_to`
If given this must be either a string or callable. In case of a
callable it's called with the url adapter that triggered the match and
the values of the URL as keyword arguments and has to return the target
for the redirect, otherwise it has to be a string with placeholders in
rule syntax::
def foo_with_slug(adapter, id):
# ask the database for the slug for the old id. this of
# course has nothing to do with werkzeug.
return 'foo/' + Foo.get_slug_for_id(id)
url_map = Map([
Rule('/foo/<slug>', endpoint='foo'),
Rule('/some/old/url/<slug>', redirect_to='foo/<slug>'),
Rule('/other/old/url/<int:id>', redirect_to=foo_with_slug)
])
When the rule is matched the routing system will raise a
`RequestRedirect` exception with the target for the redirect.
Keep in mind that the URL will be joined against the URL root of the
script so don't use a leading slash on the target URL unless you
really mean root of that domain.
"""
def __init__(self, string, defaults=None, subdomain=None, methods=None,
build_only=False, endpoint=None, strict_slashes=None,
redirect_to=None):
if not string.startswith('/'):
raise ValueError('urls must start with a leading slash')
self.rule = string
self.is_leaf = not string.endswith('/')
self.map = None
self.strict_slashes = strict_slashes
self.subdomain = subdomain
self.defaults = defaults
self.build_only = build_only
if methods is None:
self.methods = None
else:
self.methods = set([x.upper() for x in methods])
if 'HEAD' not in self.methods and 'GET' in self.methods:
self.methods.add('HEAD')
self.endpoint = endpoint
self.greediness = 0
self.redirect_to = redirect_to
if defaults is not None:
self.arguments = set(map(str, defaults))
else:
self.arguments = set()
self._trace = self._converters = self._regex = self._weights = None
def empty(self):
"""Return an unbound copy of this rule. This can be useful if you
want to reuse an already bound URL for another map."""
defaults = None
if self.defaults is not None:
defaults = dict(self.defaults)
return Rule(self.rule, defaults, self.subdomain, self.methods,
self.build_only, self.endpoint, self.strict_slashes,
self.redirect_to)
def get_rules(self, map):
yield self
def refresh(self):
"""Rebinds and refreshes the URL. Call this if you modified the
rule in place.
:internal:
"""
self.bind(self.map, rebind=True)
def bind(self, map, rebind=False):
"""Bind the url to a map and create a regular expression based on
the information from the rule itself and the defaults from the map.
:internal:
"""
if self.map is not None and not rebind:
raise RuntimeError('url rule %r already bound to map %r' %
(self, self.map))
self.map = map
if self.strict_slashes is None:
self.strict_slashes = map.strict_slashes
if self.subdomain is None:
self.subdomain = map.default_subdomain
rule = self.subdomain + '|' + (self.is_leaf and self.rule
or self.rule.rstrip('/'))
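# the regex is built over a composite 'subdomain|path' string; MapAdapter.match()
# later assembles candidate paths in the same 'subdomain|/path' form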
self._trace = []
self._converters = {}
self._weights = []
regex_parts = []
for converter, arguments, variable in parse_rule(rule):
if converter is None:
regex_parts.append(re.escape(variable))
self._trace.append((False, variable))
self._weights.append(len(variable))
else:
convobj = get_converter(map, converter, arguments)
regex_parts.append('(?P<%s>%s)' % (variable, convobj.regex))
self._converters[variable] = convobj
self._trace.append((True, variable))
self._weights.append(convobj.weight)
self.arguments.add(str(variable))
if convobj.is_greedy:
self.greediness += 1
if not self.is_leaf:
self._trace.append((False, '/'))
if not self.build_only:
regex = r'^%s%s$' % (
u''.join(regex_parts),
(not self.is_leaf or not self.strict_slashes) and \
'(?<!/)(?P<__suffix__>/?)' or ''
)
self._regex = re.compile(regex, re.UNICODE)
def match(self, path):
"""Check if the rule matches a given path. Path is a string in the
form ``"subdomain|/path(method)"`` and is assembled by the map.
If the rule matches a dict with the converted values is returned,
otherwise the return value is `None`.
:internal:
"""
if not self.build_only:
m = self._regex.search(path)
if m is not None:
groups = m.groupdict()
# we have a folder like part of the url without a trailing
# slash and strict slashes enabled. raise an exception that
# tells the map to redirect to the same url but with a
# trailing slash
if self.strict_slashes and not self.is_leaf and \
not groups.pop('__suffix__'):
raise RequestSlash()
# if we are not in strict slashes mode we have to remove
# a __suffix__
elif not self.strict_slashes:
del groups['__suffix__']
result = {}
for name, value in groups.iteritems():
try:
value = self._converters[name].to_python(value)
except ValidationError:
return
result[str(name)] = value
if self.defaults is not None:
result.update(self.defaults)
return result
def build(self, values, append_unknown=True):
"""Assembles the relative url for that rule and the subdomain.
If building doesn't work for some reason `None` is returned.
:internal:
"""
tmp = []
add = tmp.append
processed = set(self.arguments)
for is_dynamic, data in self._trace:
if is_dynamic:
try:
add(self._converters[data].to_url(values[data]))
except ValidationError:
return
processed.add(data)
else:
add(data)
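# the trace starts with the 'subdomain|' prefix recorded in bind(), so splitting
# on the first '|' separates the subdomain from the relative url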
subdomain, url = (u''.join(tmp)).split('|', 1)
if append_unknown:
query_vars = MultiDict(values)
for key in processed:
if key in query_vars:
del query_vars[key]
if query_vars:
url += '?' + url_encode(query_vars, self.map.charset,
sort=self.map.sort_parameters,
key=self.map.sort_key)
return subdomain, url
def provides_defaults_for(self, rule):
"""Check if this rule has defaults for a given rule.
:internal:
"""
return not self.build_only and self.defaults is not None and \
self.endpoint == rule.endpoint and self != rule and \
self.arguments == rule.arguments
def suitable_for(self, values, method=None):
"""Check if the dict of values has enough data for url generation.
:internal:
"""
if method is not None:
if self.methods is not None and method not in self.methods:
return False
valueset = set(values)
for key in self.arguments - set(self.defaults or ()):
if key not in values:
return False
if self.arguments.issubset(valueset):
if self.defaults is None:
return True
for key, value in self.defaults.iteritems():
if value != values[key]:
return False
return True
def match_compare(self, other):
"""Compare this object with another one for matching.
:internal:
"""
for sw, ow in izip(self._weights, other._weights):
if sw > ow:
return -1
elif sw < ow:
return 1
if len(self._weights) > len(other._weights):
return -1
if len(self._weights) < len(other._weights):
return 1
if not other.arguments and self.arguments:
return 1
elif other.arguments and not self.arguments:
return -1
elif other.defaults is None and self.defaults is not None:
return 1
elif other.defaults is not None and self.defaults is None:
return -1
elif self.greediness > other.greediness:
return -1
elif self.greediness < other.greediness:
return 1
elif len(self.arguments) > len(other.arguments):
return 1
elif len(self.arguments) < len(other.arguments):
return -1
return 1
def build_compare(self, other):
"""Compare this object with another one for building.
:internal:
"""
if not other.arguments and self.arguments:
return -1
elif other.arguments and not self.arguments:
return 1
elif other.defaults is None and self.defaults is not None:
return -1
elif other.defaults is not None and self.defaults is None:
return 1
elif self.provides_defaults_for(other):
return -1
elif other.provides_defaults_for(self):
return 1
elif self.greediness > other.greediness:
return -1
elif self.greediness < other.greediness:
return 1
elif len(self.arguments) > len(other.arguments):
return -1
elif len(self.arguments) < len(other.arguments):
return 1
return -1
def __eq__(self, other):
return self.__class__ is other.__class__ and \
self._trace == other._trace
def __ne__(self, other):
return not self.__eq__(other)
def __unicode__(self):
return self.rule
def __str__(self):
charset = self.map is not None and self.map.charset or 'utf-8'
return unicode(self).encode(charset)
def __repr__(self):
if self.map is None:
return '<%s (unbound)>' % self.__class__.__name__
charset = self.map is not None and self.map.charset or 'utf-8'
tmp = []
for is_dynamic, data in self._trace:
if is_dynamic:
tmp.append('<%s>' % data)
else:
tmp.append(data)
return '<%s %r%s -> %s>' % (
self.__class__.__name__,
(u''.join(tmp).encode(charset)).lstrip('|'),
self.methods is not None and ' (%s)' % \
', '.join(self.methods) or '',
self.endpoint
)
class BaseConverter(object):
"""Base class for all converters."""
regex = '[^/]+'
is_greedy = False
weight = 100
def __init__(self, map):
self.map = map
def to_python(self, value):
return value
def to_url(self, value):
return url_quote(value, self.map.charset)
class UnicodeConverter(BaseConverter):
"""This converter is the default converter and accepts any string but
only one path segment. Thus the string can not include a slash.
This is the default validator.
Example::
Rule('/pages/<page>'),
Rule('/<string(length=2):lang_code>')
:param map: the :class:`Map`.
:param minlength: the minimum length of the string. Must be greater
than or equal to 1.
:param maxlength: the maximum length of the string.
:param length: the exact length of the string.
"""
def __init__(self, map, minlength=1, maxlength=None, length=None):
BaseConverter.__init__(self, map)
if length is not None:
length = '{%d}' % int(length)
else:
if maxlength is None:
maxlength = ''
else:
maxlength = int(maxlength)
length = '{%s,%s}' % (
int(minlength),
maxlength
)
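# e.g. length=2 gives the regex '[^/]{2}'; the default minlength=1 with no
# maxlength gives '[^/]{1,}'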
self.regex = '[^/]' + length
class AnyConverter(BaseConverter):
"""Matches one of the items provided. Items can either be Python
identifiers or unicode strings::
Rule('/<any(about, help, imprint, u"class"):page_name>')
:param map: the :class:`Map`.
:param items: this function accepts the possible items as positional
arguments.
"""
def __init__(self, map, *items):
BaseConverter.__init__(self, map)
self.regex = '(?:%s)' % '|'.join([re.escape(x) for x in items])
class PathConverter(BaseConverter):
"""Like the default :class:`UnicodeConverter`, but it also matches
slashes. This is useful for wikis and similar applications::
Rule('/<path:wikipage>')
Rule('/<path:wikipage>/edit')
:param map: the :class:`Map`.
"""
regex = '[^/].*?'
is_greedy = True
weight = 50
class NumberConverter(BaseConverter):
"""Baseclass for `IntegerConverter` and `FloatConverter`.
:internal:
"""
def __init__(self, map, fixed_digits=0, min=None, max=None):
BaseConverter.__init__(self, map)
self.fixed_digits = fixed_digits
self.min = min
self.max = max
def to_python(self, value):
if (self.fixed_digits and len(value) != self.fixed_digits):
raise ValidationError()
value = self.num_convert(value)
if (self.min is not None and value < self.min) or \
(self.max is not None and value > self.max):
raise ValidationError()
return value
def to_url(self, value):
value = self.num_convert(value)
if self.fixed_digits:
value = ('%%0%sd' % self.fixed_digits) % value
return str(value)
class IntegerConverter(NumberConverter):
"""This converter only accepts integer values::
Rule('/page/<int:page>')
This converter does not support negative values.
:param map: the :class:`Map`.
:param fixed_digits: the number of fixed digits in the URL. If you set
this to ``4`` for example, the application will
only match if the url looks like ``/0001/``. The
default is variable length.
:param min: the minimal value.
:param max: the maximal value.
"""
regex = r'\d+'
num_convert = int
class FloatConverter(NumberConverter):
"""This converter only accepts floating point values::
Rule('/probability/<float:probability>')
This converter does not support negative values.
:param map: the :class:`Map`.
:param min: the minimal value.
:param max: the maximal value.
"""
regex = r'\d+\.\d+'
num_convert = float
def __init__(self, map, min=None, max=None):
NumberConverter.__init__(self, map, 0, min, max)
class Map(object):
"""The map class stores all the URL rules and some configuration
parameters. Some of the configuration values are only stored on the
`Map` instance since those affect all rules, others are just defaults
and can be overridden for each rule. Note that you have to specify all
arguments besides the `rules` as keyword arguments!
:param rules: sequence of url rules for this map.
:param default_subdomain: The default subdomain for rules without a
subdomain defined.
:param charset: charset of the URL. Defaults to ``"utf-8"``
:param strict_slashes: Take care of trailing slashes.
:param redirect_defaults: This will redirect to the default rule if it
wasn't visited that way. This helps creating
unique URLs.
:param converters: A dict of converters that adds additional converters
to the list of converters. If you redefine one
converter this will override the original one.
:param sort_parameters: If set to `True` the url parameters are sorted.
See `url_encode` for more details.
:param sort_key: The sort key function for `url_encode`.
.. versionadded:: 0.5
`sort_parameters` and `sort_key` were added.
"""
#: .. versionadded:: 0.6
#: a dict of default converters to be used.
default_converters = None
def __init__(self, rules=None, default_subdomain='', charset='utf-8',
strict_slashes=True, redirect_defaults=True,
converters=None, sort_parameters=False, sort_key=None):
self._rules = []
self._rules_by_endpoint = {}
self._remap = True
self.default_subdomain = default_subdomain
self.charset = charset
self.strict_slashes = strict_slashes
self.redirect_defaults = redirect_defaults
self.converters = self.default_converters.copy()
if converters:
self.converters.update(converters)
self.sort_parameters = sort_parameters
self.sort_key = sort_key
for rulefactory in rules or ():
self.add(rulefactory)
def is_endpoint_expecting(self, endpoint, *arguments):
"""Iterate over all rules and check if the endpoint expects
the arguments provided. This is for example useful if you have
some URLs that expect a language code and others that do not and
you want to wrap the builder a bit so that the current language
code is automatically added if not provided but endpoints expect
it.
:param endpoint: the endpoint to check.
:param arguments: this function accepts one or more arguments
as positional arguments. Each one of them is
checked.
"""
self.update()
arguments = set(arguments)
for rule in self._rules_by_endpoint[endpoint]:
if arguments.issubset(rule.arguments):
return True
return False
def iter_rules(self, endpoint=None):
"""Iterate over all rules or the rules of an endpoint.
:param endpoint: if provided only the rules for that endpoint
are returned.
:return: an iterator
"""
if endpoint is not None:
return iter(self._rules_by_endpoint[endpoint])
return iter(self._rules)
def add(self, rulefactory):
"""Add a new rule or factory to the map and bind it. Requires that the
rule is not bound to another map.
:param rulefactory: a :class:`Rule` or :class:`RuleFactory`
"""
for rule in rulefactory.get_rules(self):
rule.bind(self)
self._rules.append(rule)
self._rules_by_endpoint.setdefault(rule.endpoint, []).append(rule)
self._remap = True
def bind(self, server_name, script_name=None, subdomain=None,
url_scheme='http', default_method='GET', path_info=None):
"""Return a new :class:`MapAdapter` with the details specified to the
call. Note that `script_name` will default to ``'/'`` if not further
specified or `None`. The `server_name` at least is a requirement
because the HTTP RFC requires absolute URLs for redirects and so all
redirect exceptions raised by Werkzeug will contain the full canonical
URL.
If no path_info is passed to :meth:`match` it will use the default path
info passed to bind. While this doesn't really make sense for
manual bind calls, it's useful if you bind a map to a WSGI
environment which already contains the path info.
`subdomain` will default to the `default_subdomain` for this map if
not defined. If there is no `default_subdomain` you cannot use the
subdomain feature.
"""
if subdomain is None:
subdomain = self.default_subdomain
if script_name is None:
script_name = '/'
return MapAdapter(self, server_name, script_name, subdomain,
url_scheme, path_info, default_method)
def bind_to_environ(self, environ, server_name=None, subdomain=None):
"""Like :meth:`bind` but you can pass it an WSGI environment and it
will fetch the information from that dictionary. Note that because of
limitations in the protocol there is no way to get the current
subdomain and real `server_name` from the environment. If you don't
provide it, Werkzeug will use `SERVER_NAME` and `SERVER_PORT` (or
`HTTP_HOST` if provided) as the `server_name`, with the subdomain
feature disabled.
If `subdomain` is `None` but an environment and a server name is
provided it will calculate the current subdomain automatically.
For example, if `server_name` is ``'example.com'`` and the `SERVER_NAME`
in the WSGI `environ` is ``'staging.dev.example.com'``, the calculated
subdomain will be ``'staging.dev'``.
If the object passed as environ has an environ attribute, the value of
this attribute is used instead. This allows you to pass request
objects. Additionally, `PATH_INFO` is added as a default of the
:class:`MapAdapter` so that you don't have to pass the path info to
the match method.
.. versionchanged:: 0.5
previously this method accepted a bogus `calculate_subdomain`
parameter that did not have any effect. It was removed because
of that.
:param environ: a WSGI environment.
:param server_name: an optional server name hint (see above).
:param subdomain: optionally the current subdomain (see above).
"""
environ = _get_environ(environ)
if server_name is None:
if 'HTTP_HOST' in environ:
server_name = environ['HTTP_HOST']
else:
server_name = environ['SERVER_NAME']
if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not \
in (('https', '443'), ('http', '80')):
server_name += ':' + environ['SERVER_PORT']
elif subdomain is None:
wsgi_server_name = environ.get('HTTP_HOST', environ['SERVER_NAME'])
cur_server_name = wsgi_server_name.split(':', 1)[0].split('.')
real_server_name = server_name.split(':', 1)[0].split('.')
offset = -len(real_server_name)
if cur_server_name[offset:] != real_server_name:
raise ValueError('the server name provided (%r) does not '
'match the server name from the WSGI '
'environment (%r)' %
(server_name, wsgi_server_name))
subdomain = '.'.join(filter(None, cur_server_name[:offset]))
return Map.bind(self, server_name, environ.get('SCRIPT_NAME'),
subdomain, environ['wsgi.url_scheme'],
environ['REQUEST_METHOD'], environ.get('PATH_INFO'))
def update(self):
"""Called before matching and building to keep the compiled rules
in the correct order after things changed.
"""
if self._remap:
self._rules.sort(lambda a, b: a.match_compare(b))
for rules in self._rules_by_endpoint.itervalues():
rules.sort(lambda a, b: a.build_compare(b))
self._remap = False
def __repr__(self):
rules = self.iter_rules()
return '%s([%s])' % (self.__class__.__name__, pformat(list(rules)))
class MapAdapter(object):
"""Returned by :meth:`Map.bind` or :meth:`Map.bind_to_environ` and does
the URL matching and building based on runtime information.
"""
def __init__(self, map, server_name, script_name, subdomain,
url_scheme, path_info, default_method):
self.map = map
self.server_name = server_name
if not script_name.endswith('/'):
script_name += '/'
self.script_name = script_name
self.subdomain = subdomain
self.url_scheme = url_scheme
self.path_info = path_info or u''
self.default_method = default_method
def dispatch(self, view_func, path_info=None, method=None,
catch_http_exceptions=False):
"""Does the complete dispatching process. `view_func` is called with
the endpoint and a dict with the values for the view. It should
look up the view function, call it, and return a response object
or WSGI application. http exceptions are not caught by default
so that applications can display nicer error messages by just
catching them by hand. If you want to stick with the default
error messages you can pass it ``catch_http_exceptions=True`` and
it will catch the http exceptions.
Here is a small example of dispatch usage::
from werkzeug import Request, Response, responder
from werkzeug.routing import Map, Rule
def on_index(request):
return Response('Hello from the index')
url_map = Map([Rule('/', endpoint='index')])
views = {'index': on_index}
@responder
def application(environ, start_response):
request = Request(environ)
urls = url_map.bind_to_environ(environ)
return urls.dispatch(lambda e, v: views[e](request, **v),
catch_http_exceptions=True)
Keep in mind that this method might return exception objects, too, so
use :class:`Response.force_type` to get a response object.
:param view_func: a function that is called with the endpoint as
first argument and the value dict as second. Has
to dispatch to the actual view function with this
information. (see above)
:param path_info: the path info to use for matching. Overrides the
path info specified on binding.
:param method: the HTTP method used for matching. Overrides the
method specified on binding.
:param catch_http_exceptions: set to `True` to catch any of the
werkzeug :class:`HTTPException`\s.
"""
try:
try:
endpoint, args = self.match(path_info, method)
except RequestRedirect, e:
return e
return view_func(endpoint, args)
except HTTPException, e:
if catch_http_exceptions:
return e
raise
def match(self, path_info=None, method=None, return_rule=False):
"""The usage is simple: you just pass the match method the current
path info as well as the method (which defaults to `GET`). The
following things can then happen:
- you receive a `NotFound` exception that indicates that no URL
matched. A `NotFound` exception is also a WSGI application you
can call to get a default "page not found" page (it happens to be the
same object as `werkzeug.exceptions.NotFound`)
- you receive a `MethodNotAllowed` exception that indicates that there
is a match for this URL but not for the current request method.
This is useful for RESTful applications.
- you receive a `RequestRedirect` exception with a `new_url`
attribute. This exception is used to notify you about a redirect
Werkzeug requests from your WSGI application. This is for example the
case if you request ``/foo`` although the correct URL is ``/foo/``.
You can use the `RequestRedirect` instance as response-like object
similar to all other subclasses of `HTTPException`.
- you get a tuple in the form ``(endpoint, arguments)`` if there is
a match (unless `return_rule` is True, in which case you get a tuple
in the form ``(rule, arguments)``)
If the path info is not passed to the match method the default path
info of the map is used (defaults to the root URL if not defined
explicitly).
All of the exceptions raised are subclasses of `HTTPException` so they
can be used as WSGI responses. They will all render generic error or
redirect pages.
Here is a small example for matching:
>>> m = Map([
... Rule('/', endpoint='index'),
... Rule('/downloads/', endpoint='downloads/index'),
... Rule('/downloads/<int:id>', endpoint='downloads/show')
... ])
>>> urls = m.bind("example.com", "/")
>>> urls.match("/", "GET")
('index', {})
>>> urls.match("/downloads/42")
('downloads/show', {'id': 42})
And here is what happens on redirect and missing URLs:
>>> urls.match("/downloads")
Traceback (most recent call last):
...
RequestRedirect: http://example.com/downloads/
>>> urls.match("/missing")
Traceback (most recent call last):
...
NotFound: 404 Not Found
:param path_info: the path info to use for matching. Overrides the
path info specified on binding.
:param method: the HTTP method used for matching. Overrides the
method specified on binding.
:param return_rule: return the rule that matched instead of just the
endpoint (defaults to `False`).
.. versionadded:: 0.6
`return_rule` was added.
"""
self.map.update()
if path_info is None:
path_info = self.path_info
if not isinstance(path_info, unicode):
path_info = path_info.decode(self.map.charset, 'ignore')
method = (method or self.default_method).upper()
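# build the candidate in the same 'subdomain|/path' form that Rule.bind() used
# when compiling each rule's regex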
path = u'%s|/%s' % (self.subdomain, path_info.lstrip('/'))
have_match_for = set()
for rule in self.map._rules:
try:
rv = rule.match(path)
except RequestSlash:
raise RequestRedirect(str('%s://%s%s%s/%s/' % (
self.url_scheme,
self.subdomain and self.subdomain + '.' or '',
self.server_name,
self.script_name[:-1],
url_quote(path_info.lstrip('/'), self.map.charset)
)))
if rv is None:
continue
if rule.methods is not None and method not in rule.methods:
have_match_for.update(rule.methods)
continue
if self.map.redirect_defaults:
for r in self.map._rules_by_endpoint[rule.endpoint]:
if r.provides_defaults_for(rule) and \
r.suitable_for(rv, method):
rv.update(r.defaults)
subdomain, path = r.build(rv)
raise RequestRedirect(str('%s://%s%s%s/%s' % (
self.url_scheme,
subdomain and subdomain + '.' or '',
self.server_name,
self.script_name[:-1],
url_quote(path.lstrip('/'), self.map.charset)
)))
if rule.redirect_to is not None:
if isinstance(rule.redirect_to, basestring):
def _handle_match(match):
value = rv[match.group(1)]
return rule._converters[match.group(1)].to_url(value)
redirect_url = _simple_rule_re.sub(_handle_match,
rule.redirect_to)
else:
redirect_url = rule.redirect_to(self, **rv)
raise RequestRedirect(str(urljoin('%s://%s%s%s' % (
self.url_scheme,
self.subdomain and self.subdomain + '.' or '',
self.server_name,
self.script_name
), redirect_url)))
if return_rule:
return rule, rv
else:
return rule.endpoint, rv
if have_match_for:
raise MethodNotAllowed(valid_methods=list(have_match_for))
raise NotFound()
def test(self, path_info=None, method=None):
"""Test if a rule would match. Works like `match` but returns `True`
if the URL matches, or `False` if it does not.
:param path_info: the path info to use for matching. Overrides the
path info specified on binding.
:param method: the HTTP method used for matching. Overrides the
method specified on binding.
"""
try:
self.match(path_info, method)
except RequestRedirect:
pass
except NotFound:
return False
return True
def _partial_build(self, endpoint, values, method, append_unknown):
"""Helper for :meth:`build`. Returns subdomain and path for the
rule that accepts this endpoint, values and method.
:internal:
"""
# in case the method is none, try with the default method first
if method is None:
rv = self._partial_build(endpoint, values, self.default_method,
append_unknown)
if rv is not None:
return rv
# default method did not match or a specific method is passed,
# check all and go with first result.
for rule in self.map._rules_by_endpoint.get(endpoint, ()):
if rule.suitable_for(values, method):
rv = rule.build(values, append_unknown)
if rv is not None:
return rv
def build(self, endpoint, values=None, method=None, force_external=False,
append_unknown=True):
"""Building URLs works pretty much the other way round. Instead of
`match` you call `build` and pass it the endpoint and a dict of
arguments for the placeholders.
The `build` function also accepts an argument called `force_external`
which, if set to `True`, will force external URLs. By default
external URLs (including the server name) will only be used if the
target URL is on a different subdomain.
>>> m = Map([
... Rule('/', endpoint='index'),
... Rule('/downloads/', endpoint='downloads/index'),
... Rule('/downloads/<int:id>', endpoint='downloads/show')
... ])
>>> urls = m.bind("example.com", "/")
>>> urls.build("index", {})
'/'
>>> urls.build("downloads/show", {'id': 42})
'/downloads/42'
>>> urls.build("downloads/show", {'id': 42}, force_external=True)
'http://example.com/downloads/42'
Because URLs cannot contain non-ASCII data you will always get
bytestrings back. Non-ASCII characters are urlencoded with the
charset defined on the map instance.
Additional values are converted to unicode and appended to the URL as
URL querystring parameters:
>>> urls.build("index", {'q': 'My Searchstring'})
'/?q=My+Searchstring'
If no rule exists for the endpoint when building, a `BuildError`
exception is raised.
The build method accepts an argument called `method` which allows you
to specify the method you want to have a URL built for if you have
different methods for the same endpoint specified.
.. versionadded:: 0.6
the `append_unknown` parameter was added.
:param endpoint: the endpoint of the URL to build.
:param values: the values for the URL to build. Unhandled values are
appended to the URL as query parameters.
:param method: the HTTP method for the rule if there are different
URLs for different methods on the same endpoint.
:param force_external: enforce full canonical external URLs.
:param append_unknown: unknown parameters are appended to the generated
URL as query string argument. Disable this
if you want the builder to ignore those.
"""
self.map.update()
if values:
if isinstance(values, MultiDict):
values = dict((k, v) for k, v in values.iteritems(multi=True)
if v is not None)
else:
values = dict((k, v) for k, v in values.iteritems()
if v is not None)
else:
values = {}
rv = self._partial_build(endpoint, values, method, append_unknown)
if rv is None:
raise BuildError(endpoint, values, method)
subdomain, path = rv
if not force_external and subdomain == self.subdomain:
return str(urljoin(self.script_name, path.lstrip('/')))
return str('%s://%s%s%s/%s' % (
self.url_scheme,
subdomain and subdomain + '.' or '',
self.server_name,
self.script_name[:-1],
path.lstrip('/')
))
#: the default converter mapping for the map.
DEFAULT_CONVERTERS = {
'default': UnicodeConverter,
'string': UnicodeConverter,
'any': AnyConverter,
'path': PathConverter,
'int': IntegerConverter,
'float': FloatConverter
}
from werkzeug.datastructures import ImmutableDict, MultiDict
Map.default_converters = ImmutableDict(DEFAULT_CONVERTERS)
| bsd-3-clause |
zmaruo/coreclr | src/pal/automation/compile.py | 154 | 2660 | import logging as log
import sys
import getopt
import os
import subprocess
import shutil
def RunCMake(workspace, target, platform):
# run CMake
print "\n==================================================\n"
returncode = 0
if platform == "windows":
print "Running: vcvarsall.bat x86_amd64 && " + workspace + "\ProjectK\NDP\clr\src\pal\\tools\gen-buildsys-win.bat " + workspace + "\ProjectK\NDP\clr"
print "\n==================================================\n"
sys.stdout.flush()
returncode = subprocess.call(["vcvarsall.bat", "x86_amd64", "&&", workspace + "\ProjectK\NDP\clr\src\pal\\tools\gen-buildsys-win.bat", workspace + "\ProjectK\NDP\clr"])
elif platform == "linux":
print "Running: " + workspace + "/ProjectK/NDP/clr/src/pal/tools/gen-buildsys-clang.sh " + workspace + "/ProjectK/NDP/clr DEBUG"
print "\n==================================================\n"
sys.stdout.flush()
returncode = subprocess.call(workspace + "/ProjectK/NDP/clr/src/pal/tools/gen-buildsys-clang.sh " + workspace + "/ProjectK/NDP/clr " + target, shell=True)
if returncode != 0:
print "ERROR: cmake failed with exit code " + str(returncode)
return returncode
def RunBuild(target, platform, arch):
if platform == "windows":
return RunMsBuild(target, arch)
elif platform == "linux":
return RunMake()
def RunMsBuild(target, arch):
# run MsBuild
print "\n==================================================\n"
print "Running: vcvarsall.bat x86_amd64 && msbuild CoreCLR.sln /p:Configuration=" + target + " /p:Platform=" + arch
print "\n==================================================\n"
sys.stdout.flush()
returncode = subprocess.call(["vcvarsall.bat","x86_amd64","&&","msbuild","CoreCLR.sln","/p:Configuration=" + target,"/p:Platform=" + arch])
if returncode != 0:
print "ERROR: vcvarsall.bat failed with exit code " + str(returncode)
return returncode
def RunMake():
print "\n==================================================\n"
print "Running: make"
print "\n==================================================\n"
sys.stdout.flush()
returncode = subprocess.call(["make"])
if returncode != 0:
print "ERROR: make failed with exit code " + str(returncode)
return returncode
def Compile(workspace, target, platform, arch):
returncode = RunCMake(workspace, target, platform)
if returncode != 0:
return returncode
returncode += RunBuild(target, platform, arch)
if returncode != 0:
return returncode
return returncode
| mit |
Windowsfreak/OpenNI2 | Packaging/ReleaseVersion.py | 32 | 6788 | #!/usr/bin/python
#/****************************************************************************
#* *
#* OpenNI 2.x Alpha *
#* Copyright (C) 2012 PrimeSense Ltd. *
#* *
#* This file is part of OpenNI. *
#* *
#* Licensed under the Apache License, Version 2.0 (the "License"); *
#* you may not use this file except in compliance with the License. *
#* You may obtain a copy of the License at *
#* *
#* http://www.apache.org/licenses/LICENSE-2.0 *
#* *
#* Unless required by applicable law or agreed to in writing, software *
#* distributed under the License is distributed on an "AS IS" BASIS, *
#* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
#* See the License for the specific language governing permissions and *
#* limitations under the License. *
#* *
#****************************************************************************/
import os
import re
import sys
import shutil
import subprocess
import platform
import argparse
import stat
import UpdateVersion
if len(sys.argv) < 2 or sys.argv[1] in ('-h','--help'):
print "usage: " + sys.argv[0] + " <x86|x64|Arm|android> [UpdateVersion]"
sys.exit(1)
plat = sys.argv[1]
origDir = os.getcwd()
shouldUpdate = 0
if len(sys.argv) >= 3 and sys.argv[2] == 'UpdateVersion':
shouldUpdate = 1
if shouldUpdate == 1:
# Increase Build
UpdateVersion.VERSION_BUILD += 1
UpdateVersion.update()
def get_reg_values(reg_key, value_list):
# open the reg key
try:
reg_key = win32api.RegOpenKeyEx(*reg_key)
except pywintypes.error as e:
raise Exception("Failed to open registry key!")
# Get the values
try:
values = [(win32api.RegQueryValueEx(reg_key, name), data_type) for name, data_type in value_list]
# values list of ((value, type), expected_type)
for (value, data_type), expected in values:
if data_type != expected:
raise Exception("Bad registry value type! Expected %d, got %d instead." % (expected, data_type))
# values okay, leave only values
values = [value for ((value, data_type), expected) in values]
except pywintypes.error as e:
raise Exception("Failed to get registry value!")
finally:
try:
win32api.RegCloseKey(reg_key)
except pywintypes.error as e:
# We don't care if reg key close failed...
pass
return tuple(values)
def calc_jobs_number():
cores = 1
try:
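# note: `self`, `OSMac` and `gop` are not defined in this module-level function, so this
# block normally raises and falls through to the default of one core (two jobs)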
if isinstance(self, OSMac):
txt = gop('sysctl -n hw.physicalcpu')
else:
txt = gop('grep "processor\W:" /proc/cpuinfo | wc -l')
cores = int(txt)
except:
pass
return str(cores * 2)
# Create installer
strVersion = UpdateVersion.getVersionName()
print "Creating installer for OpenNI " + strVersion + " " + plat
finalDir = "Final"
if not os.path.isdir(finalDir):
os.mkdir(finalDir)
if plat == 'android':
if not 'NDK_ROOT' in os.environ:
print 'Please define NDK_ROOT!'
sys.exit(2)
ndkDir = os.environ['NDK_ROOT']
buildDir = 'AndroidBuild'
if os.path.isdir(buildDir):
shutil.rmtree(buildDir)
outputDir = 'OpenNI-android-' + strVersion
if os.path.isdir(outputDir):
shutil.rmtree(outputDir)
os.makedirs(buildDir + '/jni')
os.symlink('../../../', buildDir + '/jni/OpenNI2')
shutil.copy('../Android.mk', buildDir + '/jni')
shutil.copy('../Application.mk', buildDir + '/jni')
rc = subprocess.call([ ndkDir + '/ndk-build', '-C', buildDir, '-j8' ])
if rc != 0:
print 'Build failed!'
sys.exit(3)
finalFile = finalDir + '/' + outputDir + '.tar'
shutil.move(buildDir + '/libs/armeabi-v7a', outputDir)
# add config files
shutil.copy('../Config/OpenNI.ini', outputDir)
shutil.copy('../Config/OpenNI2/Drivers/PS1080.ini', outputDir)
print('Creating archive ' + finalFile)
subprocess.check_call(['tar', '-cf', finalFile, outputDir])
elif platform.system() == 'Windows':
import win32con,pywintypes,win32api,platform
(bits,linkage) = platform.architecture()
matchObject = re.search('64',bits)
is_64_bit_machine = matchObject is not None
if is_64_bit_machine:
MSVC_KEY = (win32con.HKEY_LOCAL_MACHINE, r"SOFTWARE\Wow6432Node\Microsoft\VisualStudio\10.0")
else:
MSVC_KEY = (win32con.HKEY_LOCAL_MACHINE, r"SOFTWARE\Microsoft\VisualStudio\10.0")
MSVC_VALUES = [("InstallDir", win32con.REG_SZ)]
VS_INST_DIR = get_reg_values(MSVC_KEY, MSVC_VALUES)[0]
PROJECT_SLN = "..\OpenNI.sln"
bulidLog = origDir+'/build.Release.'+plat+'.txt'
devenv_cmd = '\"'+VS_INST_DIR + 'devenv\" '+PROJECT_SLN + ' /Project Install /Rebuild "Release|'+plat+'\" /out '+bulidLog
print(devenv_cmd)
subprocess.check_call(devenv_cmd, close_fds=True)
# everything OK, can remove build log
os.remove(bulidLog)
outFile = 'OpenNI-Windows-' + plat + '-' + strVersion + '.msi'
finalFile = os.path.join(finalDir, outFile)
if os.path.exists(finalFile):
os.remove(finalFile)
shutil.move('Install/bin/' + plat + '/en-us/' + outFile, finalDir)
elif platform.system() == 'Linux' or platform.system() == 'Darwin':
devNull = open('/dev/null', 'w')
subprocess.check_call(['make', '-C', '../', '-j' + calc_jobs_number(), 'PLATFORM=' + plat, 'clean'], stdout=devNull, stderr=devNull)
devNull.close()
buildLog = open(origDir + '/build.release.' + plat + '.log', 'w')
subprocess.check_call(['make', '-C', '../', '-j' + calc_jobs_number(), 'PLATFORM=' + plat, 'release'], stdout=buildLog, stderr=buildLog)
buildLog.close()
# everything OK, can remove build log
os.remove(origDir + '/build.release.' + plat + '.log')
else:
print "Unknown OS"
sys.exit(2)
# also copy Release Notes and CHANGES documents
shutil.copy('../ReleaseNotes.txt', finalDir)
shutil.copy('../CHANGES.txt', finalDir)
print "Installer can be found under: " + finalDir
print "Done"
| apache-2.0 |
iho/wagtail | wagtail/wagtailcore/migrations/0014_add_verbose_name.py | 26 | 4031 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0013_update_golive_expire_help_text'),
]
operations = [
migrations.AlterField(
model_name='grouppagepermission',
name='group',
field=models.ForeignKey(verbose_name='Group', related_name='page_permissions', to='auth.Group'),
preserve_default=True,
),
migrations.AlterField(
model_name='grouppagepermission',
name='page',
field=models.ForeignKey(verbose_name='Page', related_name='group_permissions', to='wagtailcore.Page'),
preserve_default=True,
),
migrations.AlterField(
model_name='grouppagepermission',
name='permission_type',
field=models.CharField(choices=[('add', 'Add/edit pages you own'), ('edit', 'Add/edit any page'), ('publish', 'Publish any page'), ('lock', 'Lock/unlock any page')], max_length=20, verbose_name='Permission type'),
preserve_default=True,
),
migrations.AlterField(
model_name='page',
name='search_description',
field=models.TextField(blank=True, verbose_name='Search description'),
preserve_default=True,
),
migrations.AlterField(
model_name='page',
name='show_in_menus',
field=models.BooleanField(default=False, help_text='Whether a link to this page will appear in automatically generated menus', verbose_name='Show in menus'),
preserve_default=True,
),
migrations.AlterField(
model_name='page',
name='slug',
field=models.SlugField(help_text='The name of the page as it will appear in URLs e.g http://domain.com/blog/[my-slug]/', max_length=255, verbose_name='Slug'),
preserve_default=True,
),
migrations.AlterField(
model_name='page',
name='title',
field=models.CharField(help_text="The page title as you'd like it to be seen by the public", max_length=255, verbose_name='Title'),
preserve_default=True,
),
migrations.AlterField(
model_name='pageviewrestriction',
name='page',
field=models.ForeignKey(verbose_name='Page', related_name='view_restrictions', to='wagtailcore.Page'),
preserve_default=True,
),
migrations.AlterField(
model_name='pageviewrestriction',
name='password',
field=models.CharField(max_length=255, verbose_name='Password'),
preserve_default=True,
),
migrations.AlterField(
model_name='site',
name='hostname',
field=models.CharField(db_index=True, max_length=255, verbose_name='Hostname'),
preserve_default=True,
),
migrations.AlterField(
model_name='site',
name='is_default_site',
field=models.BooleanField(default=False, help_text='If true, this site will handle requests for all other hostnames that do not have a site entry of their own', verbose_name='Is default site'),
preserve_default=True,
),
migrations.AlterField(
model_name='site',
name='port',
field=models.IntegerField(default=80, help_text='Set this to something other than 80 if you need a specific port number to appear in URLs (e.g. development on port 8000). Does not affect request handling (so port forwarding still works).', verbose_name='Port'),
preserve_default=True,
),
migrations.AlterField(
model_name='site',
name='root_page',
field=models.ForeignKey(verbose_name='Root page', related_name='sites_rooted_here', to='wagtailcore.Page'),
preserve_default=True,
),
]
| bsd-3-clause |
samuellefever/server-tools | cron_run_manually/ir_cron.py | 42 | 2778 | # -*- coding: utf-8 -*-
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Therp BV (<http://therp.nl>)
# Code snippets from openobject-server copyright (C) 2004-2013 OpenERP S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from openerp import _, api, exceptions, models, SUPERUSER_ID
from openerp.tools.safe_eval import safe_eval
from psycopg2 import OperationalError
_logger = logging.getLogger(__name__)
class Cron(models.Model):
_name = _inherit = "ir.cron"
@api.one
def run_manually(self):
"""Run a job from the cron form view."""
if self.env.uid != SUPERUSER_ID and (not self.active or
not self.numbercall):
raise exceptions.AccessError(
_('Only the admin user is allowed to '
'execute inactive cron jobs manually'))
try:
# Try to grab an exclusive lock on the job row
# until the end of the transaction
self.env.cr.execute(
"""SELECT *
FROM ir_cron
WHERE id=%s
FOR UPDATE NOWAIT""",
(self.id,),
log_exceptions=False)
except OperationalError as e:
# User friendly error if the lock could not be claimed
if getattr(e, "pgcode", None) == '55P03':
raise exceptions.Warning(
_('Another process/thread is already busy '
'executing this job'))
raise
_logger.info('Job `%s` triggered from form', self.name)
# Do not propagate active_test to the method to execute
ctx = dict(self.env.context)
ctx.pop('active_test', None)
# Execute the cron job
method = getattr(
self.with_context(ctx).sudo(self.user_id).env[self.model],
self.function)
args = safe_eval('tuple(%s)' % (self.args or ''))
return method(*args)
@api.model
def _current_uid(self):
"""This function returns the current UID, for testing purposes."""
return self.env.uid
| agpl-3.0 |
andreadean5/python-hpOneView | hpOneView/resources/servers/id_pools_vsn_ranges.py | 1 | 1946 | # -*- coding: utf-8 -*-
###
# (C) Copyright (2012-2016) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
__title__ = 'id-pools-vsn-ranges'
__version__ = '0.0.1'
__copyright__ = '(C) Copyright (2012-2016) Hewlett Packard Enterprise Development LP'
__license__ = 'MIT'
__status__ = 'Development'
from hpOneView.resources.resource import ResourceClient
from hpOneView.resources.servers.id_pools_ranges import IdPoolsRanges
class IdPoolsVsnRanges(IdPoolsRanges):
URI = '/rest/id-pools/vsn/ranges'
def __init__(self, con):
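# reuse the generic id-pools range handling from IdPoolsRanges for the vSN-specific URI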
IdPoolsRanges.__init__(self, con, self.URI)
self._connection = con
self._client = ResourceClient(con, self.URI)
| mit |
kaday/rose | lib/python/rose/macros/duplicate.py | 1 | 3415 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2012-6 Met Office.
#
# This file is part of Rose, a framework for meteorological suites.
#
# Rose is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Rose is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rose. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
import re
import rose.macro
class DuplicateChecker(rose.macro.MacroBase):
"""Returns settings whose duplicate status does not match their name."""
WARNING_DUPL_SECT_NO_NUM = ('incorrect "duplicate=true" metadata')
WARNING_NUM_SECT_NO_DUPL = ('{0} requires "duplicate=true" metadata')
def validate(self, config, meta_config=None):
"""Return a list of errors, if any."""
self.reports = []
sect_error_no_dupl = {}
sect_keys = config.value.keys()
sorter = rose.config.sort_settings
sect_keys.sort(sorter)
for section in sect_keys:
node = config.get([section])
if not isinstance(node.value, dict):
continue
metadata = self.get_metadata_for_config_id(section, meta_config)
duplicate = metadata.get(rose.META_PROP_DUPLICATE)
is_duplicate = duplicate == rose.META_PROP_VALUE_TRUE
basic_section = rose.macro.REC_ID_STRIP.sub("", section)
if is_duplicate:
if basic_section == section:
self.add_report(section, None, None,
self.WARNING_DUPL_SECT_NO_NUM)
elif section != basic_section:
if basic_section not in sect_error_no_dupl:
sect_error_no_dupl.update({basic_section: 1})
no_index_section = rose.macro.REC_ID_STRIP_DUPL.sub(
"", section)
if no_index_section != section:
basic_section = no_index_section
warning = self.WARNING_NUM_SECT_NO_DUPL
if self._get_has_metadata(metadata, basic_section,
meta_config):
self.add_report(section, None, None,
warning.format(basic_section))
return self.reports
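# Helper: True if the section has metadata beyond the implicit "id" entry, either directly or via meta_config.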
def _get_has_metadata(self, metadata, basic_section, meta_config):
if metadata.keys() != ["id"]:
return True
for meta_keys, meta_node in meta_config.walk(no_ignore=True):
meta_section = meta_keys[0]
if len(meta_keys) > 1:
continue
if ((meta_section == basic_section or
meta_section.startswith(
basic_section + rose.CONFIG_DELIMITER)) and
isinstance(meta_node.value, dict)):
return True
return False
| gpl-3.0 |
Alwnikrotikz/pmx | scripts/DTI_analysis.py | 2 | 88009 |
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" >
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1" >
<meta name="ROBOTS" content="NOARCHIVE">
<link rel="icon" type="image/vnd.microsoft.icon" href="https://ssl.gstatic.com/codesite/ph/images/phosting.ico">
<script type="text/javascript">
var codesite_token = "3Cd3YLziNQwHJ6q0INBaXA2gZls:1366032649547";
var CS_env = {"token":"3Cd3YLziNQwHJ6q0INBaXA2gZls:1366032649547","projectName":"pmx","domainName":null,"assetHostPath":"https://ssl.gstatic.com/codesite/ph","loggedInUserEmail":"[email protected]","profileUrl":"/u/110130407061490526737/","assetVersionPath":"https://ssl.gstatic.com/codesite/ph/14689258884487974863","projectHomeUrl":"/p/pmx","relativeBaseUrl":""};
var _gaq = _gaq || [];
_gaq.push(
['siteTracker._setAccount', 'UA-18071-1'],
['siteTracker._trackPageview']);
(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
(document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(ga);
})();
</script>
<title>DTI_analysis.py -
pmx -
python library and tools for computational and structural biophysics - Google Project Hosting
</title>
<link type="text/css" rel="stylesheet" href="https://ssl.gstatic.com/codesite/ph/14689258884487974863/css/core.css">
<link type="text/css" rel="stylesheet" href="https://ssl.gstatic.com/codesite/ph/14689258884487974863/css/ph_detail.css" >
<link type="text/css" rel="stylesheet" href="https://ssl.gstatic.com/codesite/ph/14689258884487974863/css/d_sb.css" >
<!--[if IE]>
<link type="text/css" rel="stylesheet" href="https://ssl.gstatic.com/codesite/ph/14689258884487974863/css/d_ie.css" >
<![endif]-->
<style type="text/css">
.menuIcon.off { background: no-repeat url(https://ssl.gstatic.com/codesite/ph/images/dropdown_sprite.gif) 0 -42px }
.menuIcon.on { background: no-repeat url(https://ssl.gstatic.com/codesite/ph/images/dropdown_sprite.gif) 0 -28px }
.menuIcon.down { background: no-repeat url(https://ssl.gstatic.com/codesite/ph/images/dropdown_sprite.gif) 0 0; }
tr.inline_comment {
background: #fff;
vertical-align: top;
}
div.draft, div.published {
padding: .3em;
border: 1px solid #999;
margin-bottom: .1em;
font-family: arial, sans-serif;
max-width: 60em;
}
div.draft {
background: #ffa;
}
div.published {
background: #e5ecf9;
}
div.published .body, div.draft .body {
padding: .5em .1em .1em .1em;
max-width: 60em;
white-space: pre-wrap;
white-space: -moz-pre-wrap;
white-space: -pre-wrap;
white-space: -o-pre-wrap;
word-wrap: break-word;
font-size: 1em;
}
div.draft .actions {
margin-left: 1em;
font-size: 90%;
}
div.draft form {
padding: .5em .5em .5em 0;
}
div.draft textarea, div.published textarea {
width: 95%;
height: 10em;
font-family: arial, sans-serif;
margin-bottom: .5em;
}
.nocursor, .nocursor td, .cursor_hidden, .cursor_hidden td {
background-color: white;
height: 2px;
}
.cursor, .cursor td {
background-color: darkblue;
height: 2px;
display: '';
}
.list {
border: 1px solid white;
border-bottom: 0;
}
</style>
</head>
<body class="t4">
<script type="text/javascript">
window.___gcfg = {lang: 'en'};
(function()
{var po = document.createElement("script");
po.type = "text/javascript"; po.async = true;po.src = "https://apis.google.com/js/plusone.js";
var s = document.getElementsByTagName("script")[0];
s.parentNode.insertBefore(po, s);
})();
</script>
<div class="headbg">
<div id="gaia">
<span>
<b>[email protected]</b>
| <a href="/u/110130407061490526737/" id="projects-dropdown" onclick="return false;"
><u>My favorites</u> <small>▼</small></a>
| <a href="/u/110130407061490526737/" onclick="_CS_click('/gb/ph/profile');"
title="Profile, Updates, and Settings"
><u>Profile</u></a>
| <a href="https://www.google.com/accounts/Logout?continue=https%3A%2F%2Fcode.google.com%2Fp%2Fpmx%2Fsource%2Fbrowse%2Fscripts%2FDTI_analysis.py"
onclick="_CS_click('/gb/ph/signout');"
><u>Sign out</u></a>
</span>
</div>
<div class="gbh" style="left: 0pt;"></div>
<div class="gbh" style="right: 0pt;"></div>
<div style="height: 1px"></div>
<!--[if lte IE 7]>
<div style="text-align:center;">
Your version of Internet Explorer is not supported. Try a browser that
contributes to open source, such as <a href="http://www.firefox.com">Firefox</a>,
<a href="http://www.google.com/chrome">Google Chrome</a>, or
<a href="http://code.google.com/chrome/chromeframe/">Google Chrome Frame</a>.
</div>
<![endif]-->
<table style="padding:0px; margin: 0px 0px 10px 0px; width:100%" cellpadding="0" cellspacing="0"
itemscope itemtype="http://schema.org/CreativeWork">
<tr style="height: 58px;">
<td id="plogo">
<link itemprop="url" href="/p/pmx">
<a href="/p/pmx/">
<img src="/p/pmx/logo?cct=1355339915"
alt="Logo" itemprop="image">
</a>
</td>
<td style="padding-left: 0.5em">
<div id="pname">
<a href="/p/pmx/"><span itemprop="name">pmx</span></a>
</div>
<div id="psum">
<a id="project_summary_link"
href="/p/pmx/"><span itemprop="description">python library and tools for computational and structural biophysics</span></a>
</div>
</td>
<td style="white-space:nowrap;text-align:right; vertical-align:bottom;">
<form action="/hosting/search">
<input size="30" name="q" value="" type="text">
<input type="submit" name="projectsearch" value="Search projects" >
</form>
</tr>
</table>
</div>
<div id="mt" class="gtb">
<a href="/p/pmx/" class="tab ">Project Home</a>
<a href="/p/pmx/downloads/list" class="tab ">Downloads</a>
<a href="/p/pmx/w/list" class="tab ">Wiki</a>
<a href="/p/pmx/issues/list"
class="tab ">Issues</a>
<a href="/p/pmx/source/checkout"
class="tab active">Source</a>
<div class=gtbc></div>
</div>
<table cellspacing="0" cellpadding="0" width="100%" align="center" border="0" class="st">
<tr>
<td class="subt">
<div class="st2">
<div class="isf">
<form action="/p/pmx/source/browse" style="display: inline">
Repository:
<select name="repo" id="repo" style="font-size: 92%" onchange="submit()">
<option value="default">default</option><option value="wiki">wiki</option>
</select>
</form>
<span class="inst1"><a href="/p/pmx/source/checkout">Checkout</a></span>
<span class="inst2"><a href="/p/pmx/source/browse/">Browse</a></span>
<span class="inst3"><a href="/p/pmx/source/list">Changes</a></span>
<span class="inst4"><a href="/p/pmx/source/clones">Clones</a></span>
<a href="/p/pmx/issues/entry?show=review&former=sourcelist">Request code review</a>
</form>
<script type="text/javascript">
function codesearchQuery(form) {
var query = document.getElementById('q').value;
if (query) { form.action += '%20' + query; }
}
</script>
</div>
</div>
</td>
<td align="right" valign="top" class="bevel-right"></td>
</tr>
</table>
<script type="text/javascript">
var cancelBubble = false;
function _go(url) { document.location = url; }
</script>
<div id="maincol"
>
<div class="collapse">
<div id="colcontrol">
<style type="text/css">
#file_flipper { white-space: nowrap; padding-right: 2em; }
#file_flipper.hidden { display: none; }
#file_flipper .pagelink { color: #0000CC; text-decoration: underline; }
#file_flipper #visiblefiles { padding-left: 0.5em; padding-right: 0.5em; }
</style>
<table id="nav_and_rev" class="list"
cellpadding="0" cellspacing="0" width="100%">
<tr>
<td nowrap="nowrap" class="src_crumbs src_nav" width="33%">
<strong class="src_nav">Source path: </strong>
<span id="crumb_root">
<a href="/p/pmx/source/browse/">git</a>/ </span>
<span id="crumb_links" class="ifClosed"><a href="/p/pmx/source/browse/scripts/">scripts</a><span class="sp">/ </span>DTI_analysis.py</span>
<form class="src_nav">
<span class="sourcelabel"><strong>Branch:</strong>
<select id="branch_select" name="name" onchange="submit()">
<option value="David"
>
David
</option>
<option value="Upgradegrom4.6"
>
Upgradegrom4.6
</option>
<option value="master"
selected>
master
</option>
</select>
</span>
</form>
</td>
<td nowrap="nowrap" width="33%" align="center">
<a href="/p/pmx/source/browse/scripts/DTI_analysis.py?edit=1"
><img src="https://ssl.gstatic.com/codesite/ph/images/pencil-y14.png"
class="edit_icon">Edit file</a>
</td>
<td nowrap="nowrap" width="33%" align="right">
<table cellpadding="0" cellspacing="0" style="font-size: 100%"><tr>
<td class="flipper">
<ul class="leftside">
<li><a href="/p/pmx/source/browse/scripts/DTI_analysis.py?r=a2102ac8113476c16e34079d2812b130339f54bb" title="Previous">‹a2102ac81134</a></li>
</ul>
</td>
<td class="flipper"><b>82a17baf41be</b></td>
</tr></table>
</td>
</tr>
</table>
<div class="fc">
<style type="text/css">
.undermouse span {
background-image: url(https://ssl.gstatic.com/codesite/ph/images/comments.gif); }
</style>
<table class="opened" id="review_comment_area"
onmouseout="gutterOut()"><tr>
<td id="nums">
<pre><table width="100%"><tr class="nocursor"><td></td></tr></table></pre>
<pre><table width="100%" id="nums_table_0"><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_1"
onmouseover="gutterOver(1)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',1);"> </span
></td><td id="1"><a href="#1">1</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_2"
onmouseover="gutterOver(2)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',2);"> </span
></td><td id="2"><a href="#2">2</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_3"
onmouseover="gutterOver(3)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',3);"> </span
></td><td id="3"><a href="#3">3</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_4"
onmouseover="gutterOver(4)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',4);"> </span
></td><td id="4"><a href="#4">4</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_5"
onmouseover="gutterOver(5)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',5);"> </span
></td><td id="5"><a href="#5">5</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_6"
onmouseover="gutterOver(6)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',6);"> </span
></td><td id="6"><a href="#6">6</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_7"
onmouseover="gutterOver(7)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',7);"> </span
></td><td id="7"><a href="#7">7</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_8"
onmouseover="gutterOver(8)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',8);"> </span
></td><td id="8"><a href="#8">8</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_9"
onmouseover="gutterOver(9)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',9);"> </span
></td><td id="9"><a href="#9">9</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_10"
onmouseover="gutterOver(10)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',10);"> </span
></td><td id="10"><a href="#10">10</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_11"
onmouseover="gutterOver(11)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',11);"> </span
></td><td id="11"><a href="#11">11</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_12"
onmouseover="gutterOver(12)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',12);"> </span
></td><td id="12"><a href="#12">12</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_13"
onmouseover="gutterOver(13)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',13);"> </span
></td><td id="13"><a href="#13">13</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_14"
onmouseover="gutterOver(14)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',14);"> </span
></td><td id="14"><a href="#14">14</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_15"
onmouseover="gutterOver(15)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',15);"> </span
></td><td id="15"><a href="#15">15</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_16"
onmouseover="gutterOver(16)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',16);"> </span
></td><td id="16"><a href="#16">16</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_17"
onmouseover="gutterOver(17)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',17);"> </span
></td><td id="17"><a href="#17">17</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_18"
onmouseover="gutterOver(18)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',18);"> </span
></td><td id="18"><a href="#18">18</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_19"
onmouseover="gutterOver(19)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',19);"> </span
></td><td id="19"><a href="#19">19</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_20"
onmouseover="gutterOver(20)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',20);"> </span
></td><td id="20"><a href="#20">20</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_21"
onmouseover="gutterOver(21)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',21);"> </span
></td><td id="21"><a href="#21">21</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_22"
onmouseover="gutterOver(22)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',22);"> </span
></td><td id="22"><a href="#22">22</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_23"
onmouseover="gutterOver(23)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',23);"> </span
></td><td id="23"><a href="#23">23</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_24"
onmouseover="gutterOver(24)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',24);"> </span
></td><td id="24"><a href="#24">24</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_25"
onmouseover="gutterOver(25)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',25);"> </span
></td><td id="25"><a href="#25">25</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_26"
onmouseover="gutterOver(26)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',26);"> </span
></td><td id="26"><a href="#26">26</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_27"
onmouseover="gutterOver(27)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',27);"> </span
></td><td id="27"><a href="#27">27</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_28"
onmouseover="gutterOver(28)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',28);"> </span
></td><td id="28"><a href="#28">28</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_29"
onmouseover="gutterOver(29)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',29);"> </span
></td><td id="29"><a href="#29">29</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_30"
onmouseover="gutterOver(30)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',30);"> </span
></td><td id="30"><a href="#30">30</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_31"
onmouseover="gutterOver(31)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',31);"> </span
></td><td id="31"><a href="#31">31</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_32"
onmouseover="gutterOver(32)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',32);"> </span
></td><td id="32"><a href="#32">32</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_33"
onmouseover="gutterOver(33)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',33);"> </span
></td><td id="33"><a href="#33">33</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_34"
onmouseover="gutterOver(34)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',34);"> </span
></td><td id="34"><a href="#34">34</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_35"
onmouseover="gutterOver(35)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',35);"> </span
></td><td id="35"><a href="#35">35</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_36"
onmouseover="gutterOver(36)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',36);"> </span
></td><td id="36"><a href="#36">36</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_37"
onmouseover="gutterOver(37)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',37);"> </span
></td><td id="37"><a href="#37">37</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_38"
onmouseover="gutterOver(38)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',38);"> </span
></td><td id="38"><a href="#38">38</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_39"
onmouseover="gutterOver(39)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',39);"> </span
></td><td id="39"><a href="#39">39</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_40"
onmouseover="gutterOver(40)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',40);"> </span
></td><td id="40"><a href="#40">40</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_41"
onmouseover="gutterOver(41)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',41);"> </span
></td><td id="41"><a href="#41">41</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_42"
onmouseover="gutterOver(42)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',42);"> </span
></td><td id="42"><a href="#42">42</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_43"
onmouseover="gutterOver(43)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',43);"> </span
></td><td id="43"><a href="#43">43</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_44"
onmouseover="gutterOver(44)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',44);"> </span
></td><td id="44"><a href="#44">44</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_45"
onmouseover="gutterOver(45)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',45);"> </span
></td><td id="45"><a href="#45">45</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_46"
onmouseover="gutterOver(46)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',46);"> </span
></td><td id="46"><a href="#46">46</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_47"
onmouseover="gutterOver(47)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',47);"> </span
></td><td id="47"><a href="#47">47</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_48"
onmouseover="gutterOver(48)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',48);"> </span
></td><td id="48"><a href="#48">48</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_49"
onmouseover="gutterOver(49)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',49);"> </span
></td><td id="49"><a href="#49">49</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_50"
onmouseover="gutterOver(50)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',50);"> </span
></td><td id="50"><a href="#50">50</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_51"
onmouseover="gutterOver(51)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',51);"> </span
></td><td id="51"><a href="#51">51</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_52"
onmouseover="gutterOver(52)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',52);"> </span
></td><td id="52"><a href="#52">52</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_53"
onmouseover="gutterOver(53)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',53);"> </span
></td><td id="53"><a href="#53">53</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_54"
onmouseover="gutterOver(54)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',54);"> </span
></td><td id="54"><a href="#54">54</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_55"
onmouseover="gutterOver(55)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',55);"> </span
></td><td id="55"><a href="#55">55</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_56"
onmouseover="gutterOver(56)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',56);"> </span
></td><td id="56"><a href="#56">56</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_57"
onmouseover="gutterOver(57)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',57);"> </span
></td><td id="57"><a href="#57">57</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_58"
onmouseover="gutterOver(58)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',58);"> </span
></td><td id="58"><a href="#58">58</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_59"
onmouseover="gutterOver(59)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',59);"> </span
></td><td id="59"><a href="#59">59</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_60"
onmouseover="gutterOver(60)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',60);"> </span
></td><td id="60"><a href="#60">60</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_61"
onmouseover="gutterOver(61)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',61);"> </span
></td><td id="61"><a href="#61">61</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_62"
onmouseover="gutterOver(62)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',62);"> </span
></td><td id="62"><a href="#62">62</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_63"
onmouseover="gutterOver(63)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',63);"> </span
></td><td id="63"><a href="#63">63</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_64"
onmouseover="gutterOver(64)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',64);"> </span
></td><td id="64"><a href="#64">64</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_65"
onmouseover="gutterOver(65)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',65);"> </span
></td><td id="65"><a href="#65">65</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_66"
onmouseover="gutterOver(66)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',66);"> </span
></td><td id="66"><a href="#66">66</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_67"
onmouseover="gutterOver(67)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',67);"> </span
></td><td id="67"><a href="#67">67</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_68"
onmouseover="gutterOver(68)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',68);"> </span
></td><td id="68"><a href="#68">68</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_69"
onmouseover="gutterOver(69)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',69);"> </span
></td><td id="69"><a href="#69">69</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_70"
onmouseover="gutterOver(70)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',70);"> </span
></td><td id="70"><a href="#70">70</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_71"
onmouseover="gutterOver(71)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',71);"> </span
></td><td id="71"><a href="#71">71</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_72"
onmouseover="gutterOver(72)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',72);"> </span
></td><td id="72"><a href="#72">72</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_73"
onmouseover="gutterOver(73)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',73);"> </span
></td><td id="73"><a href="#73">73</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_74"
onmouseover="gutterOver(74)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',74);"> </span
></td><td id="74"><a href="#74">74</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_75"
onmouseover="gutterOver(75)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',75);"> </span
></td><td id="75"><a href="#75">75</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_76"
onmouseover="gutterOver(76)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',76);"> </span
></td><td id="76"><a href="#76">76</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_77"
onmouseover="gutterOver(77)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',77);"> </span
></td><td id="77"><a href="#77">77</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_78"
onmouseover="gutterOver(78)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',78);"> </span
></td><td id="78"><a href="#78">78</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_79"
onmouseover="gutterOver(79)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',79);"> </span
></td><td id="79"><a href="#79">79</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_80"
onmouseover="gutterOver(80)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',80);"> </span
></td><td id="80"><a href="#80">80</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_81"
onmouseover="gutterOver(81)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',81);"> </span
></td><td id="81"><a href="#81">81</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_82"
onmouseover="gutterOver(82)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',82);"> </span
></td><td id="82"><a href="#82">82</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_83"
onmouseover="gutterOver(83)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',83);"> </span
></td><td id="83"><a href="#83">83</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_84"
onmouseover="gutterOver(84)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',84);"> </span
></td><td id="84"><a href="#84">84</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_85"
onmouseover="gutterOver(85)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',85);"> </span
></td><td id="85"><a href="#85">85</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_86"
onmouseover="gutterOver(86)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',86);"> </span
></td><td id="86"><a href="#86">86</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_87"
onmouseover="gutterOver(87)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',87);"> </span
></td><td id="87"><a href="#87">87</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_88"
onmouseover="gutterOver(88)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',88);"> </span
></td><td id="88"><a href="#88">88</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_89"
onmouseover="gutterOver(89)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',89);"> </span
></td><td id="89"><a href="#89">89</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_90"
onmouseover="gutterOver(90)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',90);"> </span
></td><td id="90"><a href="#90">90</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_91"
onmouseover="gutterOver(91)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',91);"> </span
></td><td id="91"><a href="#91">91</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_92"
onmouseover="gutterOver(92)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',92);"> </span
></td><td id="92"><a href="#92">92</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_93"
onmouseover="gutterOver(93)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',93);"> </span
></td><td id="93"><a href="#93">93</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_94"
onmouseover="gutterOver(94)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',94);"> </span
></td><td id="94"><a href="#94">94</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_95"
onmouseover="gutterOver(95)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',95);"> </span
></td><td id="95"><a href="#95">95</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_96"
onmouseover="gutterOver(96)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',96);"> </span
></td><td id="96"><a href="#96">96</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_97"
onmouseover="gutterOver(97)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',97);"> </span
></td><td id="97"><a href="#97">97</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_98"
onmouseover="gutterOver(98)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',98);"> </span
></td><td id="98"><a href="#98">98</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_99"
onmouseover="gutterOver(99)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',99);"> </span
></td><td id="99"><a href="#99">99</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_100"
onmouseover="gutterOver(100)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',100);"> </span
></td><td id="100"><a href="#100">100</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_101"
onmouseover="gutterOver(101)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',101);"> </span
></td><td id="101"><a href="#101">101</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_102"
onmouseover="gutterOver(102)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',102);"> </span
></td><td id="102"><a href="#102">102</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_103"
onmouseover="gutterOver(103)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',103);"> </span
></td><td id="103"><a href="#103">103</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_104"
onmouseover="gutterOver(104)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',104);"> </span
></td><td id="104"><a href="#104">104</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_105"
onmouseover="gutterOver(105)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',105);"> </span
></td><td id="105"><a href="#105">105</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_106"
onmouseover="gutterOver(106)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',106);"> </span
></td><td id="106"><a href="#106">106</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_107"
onmouseover="gutterOver(107)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',107);"> </span
></td><td id="107"><a href="#107">107</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_108"
onmouseover="gutterOver(108)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',108);"> </span
></td><td id="108"><a href="#108">108</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_109"
onmouseover="gutterOver(109)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',109);"> </span
></td><td id="109"><a href="#109">109</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_110"
onmouseover="gutterOver(110)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',110);"> </span
></td><td id="110"><a href="#110">110</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_111"
onmouseover="gutterOver(111)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',111);"> </span
></td><td id="111"><a href="#111">111</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_112"
onmouseover="gutterOver(112)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',112);"> </span
></td><td id="112"><a href="#112">112</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_113"
onmouseover="gutterOver(113)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',113);"> </span
></td><td id="113"><a href="#113">113</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_114"
onmouseover="gutterOver(114)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',114);"> </span
></td><td id="114"><a href="#114">114</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_115"
onmouseover="gutterOver(115)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',115);"> </span
></td><td id="115"><a href="#115">115</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_116"
onmouseover="gutterOver(116)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',116);"> </span
></td><td id="116"><a href="#116">116</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_117"
onmouseover="gutterOver(117)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',117);"> </span
></td><td id="117"><a href="#117">117</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_118"
onmouseover="gutterOver(118)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',118);"> </span
></td><td id="118"><a href="#118">118</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_119"
onmouseover="gutterOver(119)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',119);"> </span
></td><td id="119"><a href="#119">119</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_120"
onmouseover="gutterOver(120)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',120);"> </span
></td><td id="120"><a href="#120">120</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_121"
onmouseover="gutterOver(121)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',121);"> </span
></td><td id="121"><a href="#121">121</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_122"
onmouseover="gutterOver(122)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',122);"> </span
></td><td id="122"><a href="#122">122</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_123"
onmouseover="gutterOver(123)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',123);"> </span
></td><td id="123"><a href="#123">123</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_124"
onmouseover="gutterOver(124)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',124);"> </span
></td><td id="124"><a href="#124">124</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_125"
onmouseover="gutterOver(125)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',125);"> </span
></td><td id="125"><a href="#125">125</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_126"
onmouseover="gutterOver(126)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',126);"> </span
></td><td id="126"><a href="#126">126</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_127"
onmouseover="gutterOver(127)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',127);"> </span
></td><td id="127"><a href="#127">127</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_128"
onmouseover="gutterOver(128)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',128);"> </span
></td><td id="128"><a href="#128">128</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_129"
onmouseover="gutterOver(129)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',129);"> </span
></td><td id="129"><a href="#129">129</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_130"
onmouseover="gutterOver(130)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',130);"> </span
></td><td id="130"><a href="#130">130</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_131"
onmouseover="gutterOver(131)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',131);"> </span
></td><td id="131"><a href="#131">131</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_132"
onmouseover="gutterOver(132)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',132);"> </span
></td><td id="132"><a href="#132">132</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_133"
onmouseover="gutterOver(133)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',133);"> </span
></td><td id="133"><a href="#133">133</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_134"
onmouseover="gutterOver(134)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',134);"> </span
></td><td id="134"><a href="#134">134</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_135"
onmouseover="gutterOver(135)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',135);"> </span
></td><td id="135"><a href="#135">135</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_136"
onmouseover="gutterOver(136)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',136);"> </span
></td><td id="136"><a href="#136">136</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_137"
onmouseover="gutterOver(137)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',137);"> </span
></td><td id="137"><a href="#137">137</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_138"
onmouseover="gutterOver(138)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',138);"> </span
></td><td id="138"><a href="#138">138</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_139"
onmouseover="gutterOver(139)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',139);"> </span
></td><td id="139"><a href="#139">139</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_140"
onmouseover="gutterOver(140)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',140);"> </span
></td><td id="140"><a href="#140">140</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_141"
onmouseover="gutterOver(141)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',141);"> </span
></td><td id="141"><a href="#141">141</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_142"
onmouseover="gutterOver(142)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',142);"> </span
></td><td id="142"><a href="#142">142</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_143"
onmouseover="gutterOver(143)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',143);"> </span
></td><td id="143"><a href="#143">143</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_144"
onmouseover="gutterOver(144)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',144);"> </span
></td><td id="144"><a href="#144">144</a></td></tr
><tr id="gr_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_145"
onmouseover="gutterOver(145)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b',145);"> </span
></td><td id="145"><a href="#145">145</a></td></tr
></table></pre>
<pre><table width="100%"><tr class="nocursor"><td></td></tr></table></pre>
</td>
<td id="lines">
<pre><table width="100%"><tr class="cursor_stop cursor_hidden"><td></td></tr></table></pre>
<pre class="prettyprint lang-py"><table id="src_table_0"><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_1
onmouseover="gutterOver(1)"
><td class="source"># pmx Copyright Notice<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_2
onmouseover="gutterOver(2)"
><td class="source"># ============================<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_3
onmouseover="gutterOver(3)"
><td class="source">#<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_4
onmouseover="gutterOver(4)"
><td class="source"># The pmx source code is copyrighted, but you can freely use and<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_5
onmouseover="gutterOver(5)"
><td class="source"># copy it as long as you don't change or remove any of the copyright<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_6
onmouseover="gutterOver(6)"
><td class="source"># notices.<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_7
onmouseover="gutterOver(7)"
><td class="source">#<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_8
onmouseover="gutterOver(8)"
><td class="source"># ----------------------------------------------------------------------<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_9
onmouseover="gutterOver(9)"
><td class="source"># pmx is Copyright (C) 2006-2013 by Daniel Seeliger<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_10
onmouseover="gutterOver(10)"
><td class="source">#<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_11
onmouseover="gutterOver(11)"
><td class="source"># All Rights Reserved<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_12
onmouseover="gutterOver(12)"
><td class="source">#<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_13
onmouseover="gutterOver(13)"
><td class="source"># Permission to use, copy, modify, distribute, and distribute modified<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_14
onmouseover="gutterOver(14)"
><td class="source"># versions of this software and its documentation for any purpose and<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_15
onmouseover="gutterOver(15)"
><td class="source"># without fee is hereby granted, provided that the above copyright<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_16
onmouseover="gutterOver(16)"
><td class="source"># notice appear in all copies and that both the copyright notice and<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_17
onmouseover="gutterOver(17)"
><td class="source"># this permission notice appear in supporting documentation, and that<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_18
onmouseover="gutterOver(18)"
><td class="source"># the name of Daniel Seeliger not be used in advertising or publicity<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_19
onmouseover="gutterOver(19)"
><td class="source"># pertaining to distribution of the software without specific, written<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_20
onmouseover="gutterOver(20)"
><td class="source"># prior permission.<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_21
onmouseover="gutterOver(21)"
><td class="source">#<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_22
onmouseover="gutterOver(22)"
><td class="source"># DANIEL SEELIGER DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_23
onmouseover="gutterOver(23)"
><td class="source"># SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_24
onmouseover="gutterOver(24)"
><td class="source"># FITNESS. IN NO EVENT SHALL DANIEL SEELIGER BE LIABLE FOR ANY<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_25
onmouseover="gutterOver(25)"
><td class="source"># SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_26
onmouseover="gutterOver(26)"
><td class="source"># RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_27
onmouseover="gutterOver(27)"
><td class="source"># CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_28
onmouseover="gutterOver(28)"
><td class="source"># CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_29
onmouseover="gutterOver(29)"
><td class="source"># ----------------------------------------------------------------------<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_30
onmouseover="gutterOver(30)"
><td class="source">import sys, os<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_31
onmouseover="gutterOver(31)"
><td class="source">from glob import glob<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_32
onmouseover="gutterOver(32)"
><td class="source">from numpy import *<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_33
onmouseover="gutterOver(33)"
><td class="source">from pmx import *<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_34
onmouseover="gutterOver(34)"
><td class="source"><br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_35
onmouseover="gutterOver(35)"
><td class="source">def read_data(fn, b = 0, e = -1):<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_36
onmouseover="gutterOver(36)"
><td class="source"> if e == -1: e = 9999999999<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_37
onmouseover="gutterOver(37)"
><td class="source"> l = open(fn).readlines()<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_38
onmouseover="gutterOver(38)"
><td class="source"> data = []<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_39
onmouseover="gutterOver(39)"
><td class="source"> for line in l:<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_40
onmouseover="gutterOver(40)"
><td class="source"> if line[0] not in ['@','#']:<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_41
onmouseover="gutterOver(41)"
><td class="source"> entr = line.split()<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_42
onmouseover="gutterOver(42)"
><td class="source"> try:<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_43
onmouseover="gutterOver(43)"
><td class="source"> time = float(entr[0])<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_44
onmouseover="gutterOver(44)"
><td class="source"> if time > b and time < e:<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_45
onmouseover="gutterOver(45)"
><td class="source"> data.append( float(entr[1] ) )<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_46
onmouseover="gutterOver(46)"
><td class="source"> except:<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_47
onmouseover="gutterOver(47)"
><td class="source"> pass<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_48
onmouseover="gutterOver(48)"
><td class="source"># print >>sys.stderr, 'Read file:', fn, ' with %d data points' % len(data)<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_49
onmouseover="gutterOver(49)"
><td class="source"> return data<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_50
onmouseover="gutterOver(50)"
><td class="source"><br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_51
onmouseover="gutterOver(51)"
><td class="source">def datapoint_from_time(time):<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_52
onmouseover="gutterOver(52)"
><td class="source"> return time*500<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_53
onmouseover="gutterOver(53)"
><td class="source"><br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_54
onmouseover="gutterOver(54)"
><td class="source">def block_aver( data, block_size = 1000):<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_55
onmouseover="gutterOver(55)"
><td class="source"><br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_56
onmouseover="gutterOver(56)"
><td class="source"> total_time = len(data) / 500.<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_57
onmouseover="gutterOver(57)"
><td class="source"> next_time = block_size<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_58
onmouseover="gutterOver(58)"
><td class="source"> results = []<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_59
onmouseover="gutterOver(59)"
><td class="source"> offset = 0<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_60
onmouseover="gutterOver(60)"
><td class="source"> while next_time < total_time:<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_61
onmouseover="gutterOver(61)"
><td class="source"> beg = datapoint_from_time(offset)<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_62
onmouseover="gutterOver(62)"
><td class="source"> end = datapoint_from_time(next_time)<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_63
onmouseover="gutterOver(63)"
><td class="source"> res = average( data[beg:end] )<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_64
onmouseover="gutterOver(64)"
><td class="source"> results.append( (str(offset)+'-'+str(next_time), res ) )<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_65
onmouseover="gutterOver(65)"
><td class="source"> offset = next_time<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_66
onmouseover="gutterOver(66)"
><td class="source"> next_time += block_size<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_67
onmouseover="gutterOver(67)"
><td class="source"> return results<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_68
onmouseover="gutterOver(68)"
><td class="source"><br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_69
onmouseover="gutterOver(69)"
><td class="source">def convergence( data, block_size = 1000):<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_70
onmouseover="gutterOver(70)"
><td class="source"> total_time = len(data) / 500.<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_71
onmouseover="gutterOver(71)"
><td class="source"> next_time = block_size<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_72
onmouseover="gutterOver(72)"
><td class="source"> results = []<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_73
onmouseover="gutterOver(73)"
><td class="source"> offset = 0<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_74
onmouseover="gutterOver(74)"
><td class="source"> while next_time < total_time:<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_75
onmouseover="gutterOver(75)"
><td class="source"> beg = datapoint_from_time(offset)<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_76
onmouseover="gutterOver(76)"
><td class="source"> end = datapoint_from_time(next_time)<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_77
onmouseover="gutterOver(77)"
><td class="source"> res = average( data[beg:end] )<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_78
onmouseover="gutterOver(78)"
><td class="source"> results.append( (next_time, res ) )<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_79
onmouseover="gutterOver(79)"
><td class="source"> next_time += block_size<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_80
onmouseover="gutterOver(80)"
><td class="source"> return results<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_81
onmouseover="gutterOver(81)"
><td class="source"> <br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_82
onmouseover="gutterOver(82)"
><td class="source"><br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_83
onmouseover="gutterOver(83)"
><td class="source"><br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_84
onmouseover="gutterOver(84)"
><td class="source">help_text = ('Calculate delta G from multiple DTI runs',)<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_85
onmouseover="gutterOver(85)"
><td class="source"><br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_86
onmouseover="gutterOver(86)"
><td class="source">options = [<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_87
onmouseover="gutterOver(87)"
><td class="source"> Option( "-b", "real", 500, "Start time [ps]"),<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_88
onmouseover="gutterOver(88)"
><td class="source"> Option( "-e", "real", -1, "End time[ps]"),<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_89
onmouseover="gutterOver(89)"
><td class="source"> Option( "-block1", "int", 100, "Time[ps] for block average"),<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_90
onmouseover="gutterOver(90)"
><td class="source"> Option( "-block2", "int", 500, "Time[ps] for block average"),<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_91
onmouseover="gutterOver(91)"
><td class="source"># Option( "-r2", "rvec", [1,2,3], "some vector that does wonderful things and returns always segfaults")<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_92
onmouseover="gutterOver(92)"
><td class="source"> ]<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_93
onmouseover="gutterOver(93)"
><td class="source"><br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_94
onmouseover="gutterOver(94)"
><td class="source">files = [<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_95
onmouseover="gutterOver(95)"
><td class="source"> FileOption("-dgdl", "r",["xvg"],"run", "Input file with dH/dl values"),<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_96
onmouseover="gutterOver(96)"
><td class="source"> FileOption("-o", "w",["txt"],"results.txt", "Results"),<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_97
onmouseover="gutterOver(97)"
><td class="source"> FileOption("-oc", "w",["txt"],"convergence.txt", "text file with mutations to insert"),<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_98
onmouseover="gutterOver(98)"
><td class="source"> FileOption("-ob", "w",["txt"],"block.txt", "files with block averages"),<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_99
onmouseover="gutterOver(99)"
><td class="source"> <br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_100
onmouseover="gutterOver(100)"
><td class="source">]<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_101
onmouseover="gutterOver(101)"
><td class="source"><br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_102
onmouseover="gutterOver(102)"
><td class="source"><br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_103
onmouseover="gutterOver(103)"
><td class="source">cmdl = Commandline( sys.argv, options = options,<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_104
onmouseover="gutterOver(104)"
><td class="source"> fileoptions = files,<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_105
onmouseover="gutterOver(105)"
><td class="source"> program_desc = help_text,<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_106
onmouseover="gutterOver(106)"
><td class="source"> check_for_existing_files = False )<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_107
onmouseover="gutterOver(107)"
><td class="source"><br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_108
onmouseover="gutterOver(108)"
><td class="source">dgdl_file = cmdl['-dgdl']<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_109
onmouseover="gutterOver(109)"
><td class="source">start_time = cmdl['-b']<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_110
onmouseover="gutterOver(110)"
><td class="source">end_time = cmdl['-e']<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_111
onmouseover="gutterOver(111)"
><td class="source"><br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_112
onmouseover="gutterOver(112)"
><td class="source">print 'DTI_analysis__> Reading: ', dgdl_file<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_113
onmouseover="gutterOver(113)"
><td class="source">print 'DTI_analysis__> Start time = ', start_time, ' End time = ', end_time<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_114
onmouseover="gutterOver(114)"
><td class="source">data = read_data( dgdl_file, b = start_time, e = end_time )<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_115
onmouseover="gutterOver(115)"
><td class="source">av = average(data)<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_116
onmouseover="gutterOver(116)"
><td class="source">st = std(data)<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_117
onmouseover="gutterOver(117)"
><td class="source">size = len(data)<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_118
onmouseover="gutterOver(118)"
><td class="source">print 'DTI_analysis__> <dH/dl> = %8.4f'% av, ' | #data points = ', size<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_119
onmouseover="gutterOver(119)"
><td class="source">fp = open(cmdl['-o'],'w')<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_120
onmouseover="gutterOver(120)"
><td class="source">print >>fp, av, st, size<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_121
onmouseover="gutterOver(121)"
><td class="source">fp.close()<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_122
onmouseover="gutterOver(122)"
><td class="source"><br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_123
onmouseover="gutterOver(123)"
><td class="source">block1 = cmdl['-block1']<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_124
onmouseover="gutterOver(124)"
><td class="source">fn =os.path.splitext(cmdl['-ob'])[0]+str(block1)+os.path.splitext(cmdl['-ob'])[1]<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_125
onmouseover="gutterOver(125)"
><td class="source">print 'DTI_analysis__> Block averaging 1: ', block1<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_126
onmouseover="gutterOver(126)"
><td class="source">res = block_aver( data, block1 )<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_127
onmouseover="gutterOver(127)"
><td class="source">fp = open(fn,'w')<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_128
onmouseover="gutterOver(128)"
><td class="source">for a, b in res:<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_129
onmouseover="gutterOver(129)"
><td class="source"> print >>fp, a, b<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_130
onmouseover="gutterOver(130)"
><td class="source">fp.close()<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_131
onmouseover="gutterOver(131)"
><td class="source">block2 = cmdl['-block2']<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_132
onmouseover="gutterOver(132)"
><td class="source">fn =os.path.splitext(cmdl['-ob'])[0]+str(block2)+os.path.splitext(cmdl['-ob'])[1]<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_133
onmouseover="gutterOver(133)"
><td class="source">print 'DTI_analysis__> Block averaging 2: ', block2<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_134
onmouseover="gutterOver(134)"
><td class="source">res = block_aver( data, block2 )<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_135
onmouseover="gutterOver(135)"
><td class="source">fp = open(fn,'w')<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_136
onmouseover="gutterOver(136)"
><td class="source">for a, b in res:<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_137
onmouseover="gutterOver(137)"
><td class="source"> print >>fp, a, b<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_138
onmouseover="gutterOver(138)"
><td class="source">fp.close()<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_139
onmouseover="gutterOver(139)"
><td class="source"><br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_140
onmouseover="gutterOver(140)"
><td class="source">res = convergence( data, 100 )<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_141
onmouseover="gutterOver(141)"
><td class="source">fp = open(cmdl['-oc'],'w')<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_142
onmouseover="gutterOver(142)"
><td class="source">for t, r in res:<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_143
onmouseover="gutterOver(143)"
><td class="source"> print >>fp, t, r<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_144
onmouseover="gutterOver(144)"
><td class="source">fp.close()<br></td></tr
><tr
id=sl_svn82a17baf41be6e3dd11997ac8d7dff4272c3a37b_145
onmouseover="gutterOver(145)"
><td class="source"><br></td></tr
| lgpl-3.0 |
wwj718/ANALYSE | cms/envs/aws.py | 4 | 11519 | """
This is the default template for our main set of AWS servers.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=W0401, W0614
import json
from .common import *
from logsettings import get_logger_config
import os
from path import path
from dealer.git import git
from xmodule.modulestore.modulestore_settings import convert_module_store_setting_if_needed
# SERVICE_VARIANT specifies name of the variant used, which decides what JSON
# configuration files are read during startup.
SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None)
# CONFIG_ROOT specifies the directory where the JSON configuration
# files are expected to be found. If not specified, use the project
# directory.
CONFIG_ROOT = path(os.environ.get('CONFIG_ROOT', ENV_ROOT))
# CONFIG_PREFIX specifies the prefix of the JSON configuration files,
# based on the service variant. If no variant is use, don't use a
# prefix.
CONFIG_PREFIX = SERVICE_VARIANT + "." if SERVICE_VARIANT else ""
############### ALWAYS THE SAME ################################
DEBUG = False
TEMPLATE_DEBUG = False
EMAIL_BACKEND = 'django_ses.SESBackend'
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
###################################### CELERY ################################
# Don't use a connection pool, since connections are dropped by ELB.
BROKER_POOL_LIMIT = 0
BROKER_CONNECTION_TIMEOUT = 1
# For the Result Store, use the django cache named 'celery'
CELERY_RESULT_BACKEND = 'cache'
CELERY_CACHE_BACKEND = 'celery'
# When the broker is behind an ELB, use a heartbeat to refresh the
# connection and to detect if it has been dropped.
BROKER_HEARTBEAT = 10.0
BROKER_HEARTBEAT_CHECKRATE = 2
# Each worker should only fetch one message at a time
CELERYD_PREFETCH_MULTIPLIER = 1
# Skip djcelery migrations, since we don't use the database as the broker
SOUTH_MIGRATION_MODULES = {
'djcelery': 'ignore',
}
# Rename the exchange and queues for each variant
QUEUE_VARIANT = CONFIG_PREFIX.lower()
CELERY_DEFAULT_EXCHANGE = 'edx.{0}core'.format(QUEUE_VARIANT)
HIGH_PRIORITY_QUEUE = 'edx.{0}core.high'.format(QUEUE_VARIANT)
DEFAULT_PRIORITY_QUEUE = 'edx.{0}core.default'.format(QUEUE_VARIANT)
LOW_PRIORITY_QUEUE = 'edx.{0}core.low'.format(QUEUE_VARIANT)
CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE
CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE
CELERY_QUEUES = {
HIGH_PRIORITY_QUEUE: {},
LOW_PRIORITY_QUEUE: {},
DEFAULT_PRIORITY_QUEUE: {}
}
############# NON-SECURE ENV CONFIG ##############################
# Things like server locations, ports, etc.
with open(CONFIG_ROOT / CONFIG_PREFIX + "env.json") as env_file:
ENV_TOKENS = json.load(env_file)
# STATIC_URL_BASE specifies the base url to use for static files
STATIC_URL_BASE = ENV_TOKENS.get('STATIC_URL_BASE', None)
if STATIC_URL_BASE:
# collectstatic will fail if STATIC_URL is a unicode string
STATIC_URL = STATIC_URL_BASE.encode('ascii')
if not STATIC_URL.endswith("/"):
STATIC_URL += "/"
STATIC_URL += git.revision + "/"
# GITHUB_REPO_ROOT is the base directory
# for course data
GITHUB_REPO_ROOT = ENV_TOKENS.get('GITHUB_REPO_ROOT', GITHUB_REPO_ROOT)
# STATIC_ROOT specifies the directory where static files are
# collected
STATIC_ROOT_BASE = ENV_TOKENS.get('STATIC_ROOT_BASE', None)
if STATIC_ROOT_BASE:
STATIC_ROOT = path(STATIC_ROOT_BASE) / git.revision
EMAIL_BACKEND = ENV_TOKENS.get('EMAIL_BACKEND', EMAIL_BACKEND)
EMAIL_FILE_PATH = ENV_TOKENS.get('EMAIL_FILE_PATH', None)
EMAIL_HOST = ENV_TOKENS.get('EMAIL_HOST', EMAIL_HOST)
EMAIL_PORT = ENV_TOKENS.get('EMAIL_PORT', EMAIL_PORT)
EMAIL_USE_TLS = ENV_TOKENS.get('EMAIL_USE_TLS', EMAIL_USE_TLS)
LMS_BASE = ENV_TOKENS.get('LMS_BASE')
# Note that FEATURES['PREVIEW_LMS_BASE'] gets read in from the environment file.
SITE_NAME = ENV_TOKENS['SITE_NAME']
LOG_DIR = ENV_TOKENS['LOG_DIR']
CACHES = ENV_TOKENS['CACHES']
# Cache used for location mapping -- called many times with the same key/value
# in a given request.
if 'loc_cache' not in CACHES:
CACHES['loc_cache'] = {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_location_mem_cache',
}
SESSION_COOKIE_DOMAIN = ENV_TOKENS.get('SESSION_COOKIE_DOMAIN')
SESSION_ENGINE = ENV_TOKENS.get('SESSION_ENGINE', SESSION_ENGINE)
SESSION_COOKIE_SECURE = ENV_TOKENS.get('SESSION_COOKIE_SECURE', SESSION_COOKIE_SECURE)
# allow for environments to specify what cookie name our login subsystem should use
# this is to fix a bug regarding simultaneous logins between edx.org and edge.edx.org which can
# happen with some browsers (e.g. Firefox)
if ENV_TOKENS.get('SESSION_COOKIE_NAME', None):
# NOTE, there's a bug in Django (http://bugs.python.org/issue18012) which necessitates this being a str()
SESSION_COOKIE_NAME = str(ENV_TOKENS.get('SESSION_COOKIE_NAME'))
#Email overrides
DEFAULT_FROM_EMAIL = ENV_TOKENS.get('DEFAULT_FROM_EMAIL', DEFAULT_FROM_EMAIL)
DEFAULT_FEEDBACK_EMAIL = ENV_TOKENS.get('DEFAULT_FEEDBACK_EMAIL', DEFAULT_FEEDBACK_EMAIL)
ADMINS = ENV_TOKENS.get('ADMINS', ADMINS)
SERVER_EMAIL = ENV_TOKENS.get('SERVER_EMAIL', SERVER_EMAIL)
MKTG_URLS = ENV_TOKENS.get('MKTG_URLS', MKTG_URLS)
TECH_SUPPORT_EMAIL = ENV_TOKENS.get('TECH_SUPPORT_EMAIL', TECH_SUPPORT_EMAIL)
COURSES_WITH_UNSAFE_CODE = ENV_TOKENS.get("COURSES_WITH_UNSAFE_CODE", [])
ASSET_IGNORE_REGEX = ENV_TOKENS.get('ASSET_IGNORE_REGEX', ASSET_IGNORE_REGEX)
# Theme overrides
THEME_NAME = ENV_TOKENS.get('THEME_NAME', None)
#Timezone overrides
TIME_ZONE = ENV_TOKENS.get('TIME_ZONE', TIME_ZONE)
# Push to LMS overrides
GIT_REPO_EXPORT_DIR = ENV_TOKENS.get('GIT_REPO_EXPORT_DIR', '/edx/var/edxapp/export_course_repos')
# Translation overrides
LANGUAGES = ENV_TOKENS.get('LANGUAGES', LANGUAGES)
LANGUAGE_CODE = ENV_TOKENS.get('LANGUAGE_CODE', LANGUAGE_CODE)
USE_I18N = ENV_TOKENS.get('USE_I18N', USE_I18N)
ENV_FEATURES = ENV_TOKENS.get('FEATURES', ENV_TOKENS.get('MITX_FEATURES', {}))
for feature, value in ENV_FEATURES.items():
FEATURES[feature] = value
# Additional installed apps
for app in ENV_TOKENS.get('ADDL_INSTALLED_APPS', []):
INSTALLED_APPS += (app,)
WIKI_ENABLED = ENV_TOKENS.get('WIKI_ENABLED', WIKI_ENABLED)
LOGGING = get_logger_config(LOG_DIR,
logging_env=ENV_TOKENS['LOGGING_ENV'],
debug=False,
service_variant=SERVICE_VARIANT)
#theming start:
PLATFORM_NAME = ENV_TOKENS.get('PLATFORM_NAME', 'edX')
# Event Tracking
if "TRACKING_IGNORE_URL_PATTERNS" in ENV_TOKENS:
TRACKING_IGNORE_URL_PATTERNS = ENV_TOKENS.get("TRACKING_IGNORE_URL_PATTERNS")
# Django CAS external authentication settings
CAS_EXTRA_LOGIN_PARAMS = ENV_TOKENS.get("CAS_EXTRA_LOGIN_PARAMS", None)
if FEATURES.get('AUTH_USE_CAS'):
CAS_SERVER_URL = ENV_TOKENS.get("CAS_SERVER_URL", None)
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'django_cas.backends.CASBackend',
)
INSTALLED_APPS += ('django_cas',)
MIDDLEWARE_CLASSES += ('django_cas.middleware.CASMiddleware',)
CAS_ATTRIBUTE_CALLBACK = ENV_TOKENS.get('CAS_ATTRIBUTE_CALLBACK', None)
if CAS_ATTRIBUTE_CALLBACK:
import importlib
CAS_USER_DETAILS_RESOLVER = getattr(
importlib.import_module(CAS_ATTRIBUTE_CALLBACK['module']),
CAS_ATTRIBUTE_CALLBACK['function']
)
################ SECURE AUTH ITEMS ###############################
# Secret things: passwords, access keys, etc.
with open(CONFIG_ROOT / CONFIG_PREFIX + "auth.json") as auth_file:
AUTH_TOKENS = json.load(auth_file)
############### XBlock filesystem field config ##########
if 'DJFS' in AUTH_TOKENS and AUTH_TOKENS['DJFS'] is not None:
DJFS = AUTH_TOKENS['DJFS']
EMAIL_HOST_USER = AUTH_TOKENS.get('EMAIL_HOST_USER', EMAIL_HOST_USER)
EMAIL_HOST_PASSWORD = AUTH_TOKENS.get('EMAIL_HOST_PASSWORD', EMAIL_HOST_PASSWORD)
# If Segment.io key specified, load it and turn on Segment.io if the feature flag is set
# Note that this is the Studio key. There is a separate key for the LMS.
SEGMENT_IO_KEY = AUTH_TOKENS.get('SEGMENT_IO_KEY')
if SEGMENT_IO_KEY:
FEATURES['SEGMENT_IO'] = ENV_TOKENS.get('SEGMENT_IO', False)
AWS_ACCESS_KEY_ID = AUTH_TOKENS["AWS_ACCESS_KEY_ID"]
if AWS_ACCESS_KEY_ID == "":
AWS_ACCESS_KEY_ID = None
AWS_SECRET_ACCESS_KEY = AUTH_TOKENS["AWS_SECRET_ACCESS_KEY"]
if AWS_SECRET_ACCESS_KEY == "":
AWS_SECRET_ACCESS_KEY = None
DATABASES = AUTH_TOKENS['DATABASES']
MODULESTORE = convert_module_store_setting_if_needed(AUTH_TOKENS.get('MODULESTORE', MODULESTORE))
CONTENTSTORE = AUTH_TOKENS['CONTENTSTORE']
DOC_STORE_CONFIG = AUTH_TOKENS['DOC_STORE_CONFIG']
# Datadog for events!
DATADOG = AUTH_TOKENS.get("DATADOG", {})
DATADOG.update(ENV_TOKENS.get("DATADOG", {}))
# TODO: deprecated (compatibility with previous settings)
if 'DATADOG_API' in AUTH_TOKENS:
DATADOG['api_key'] = AUTH_TOKENS['DATADOG_API']
# Celery Broker
CELERY_ALWAYS_EAGER = ENV_TOKENS.get("CELERY_ALWAYS_EAGER", False)
CELERY_BROKER_TRANSPORT = ENV_TOKENS.get("CELERY_BROKER_TRANSPORT", "")
CELERY_BROKER_HOSTNAME = ENV_TOKENS.get("CELERY_BROKER_HOSTNAME", "")
CELERY_BROKER_VHOST = ENV_TOKENS.get("CELERY_BROKER_VHOST", "")
CELERY_BROKER_USER = AUTH_TOKENS.get("CELERY_BROKER_USER", "")
CELERY_BROKER_PASSWORD = AUTH_TOKENS.get("CELERY_BROKER_PASSWORD", "")
BROKER_URL = "{0}://{1}:{2}@{3}/{4}".format(CELERY_BROKER_TRANSPORT,
CELERY_BROKER_USER,
CELERY_BROKER_PASSWORD,
CELERY_BROKER_HOSTNAME,
CELERY_BROKER_VHOST)
# Event tracking
TRACKING_BACKENDS.update(AUTH_TOKENS.get("TRACKING_BACKENDS", {}))
EVENT_TRACKING_BACKENDS.update(AUTH_TOKENS.get("EVENT_TRACKING_BACKENDS", {}))
SUBDOMAIN_BRANDING = ENV_TOKENS.get('SUBDOMAIN_BRANDING', {})
VIRTUAL_UNIVERSITIES = ENV_TOKENS.get('VIRTUAL_UNIVERSITIES', [])
##### ACCOUNT LOCKOUT DEFAULT PARAMETERS #####
MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = ENV_TOKENS.get("MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED", 5)
MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = ENV_TOKENS.get("MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS", 15 * 60)
MICROSITE_CONFIGURATION = ENV_TOKENS.get('MICROSITE_CONFIGURATION', {})
MICROSITE_ROOT_DIR = path(ENV_TOKENS.get('MICROSITE_ROOT_DIR', ''))
#### PASSWORD POLICY SETTINGS #####
PASSWORD_MIN_LENGTH = ENV_TOKENS.get("PASSWORD_MIN_LENGTH")
PASSWORD_MAX_LENGTH = ENV_TOKENS.get("PASSWORD_MAX_LENGTH")
PASSWORD_COMPLEXITY = ENV_TOKENS.get("PASSWORD_COMPLEXITY", {})
PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD = ENV_TOKENS.get("PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD")
PASSWORD_DICTIONARY = ENV_TOKENS.get("PASSWORD_DICTIONARY", [])
### INACTIVITY SETTINGS ####
SESSION_INACTIVITY_TIMEOUT_IN_SECONDS = AUTH_TOKENS.get("SESSION_INACTIVITY_TIMEOUT_IN_SECONDS")
##### X-Frame-Options response header settings #####
X_FRAME_OPTIONS = ENV_TOKENS.get('X_FRAME_OPTIONS', X_FRAME_OPTIONS)
##### ADVANCED_SECURITY_CONFIG #####
ADVANCED_SECURITY_CONFIG = ENV_TOKENS.get('ADVANCED_SECURITY_CONFIG', {})
################ ADVANCED COMPONENT/PROBLEM TYPES ###############
ADVANCED_COMPONENT_TYPES = ENV_TOKENS.get('ADVANCED_COMPONENT_TYPES', ADVANCED_COMPONENT_TYPES)
ADVANCED_PROBLEM_TYPES = ENV_TOKENS.get('ADVANCED_PROBLEM_TYPES', ADVANCED_PROBLEM_TYPES)
| agpl-3.0 |
maohongyuan/kbengine | kbe/res/scripts/common/Lib/test/test_email/test__encoded_words.py | 123 | 6387 | import unittest
from email import _encoded_words as _ew
from email import errors
from test.test_email import TestEmailBase
class TestDecodeQ(TestEmailBase):
def _test(self, source, ex_result, ex_defects=[]):
result, defects = _ew.decode_q(source)
self.assertEqual(result, ex_result)
self.assertDefectsEqual(defects, ex_defects)
def test_no_encoded(self):
self._test(b'foobar', b'foobar')
def test_spaces(self):
self._test(b'foo=20bar=20', b'foo bar ')
self._test(b'foo_bar_', b'foo bar ')
def test_run_of_encoded(self):
self._test(b'foo=20=20=21=2Cbar', b'foo !,bar')
class TestDecodeB(TestEmailBase):
def _test(self, source, ex_result, ex_defects=[]):
result, defects = _ew.decode_b(source)
self.assertEqual(result, ex_result)
self.assertDefectsEqual(defects, ex_defects)
def test_simple(self):
self._test(b'Zm9v', b'foo')
def test_missing_padding(self):
self._test(b'dmk', b'vi', [errors.InvalidBase64PaddingDefect])
def test_invalid_character(self):
self._test(b'dm\x01k===', b'vi', [errors.InvalidBase64CharactersDefect])
def test_invalid_character_and_bad_padding(self):
self._test(b'dm\x01k', b'vi', [errors.InvalidBase64CharactersDefect,
errors.InvalidBase64PaddingDefect])
class TestDecode(TestEmailBase):
def test_wrong_format_input_raises(self):
with self.assertRaises(ValueError):
_ew.decode('=?badone?=')
with self.assertRaises(ValueError):
_ew.decode('=?')
with self.assertRaises(ValueError):
_ew.decode('')
def _test(self, source, result, charset='us-ascii', lang='', defects=[]):
res, char, l, d = _ew.decode(source)
self.assertEqual(res, result)
self.assertEqual(char, charset)
self.assertEqual(l, lang)
self.assertDefectsEqual(d, defects)
def test_simple_q(self):
self._test('=?us-ascii?q?foo?=', 'foo')
def test_simple_b(self):
self._test('=?us-ascii?b?dmk=?=', 'vi')
def test_q_case_ignored(self):
self._test('=?us-ascii?Q?foo?=', 'foo')
def test_b_case_ignored(self):
self._test('=?us-ascii?B?dmk=?=', 'vi')
def test_non_trivial_q(self):
self._test('=?latin-1?q?=20F=fcr=20Elise=20?=', ' Für Elise ', 'latin-1')
def test_q_escaped_bytes_preserved(self):
self._test(b'=?us-ascii?q?=20\xACfoo?='.decode('us-ascii',
'surrogateescape'),
' \uDCACfoo',
defects = [errors.UndecodableBytesDefect])
def test_b_undecodable_bytes_ignored_with_defect(self):
self._test(b'=?us-ascii?b?dm\xACk?='.decode('us-ascii',
'surrogateescape'),
'vi',
defects = [
errors.InvalidBase64CharactersDefect,
errors.InvalidBase64PaddingDefect])
def test_b_invalid_bytes_ignored_with_defect(self):
self._test('=?us-ascii?b?dm\x01k===?=',
'vi',
defects = [errors.InvalidBase64CharactersDefect])
def test_b_invalid_bytes_incorrect_padding(self):
self._test('=?us-ascii?b?dm\x01k?=',
'vi',
defects = [
errors.InvalidBase64CharactersDefect,
errors.InvalidBase64PaddingDefect])
def test_b_padding_defect(self):
self._test('=?us-ascii?b?dmk?=',
'vi',
defects = [errors.InvalidBase64PaddingDefect])
def test_nonnull_lang(self):
self._test('=?us-ascii*jive?q?test?=', 'test', lang='jive')
def test_unknown_8bit_charset(self):
self._test('=?unknown-8bit?q?foo=ACbar?=',
b'foo\xacbar'.decode('ascii', 'surrogateescape'),
charset = 'unknown-8bit',
defects = [])
def test_unknown_charset(self):
self._test('=?foobar?q?foo=ACbar?=',
b'foo\xacbar'.decode('ascii', 'surrogateescape'),
charset = 'foobar',
# XXX Should this be a new Defect instead?
defects = [errors.CharsetError])
def test_q_nonascii(self):
self._test('=?utf-8?q?=C3=89ric?=',
'Éric',
charset='utf-8')
class TestEncodeQ(TestEmailBase):
def _test(self, src, expected):
self.assertEqual(_ew.encode_q(src), expected)
def test_all_safe(self):
self._test(b'foobar', 'foobar')
def test_spaces(self):
self._test(b'foo bar ', 'foo_bar_')
def test_run_of_encodables(self):
self._test(b'foo ,,bar', 'foo__=2C=2Cbar')
class TestEncodeB(TestEmailBase):
def test_simple(self):
self.assertEqual(_ew.encode_b(b'foo'), 'Zm9v')
def test_padding(self):
self.assertEqual(_ew.encode_b(b'vi'), 'dmk=')
class TestEncode(TestEmailBase):
def test_q(self):
self.assertEqual(_ew.encode('foo', 'utf-8', 'q'), '=?utf-8?q?foo?=')
def test_b(self):
self.assertEqual(_ew.encode('foo', 'utf-8', 'b'), '=?utf-8?b?Zm9v?=')
def test_auto_q(self):
self.assertEqual(_ew.encode('foo', 'utf-8'), '=?utf-8?q?foo?=')
def test_auto_q_if_short_mostly_safe(self):
self.assertEqual(_ew.encode('vi.', 'utf-8'), '=?utf-8?q?vi=2E?=')
def test_auto_b_if_enough_unsafe(self):
self.assertEqual(_ew.encode('.....', 'utf-8'), '=?utf-8?b?Li4uLi4=?=')
def test_auto_b_if_long_unsafe(self):
self.assertEqual(_ew.encode('vi.vi.vi.vi.vi.', 'utf-8'),
'=?utf-8?b?dmkudmkudmkudmkudmku?=')
def test_auto_q_if_long_mostly_safe(self):
self.assertEqual(_ew.encode('vi vi vi.vi ', 'utf-8'),
'=?utf-8?q?vi_vi_vi=2Evi_?=')
def test_utf8_default(self):
self.assertEqual(_ew.encode('foo'), '=?utf-8?q?foo?=')
def test_lang(self):
self.assertEqual(_ew.encode('foo', lang='jive'), '=?utf-8*jive?q?foo?=')
def test_unknown_8bit(self):
self.assertEqual(_ew.encode('foo\uDCACbar', charset='unknown-8bit'),
'=?unknown-8bit?q?foo=ACbar?=')
if __name__ == '__main__':
unittest.main()
| lgpl-3.0 |
iamprakashom/offlineimap | offlineimap/folder/GmailMaildir.py | 10 | 13398 | # Maildir folder support with labels
# Copyright (C) 2002 - 2011 John Goerzen & contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
from sys import exc_info
from .Maildir import MaildirFolder
from offlineimap import OfflineImapError
import offlineimap.accounts
from offlineimap import imaputil
class GmailMaildirFolder(MaildirFolder):
"""Folder implementation to support adding labels to messages in a Maildir.
"""
def __init__(self, root, name, sep, repository):
super(GmailMaildirFolder, self).__init__(root, name, sep, repository)
# The header under which labels are stored
self.labelsheader = self.repository.account.getconf('labelsheader', 'X-Keywords')
# enables / disables label sync
self.synclabels = self.repository.account.getconfboolean('synclabels', 0)
# if synclabels is enabled, add a 4th pass to sync labels
if self.synclabels:
self.syncmessagesto_passes.append(('syncing labels', self.syncmessagesto_labels))
def quickchanged(self, statusfolder):
"""Returns True if the Maildir has changed. Checks uids, flags and mtimes"""
self.cachemessagelist()
# Folder has different uids than statusfolder => TRUE
if sorted(self.getmessageuidlist()) != \
sorted(statusfolder.getmessageuidlist()):
return True
# check for flag changes, it's quick on a Maildir
for (uid, message) in self.getmessagelist().iteritems():
if message['flags'] != statusfolder.getmessageflags(uid):
return True
# check for newer mtimes. it is also fast
for (uid, message) in self.getmessagelist().iteritems():
if message['mtime'] > statusfolder.getmessagemtime(uid):
return True
return False #Nope, nothing changed
# Interface from BaseFolder
def msglist_item_initializer(self, uid):
return {'flags': set(), 'labels': set(), 'labels_cached': False,
'filename': '/no-dir/no-such-file/', 'mtime': 0}
def cachemessagelist(self, min_date=None, min_uid=None):
if self.ismessagelistempty():
self.messagelist = self._scanfolder(min_date=min_date, min_uid=min_uid)
# Get mtimes
if self.synclabels:
for uid, msg in self.messagelist.items():
filepath = os.path.join(self.getfullname(), msg['filename'])
msg['mtime'] = long(os.stat(filepath).st_mtime)
def getmessagelabels(self, uid):
# Labels are not cached in cachemessagelist because it is too slow.
if not self.messagelist[uid]['labels_cached']:
filename = self.messagelist[uid]['filename']
filepath = os.path.join(self.getfullname(), filename)
if not os.path.exists(filepath):
return set()
file = open(filepath, 'rt')
content = file.read()
file.close()
self.messagelist[uid]['labels'] = set()
for hstr in self.getmessageheaderlist(content, self.labelsheader):
self.messagelist[uid]['labels'].update(
imaputil.labels_from_header(self.labelsheader, hstr))
self.messagelist[uid]['labels_cached'] = True
return self.messagelist[uid]['labels']
def getmessagemtime(self, uid):
if not 'mtime' in self.messagelist[uid]:
return 0
else:
return self.messagelist[uid]['mtime']
def savemessage(self, uid, content, flags, rtime):
"""Writes a new message, with the specified uid.
See folder/Base for detail. Note that savemessage() does not
check against dryrun settings, so you need to ensure that
savemessage is never called in a dryrun mode."""
if not self.synclabels:
return super(GmailMaildirFolder, self).savemessage(uid, content, flags, rtime)
labels = set()
for hstr in self.getmessageheaderlist(content, self.labelsheader):
labels.update(imaputil.labels_from_header(self.labelsheader, hstr))
ret = super(GmailMaildirFolder, self).savemessage(uid, content, flags, rtime)
# Update the mtime and labels
filename = self.messagelist[uid]['filename']
filepath = os.path.join(self.getfullname(), filename)
self.messagelist[uid]['mtime'] = long(os.stat(filepath).st_mtime)
self.messagelist[uid]['labels'] = labels
return ret
def savemessagelabels(self, uid, labels, ignorelabels=set()):
"""Change a message's labels to `labels`.
Note that this function does not check against dryrun settings,
so you need to ensure that it is never called in a dryrun mode."""
filename = self.messagelist[uid]['filename']
filepath = os.path.join(self.getfullname(), filename)
file = open(filepath, 'rt')
content = file.read()
file.close()
oldlabels = set()
for hstr in self.getmessageheaderlist(content, self.labelsheader):
oldlabels.update(imaputil.labels_from_header(self.labelsheader, hstr))
labels = labels - ignorelabels
ignoredlabels = oldlabels & ignorelabels
oldlabels = oldlabels - ignorelabels
# Nothing to change
if labels == oldlabels:
return
# Change labels into content
labels_str = imaputil.format_labels_string(self.labelsheader,
sorted(labels | ignoredlabels))
# First remove old labels header, and then add the new one
content = self.deletemessageheaders(content, self.labelsheader)
content = self.addmessageheader(content, '\n', self.labelsheader, labels_str)
mtime = long(os.stat(filepath).st_mtime)
# write file with new labels to a unique file in tmp
messagename = self.new_message_filename(uid, set())
tmpname = self.save_to_tmp_file(messagename, content)
tmppath = os.path.join(self.getfullname(), tmpname)
# move to actual location
try:
os.rename(tmppath, filepath)
except OSError as e:
raise OfflineImapError("Can't rename file '%s' to '%s': %s" % \
(tmppath, filepath, e[1]), OfflineImapError.ERROR.FOLDER), \
None, exc_info()[2]
# if utime_from_header=true, we don't want to change the mtime.
if self.utime_from_header and mtime:
os.utime(filepath, (mtime, mtime))
# save the new mtime and labels
self.messagelist[uid]['mtime'] = long(os.stat(filepath).st_mtime)
self.messagelist[uid]['labels'] = labels
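    # savemessagelabels() above, as a rough sketch: read the message, diff the
    # old labels against the new ones, rewrite the labels header (whatever
    # self.labelsheader is configured to; X-Keywords is only an assumption
    # here), write the result to a tmp file and rename it over the original,
    # restoring the mtime when utime_from_header is enabled.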
def copymessageto(self, uid, dstfolder, statusfolder, register = 1):
"""Copies a message from self to dst if needed, updating the status
Note that this function does not check against dryrun settings,
so you need to ensure that it is never called in a
dryrun mode.
:param uid: uid of the message to be copied.
:param dstfolder: A BaseFolder-derived instance
:param statusfolder: A LocalStatusFolder instance
        :param register: whether we should register a new thread.
:returns: Nothing on success, or raises an Exception."""
# Check if we are really copying
realcopy = uid > 0 and not dstfolder.uidexists(uid)
# first copy the message
super(GmailMaildirFolder, self).copymessageto(uid, dstfolder, statusfolder, register)
        # Sync labels and mtime now when the message is new (the embedded labels
        # are up to date and have already propagated to the remote server).
        # For messages which already existed on the remote this is pointless, as
        # the labels may still get updated later.
if realcopy and self.synclabels:
try:
labels = dstfolder.getmessagelabels(uid)
statusfolder.savemessagelabels(uid, labels, mtime=self.getmessagemtime(uid))
# dstfolder is not GmailMaildir.
except NotImplementedError:
return
def syncmessagesto_labels(self, dstfolder, statusfolder):
"""Pass 4: Label Synchronization (Gmail only)
Compare label mismatches in self with those in statusfolder. If
msg has a valid UID and exists on dstfolder (has not e.g. been
deleted there), sync the labels change to both dstfolder and
statusfolder.
        Also skips messages whose mtime is the same as in statusfolder, as their
        contents have not changed.
        This function checks and protects us from action in dryrun mode.
"""
# For each label, we store a list of uids to which it should be
# added. Then, we can call addmessageslabels() to apply them in
# bulk, rather than one call per message.
addlabellist = {}
dellabellist = {}
uidlist = []
try:
# filter uids (fast)
for uid in self.getmessageuidlist():
# bail out on CTRL-C or SIGTERM
if offlineimap.accounts.Account.abort_NOW_signal.is_set():
break
# Ignore messages with negative UIDs missed by pass 1 and
# don't do anything if the message has been deleted remotely
if uid < 0 or not dstfolder.uidexists(uid):
continue
selfmtime = self.getmessagemtime(uid)
if statusfolder.uidexists(uid):
statusmtime = statusfolder.getmessagemtime(uid)
else:
statusmtime = 0
if selfmtime > statusmtime:
uidlist.append(uid)
self.ui.collectingdata(uidlist, self)
# This can be slow if there is a lot of modified files
for uid in uidlist:
# bail out on CTRL-C or SIGTERM
if offlineimap.accounts.Account.abort_NOW_signal.is_set():
break
selflabels = self.getmessagelabels(uid)
if statusfolder.uidexists(uid):
statuslabels = statusfolder.getmessagelabels(uid)
else:
statuslabels = set()
addlabels = selflabels - statuslabels
dellabels = statuslabels - selflabels
for lb in addlabels:
if not lb in addlabellist:
addlabellist[lb] = []
addlabellist[lb].append(uid)
for lb in dellabels:
if not lb in dellabellist:
dellabellist[lb] = []
dellabellist[lb].append(uid)
for lb, uids in addlabellist.items():
# bail out on CTRL-C or SIGTERM
if offlineimap.accounts.Account.abort_NOW_signal.is_set():
break
self.ui.addinglabels(uids, lb, dstfolder)
if self.repository.account.dryrun:
continue #don't actually add in a dryrun
dstfolder.addmessageslabels(uids, set([lb]))
statusfolder.addmessageslabels(uids, set([lb]))
for lb, uids in dellabellist.items():
# bail out on CTRL-C or SIGTERM
if offlineimap.accounts.Account.abort_NOW_signal.is_set():
break
self.ui.deletinglabels(uids, lb, dstfolder)
if self.repository.account.dryrun:
continue #don't actually remove in a dryrun
dstfolder.deletemessageslabels(uids, set([lb]))
statusfolder.deletemessageslabels(uids, set([lb]))
                # Update mtimes on StatusFolder. It is done last to be safe. If something else fails
# and the mtime is not updated, the labels will still be synced next time.
mtimes = {}
for uid in uidlist:
# bail out on CTRL-C or SIGTERM
if offlineimap.accounts.Account.abort_NOW_signal.is_set():
break
if self.repository.account.dryrun:
continue #don't actually update statusfolder
filename = self.messagelist[uid]['filename']
filepath = os.path.join(self.getfullname(), filename)
mtimes[uid] = long(os.stat(filepath).st_mtime)
# finally update statusfolder in a single DB transaction
statusfolder.savemessagesmtimebulk(mtimes)
except NotImplementedError:
self.ui.warn("Can't sync labels. You need to configure a remote repository of type Gmail.")
| gpl-2.0 |
PeterWangIntel/chromium-crosswalk | native_client_sdk/src/tools/tests/create_nmf_test.py | 3 | 19466 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import posixpath
import shutil
import subprocess
import sys
import tempfile
import unittest
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
TOOLS_DIR = os.path.dirname(SCRIPT_DIR)
DATA_DIR = os.path.join(TOOLS_DIR, 'lib', 'tests', 'data')
BUILD_TOOLS_DIR = os.path.join(os.path.dirname(TOOLS_DIR), 'build_tools')
CHROME_SRC = os.path.dirname(os.path.dirname(os.path.dirname(TOOLS_DIR)))
MOCK_DIR = os.path.join(CHROME_SRC, 'third_party', 'pymock')
# For the mock library
sys.path.append(MOCK_DIR)
sys.path.append(TOOLS_DIR)
sys.path.append(BUILD_TOOLS_DIR)
import build_paths
import create_nmf
import getos
from mock import patch, Mock
TOOLCHAIN_OUT = os.path.join(build_paths.OUT_DIR, 'sdk_tests', 'toolchain')
NACL_X86_GLIBC_TOOLCHAIN = os.path.join(TOOLCHAIN_OUT,
'%s_x86' % getos.GetPlatform(),
'nacl_x86_glibc')
PosixRelPath = create_nmf.PosixRelPath
def StripSo(name):
"""Strip trailing hexidecimal characters from the name of a shared object.
It strips everything after the last '.' in the name, and checks that the new
name ends with .so.
e.g.
libc.so.ad6acbfa => libc.so
foo.bar.baz => foo.bar.baz
"""
stripped_name = '.'.join(name.split('.')[:-1])
if stripped_name.endswith('.so'):
return stripped_name
return name
class TestPosixRelPath(unittest.TestCase):
def testBasic(self):
# Note that PosixRelPath only converts from native path format to posix
# path format, that's why we have to use os.path.join here.
path = os.path.join(os.path.sep, 'foo', 'bar', 'baz.blah')
start = os.path.sep + 'foo'
self.assertEqual(PosixRelPath(path, start), 'bar/baz.blah')
class TestDefaultLibpath(unittest.TestCase):
def setUp(self):
patcher = patch('create_nmf.GetSDKRoot', Mock(return_value='/dummy/path'))
patcher.start()
self.addCleanup(patcher.stop)
def testUsesSDKRoot(self):
paths = create_nmf.GetDefaultLibPath('Debug')
for path in paths:
self.assertTrue(path.startswith('/dummy/path'))
def testIncludesNaClPorts(self):
paths = create_nmf.GetDefaultLibPath('Debug')
self.assertTrue(any(os.path.join('ports', 'lib') in p for p in paths),
"naclports libpath missing: %s" % str(paths))
class TestNmfUtils(unittest.TestCase):
"""Tests for the main NmfUtils class in create_nmf."""
def setUp(self):
self.tempdir = None
self.toolchain = NACL_X86_GLIBC_TOOLCHAIN
self.objdump = os.path.join(self.toolchain, 'bin', 'i686-nacl-objdump')
if os.name == 'nt':
self.objdump += '.exe'
self._Mktemp()
def _CreateTestNexe(self, name, arch):
"""Create an empty test .nexe file for use in create_nmf tests.
This is used rather than checking in test binaries since the
    checked-in binaries depend on .so files that only exist in the
    particular SDK that built them.
"""
compiler = os.path.join(self.toolchain, 'bin', '%s-nacl-g++' % arch)
if os.name == 'nt':
compiler += '.exe'
os.environ['CYGWIN'] = 'nodosfilewarning'
program = 'int main() { return 0; }'
name = os.path.join(self.tempdir, name)
dst_dir = os.path.dirname(name)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
cmd = [compiler, '-pthread', '-x' , 'c', '-o', name, '-']
p = subprocess.Popen(cmd, stdin=subprocess.PIPE)
p.communicate(input=program)
self.assertEqual(p.returncode, 0)
return name
def tearDown(self):
if self.tempdir:
shutil.rmtree(self.tempdir)
def _Mktemp(self):
self.tempdir = tempfile.mkdtemp()
def _CreateNmfUtils(self, nexes, **kwargs):
if not kwargs.get('lib_path'):
# Use lib instead of lib64 (lib64 is a symlink to lib).
kwargs['lib_path'] = [
os.path.join(self.toolchain, 'x86_64-nacl', 'lib'),
os.path.join(self.toolchain, 'x86_64-nacl', 'lib32')]
return create_nmf.NmfUtils(nexes,
objdump=self.objdump,
**kwargs)
def _CreateStatic(self, arch_path=None, **kwargs):
"""Copy all static .nexe files from the DATA_DIR to a temporary directory.
Args:
arch_path: A dictionary mapping architecture to the directory to generate
the .nexe for the architecture in.
kwargs: Keyword arguments to pass through to create_nmf.NmfUtils
constructor.
Returns:
A tuple with 2 elements:
* The generated NMF as a dictionary (i.e. parsed by json.loads)
* A list of the generated .nexe paths
"""
arch_path = arch_path or {}
nexes = []
for arch in ('x86_64', 'x86_32', 'arm'):
nexe_name = 'test_static_%s.nexe' % arch
src_nexe = os.path.join(DATA_DIR, nexe_name)
dst_nexe = os.path.join(self.tempdir, arch_path.get(arch, ''), nexe_name)
dst_dir = os.path.dirname(dst_nexe)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
shutil.copy(src_nexe, dst_nexe)
nexes.append(dst_nexe)
nexes.sort()
nmf_utils = self._CreateNmfUtils(nexes, **kwargs)
nmf = json.loads(nmf_utils.GetJson())
return nmf, nexes
def _CreateDynamicAndStageDeps(self, arch_path=None, **kwargs):
"""Create dynamic .nexe files and put them in a temporary directory, with
their dependencies staged in the same directory.
Args:
arch_path: A dictionary mapping architecture to the directory to generate
the .nexe for the architecture in.
kwargs: Keyword arguments to pass through to create_nmf.NmfUtils
constructor.
Returns:
A tuple with 2 elements:
* The generated NMF as a dictionary (i.e. parsed by json.loads)
* A list of the generated .nexe paths
"""
arch_path = arch_path or {}
nexes = []
for arch in ('x86_64', 'x86_32'):
nexe_name = 'test_dynamic_%s.nexe' % arch
rel_nexe = os.path.join(arch_path.get(arch, ''), nexe_name)
arch_alt = 'i686' if arch == 'x86_32' else arch
nexe = self._CreateTestNexe(rel_nexe, arch_alt)
nexes.append(nexe)
nexes.sort()
nmf_utils = self._CreateNmfUtils(nexes, **kwargs)
nmf = json.loads(nmf_utils.GetJson())
nmf_utils.StageDependencies(self.tempdir)
return nmf, nexes
def _CreatePexe(self, **kwargs):
"""Copy test.pexe from the DATA_DIR to a temporary directory.
Args:
kwargs: Keyword arguments to pass through to create_nmf.NmfUtils
constructor.
Returns:
A tuple with 2 elements:
* The generated NMF as a dictionary (i.e. parsed by json.loads)
* A list of the generated .pexe paths
"""
pexe_name = 'test.pexe'
src_pexe = os.path.join(DATA_DIR, pexe_name)
dst_pexe = os.path.join(self.tempdir, pexe_name)
shutil.copy(src_pexe, dst_pexe)
pexes = [dst_pexe]
nmf_utils = self._CreateNmfUtils(pexes, **kwargs)
nmf = json.loads(nmf_utils.GetJson())
return nmf, pexes
def _CreateBitCode(self, **kwargs):
"""Copy test.bc from the DATA_DIR to a temporary directory.
Args:
kwargs: Keyword arguments to pass through to create_nmf.NmfUtils
constructor.
Returns:
A tuple with 2 elements:
* The generated NMF as a dictionary (i.e. parsed by json.loads)
* A list of the generated .bc paths
"""
bc_name = 'test.bc'
src_bc = os.path.join(DATA_DIR, bc_name)
dst_bc = os.path.join(self.tempdir, bc_name)
shutil.copy(src_bc, dst_bc)
bcs = [dst_bc]
nmf_utils = self._CreateNmfUtils(bcs, **kwargs)
nmf = json.loads(nmf_utils.GetJson())
return nmf, bcs
def assertManifestEquals(self, manifest, expected):
"""Compare two manifest dictionaries.
The input manifest is regenerated with all string keys and values being
    processed through StripSo, to remove the random hexadecimal characters at
the end of shared object names.
Args:
manifest: The generated manifest.
expected: The expected manifest.
"""
def StripSoCopyDict(d):
new_d = {}
for k, v in d.iteritems():
new_k = StripSo(k)
if isinstance(v, (str, unicode)):
new_v = StripSo(v)
elif type(v) is list:
new_v = v[:]
elif type(v) is dict:
new_v = StripSoCopyDict(v)
else:
# Assume that anything else can be copied directly.
new_v = v
new_d[new_k] = new_v
return new_d
self.assertEqual(StripSoCopyDict(manifest), expected)
def assertStagingEquals(self, expected):
"""Compare the contents of the temporary directory, to an expected
directory layout.
Args:
expected: The expected directory layout.
"""
all_files = []
for root, _, files in os.walk(self.tempdir):
rel_root_posix = PosixRelPath(root, self.tempdir)
for f in files:
path = posixpath.join(rel_root_posix, StripSo(f))
if path.startswith('./'):
path = path[2:]
all_files.append(path)
self.assertEqual(set(expected), set(all_files))
def testStatic(self):
nmf, _ = self._CreateStatic()
expected_manifest = {
'files': {},
'program': {
'x86-64': {'url': 'test_static_x86_64.nexe'},
'x86-32': {'url': 'test_static_x86_32.nexe'},
'arm': {'url': 'test_static_arm.nexe'},
}
}
self.assertManifestEquals(nmf, expected_manifest)
def testStaticWithPath(self):
arch_dir = {'x86_32': 'x86_32', 'x86_64': 'x86_64', 'arm': 'arm'}
nmf, _ = self._CreateStatic(arch_dir, nmf_root=self.tempdir)
expected_manifest = {
'files': {},
'program': {
'x86-32': {'url': 'x86_32/test_static_x86_32.nexe'},
'x86-64': {'url': 'x86_64/test_static_x86_64.nexe'},
'arm': {'url': 'arm/test_static_arm.nexe'},
}
}
self.assertManifestEquals(nmf, expected_manifest)
def testStaticWithPathNoNmfRoot(self):
# This case is not particularly useful, but it is similar to how create_nmf
# used to work. If there is no nmf_root given, all paths are relative to
# the first nexe passed on the commandline. I believe the assumption
# previously was that all .nexes would be in the same directory.
arch_dir = {'x86_32': 'x86_32', 'x86_64': 'x86_64', 'arm': 'arm'}
nmf, _ = self._CreateStatic(arch_dir)
expected_manifest = {
'files': {},
'program': {
'x86-32': {'url': '../x86_32/test_static_x86_32.nexe'},
'x86-64': {'url': '../x86_64/test_static_x86_64.nexe'},
'arm': {'url': 'test_static_arm.nexe'},
}
}
self.assertManifestEquals(nmf, expected_manifest)
def testStaticWithNexePrefix(self):
nmf, _ = self._CreateStatic(nexe_prefix='foo')
expected_manifest = {
'files': {},
'program': {
'x86-64': {'url': 'foo/test_static_x86_64.nexe'},
'x86-32': {'url': 'foo/test_static_x86_32.nexe'},
'arm': {'url': 'foo/test_static_arm.nexe'},
}
}
self.assertManifestEquals(nmf, expected_manifest)
def testDynamic(self):
nmf, nexes = self._CreateDynamicAndStageDeps()
expected_manifest = {
'files': {
'main.nexe': {
'x86-32': {'url': 'test_dynamic_x86_32.nexe'},
'x86-64': {'url': 'test_dynamic_x86_64.nexe'},
},
'libc.so': {
'x86-32': {'url': 'lib32/libc.so'},
'x86-64': {'url': 'lib64/libc.so'},
},
'libgcc_s.so': {
'x86-32': {'url': 'lib32/libgcc_s.so'},
'x86-64': {'url': 'lib64/libgcc_s.so'},
},
'libpthread.so': {
'x86-32': {'url': 'lib32/libpthread.so'},
'x86-64': {'url': 'lib64/libpthread.so'},
},
},
'program': {
'x86-32': {'url': 'lib32/runnable-ld.so'},
'x86-64': {'url': 'lib64/runnable-ld.so'},
}
}
expected_staging = [os.path.basename(f) for f in nexes]
expected_staging.extend([
'lib32/libc.so',
'lib32/libgcc_s.so',
'lib32/libpthread.so',
'lib32/runnable-ld.so',
'lib64/libc.so',
'lib64/libgcc_s.so',
'lib64/libpthread.so',
'lib64/runnable-ld.so'])
self.assertManifestEquals(nmf, expected_manifest)
self.assertStagingEquals(expected_staging)
def testDynamicWithPath(self):
arch_dir = {'x86_64': 'x86_64', 'x86_32': 'x86_32'}
nmf, nexes = self._CreateDynamicAndStageDeps(arch_dir,
nmf_root=self.tempdir)
expected_manifest = {
'files': {
'main.nexe': {
'x86-32': {'url': 'x86_32/test_dynamic_x86_32.nexe'},
'x86-64': {'url': 'x86_64/test_dynamic_x86_64.nexe'},
},
'libc.so': {
'x86-32': {'url': 'x86_32/lib32/libc.so'},
'x86-64': {'url': 'x86_64/lib64/libc.so'},
},
'libgcc_s.so': {
'x86-32': {'url': 'x86_32/lib32/libgcc_s.so'},
'x86-64': {'url': 'x86_64/lib64/libgcc_s.so'},
},
'libpthread.so': {
'x86-32': {'url': 'x86_32/lib32/libpthread.so'},
'x86-64': {'url': 'x86_64/lib64/libpthread.so'},
},
},
'program': {
'x86-32': {'url': 'x86_32/lib32/runnable-ld.so'},
'x86-64': {'url': 'x86_64/lib64/runnable-ld.so'},
}
}
expected_staging = [PosixRelPath(f, self.tempdir) for f in nexes]
expected_staging.extend([
'x86_32/lib32/libc.so',
'x86_32/lib32/libgcc_s.so',
'x86_32/lib32/libpthread.so',
'x86_32/lib32/runnable-ld.so',
'x86_64/lib64/libc.so',
'x86_64/lib64/libgcc_s.so',
'x86_64/lib64/libpthread.so',
'x86_64/lib64/runnable-ld.so'])
self.assertManifestEquals(nmf, expected_manifest)
self.assertStagingEquals(expected_staging)
def testDynamicWithRelPath(self):
"""Test that when the nmf root is a relative path that things work."""
arch_dir = {'x86_64': 'x86_64', 'x86_32': 'x86_32'}
old_path = os.getcwd()
try:
os.chdir(self.tempdir)
nmf, nexes = self._CreateDynamicAndStageDeps(arch_dir, nmf_root='')
expected_manifest = {
'files': {
'main.nexe': {
'x86-32': {'url': 'x86_32/test_dynamic_x86_32.nexe'},
'x86-64': {'url': 'x86_64/test_dynamic_x86_64.nexe'},
},
'libc.so': {
'x86-32': {'url': 'x86_32/lib32/libc.so'},
'x86-64': {'url': 'x86_64/lib64/libc.so'},
},
'libgcc_s.so': {
'x86-32': {'url': 'x86_32/lib32/libgcc_s.so'},
'x86-64': {'url': 'x86_64/lib64/libgcc_s.so'},
},
'libpthread.so': {
'x86-32': {'url': 'x86_32/lib32/libpthread.so'},
'x86-64': {'url': 'x86_64/lib64/libpthread.so'},
},
},
'program': {
'x86-32': {'url': 'x86_32/lib32/runnable-ld.so'},
'x86-64': {'url': 'x86_64/lib64/runnable-ld.so'},
}
}
expected_staging = [PosixRelPath(f, self.tempdir) for f in nexes]
expected_staging.extend([
'x86_32/lib32/libc.so',
'x86_32/lib32/libgcc_s.so',
'x86_32/lib32/libpthread.so',
'x86_32/lib32/runnable-ld.so',
'x86_64/lib64/libc.so',
'x86_64/lib64/libgcc_s.so',
'x86_64/lib64/libpthread.so',
'x86_64/lib64/runnable-ld.so'])
self.assertManifestEquals(nmf, expected_manifest)
self.assertStagingEquals(expected_staging)
finally:
os.chdir(old_path)
def testDynamicWithPathNoArchPrefix(self):
arch_dir = {'x86_64': 'x86_64', 'x86_32': 'x86_32'}
nmf, nexes = self._CreateDynamicAndStageDeps(arch_dir,
nmf_root=self.tempdir,
no_arch_prefix=True)
expected_manifest = {
'files': {
'main.nexe': {
'x86-32': {'url': 'x86_32/test_dynamic_x86_32.nexe'},
'x86-64': {'url': 'x86_64/test_dynamic_x86_64.nexe'},
},
'libc.so': {
'x86-32': {'url': 'x86_32/libc.so'},
'x86-64': {'url': 'x86_64/libc.so'},
},
'libgcc_s.so': {
'x86-32': {'url': 'x86_32/libgcc_s.so'},
'x86-64': {'url': 'x86_64/libgcc_s.so'},
},
'libpthread.so': {
'x86-32': {'url': 'x86_32/libpthread.so'},
'x86-64': {'url': 'x86_64/libpthread.so'},
},
},
'program': {
'x86-32': {'url': 'x86_32/runnable-ld.so'},
'x86-64': {'url': 'x86_64/runnable-ld.so'},
}
}
expected_staging = [PosixRelPath(f, self.tempdir) for f in nexes]
expected_staging.extend([
'x86_32/libc.so',
'x86_32/libgcc_s.so',
'x86_32/libpthread.so',
'x86_32/runnable-ld.so',
'x86_64/libc.so',
'x86_64/libgcc_s.so',
'x86_64/libpthread.so',
'x86_64/runnable-ld.so'])
self.assertManifestEquals(nmf, expected_manifest)
self.assertStagingEquals(expected_staging)
def testDynamicWithLibPrefix(self):
nmf, nexes = self._CreateDynamicAndStageDeps(lib_prefix='foo')
expected_manifest = {
'files': {
'main.nexe': {
'x86-32': {'url': 'test_dynamic_x86_32.nexe'},
'x86-64': {'url': 'test_dynamic_x86_64.nexe'},
},
'libc.so': {
'x86-32': {'url': 'foo/lib32/libc.so'},
'x86-64': {'url': 'foo/lib64/libc.so'},
},
'libgcc_s.so': {
'x86-32': {'url': 'foo/lib32/libgcc_s.so'},
'x86-64': {'url': 'foo/lib64/libgcc_s.so'},
},
'libpthread.so': {
'x86-32': {'url': 'foo/lib32/libpthread.so'},
'x86-64': {'url': 'foo/lib64/libpthread.so'},
},
},
'program': {
'x86-32': {'url': 'foo/lib32/runnable-ld.so'},
'x86-64': {'url': 'foo/lib64/runnable-ld.so'},
}
}
expected_staging = [PosixRelPath(f, self.tempdir) for f in nexes]
expected_staging.extend([
'foo/lib32/libc.so',
'foo/lib32/libgcc_s.so',
'foo/lib32/libpthread.so',
'foo/lib32/runnable-ld.so',
'foo/lib64/libc.so',
'foo/lib64/libgcc_s.so',
'foo/lib64/libpthread.so',
'foo/lib64/runnable-ld.so'])
self.assertManifestEquals(nmf, expected_manifest)
self.assertStagingEquals(expected_staging)
def testPexe(self):
nmf, _ = self._CreatePexe()
expected_manifest = {
'program': {
'portable': {
'pnacl-translate': {
'url': 'test.pexe'
}
}
}
}
self.assertManifestEquals(nmf, expected_manifest)
def testPexeOptLevel(self):
nmf, _ = self._CreatePexe(pnacl_optlevel=2)
expected_manifest = {
'program': {
'portable': {
'pnacl-translate': {
'url': 'test.pexe',
'optlevel': 2,
}
}
}
}
self.assertManifestEquals(nmf, expected_manifest)
def testBitCode(self):
nmf, _ = self._CreateBitCode(pnacl_debug_optlevel=0)
expected_manifest = {
'program': {
'portable': {
'pnacl-debug': {
'url': 'test.bc',
'optlevel': 0,
}
}
}
}
self.assertManifestEquals(nmf, expected_manifest)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
adityacs/ansible | lib/ansible/modules/network/nxos/nxos_mtu.py | 11 | 11446 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: nxos_mtu
version_added: "2.2"
short_description: Manages MTU settings on Nexus switch.
description:
- Manages MTU settings on Nexus switch.
author:
- Jason Edelman (@jedelman8)
notes:
    - Either C(sysmtu) param is required or C(interface) AND C(mtu) params are required.
- C(state=absent) unconfigures a given MTU if that value is currently present.
options:
interface:
description:
- Full name of interface, i.e. Ethernet1/1.
required: false
default: null
mtu:
description:
- MTU for a specific interface.
required: false
default: null
sysmtu:
description:
- System jumbo MTU.
required: false
default: null
state:
description:
- Specify desired state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# Ensure system mtu is 9216
- nxos_mtu:
sysmtu: 9216
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# Config mtu on Eth1/1 (routed interface)
- nxos_mtu:
interface: Ethernet1/1
mtu: 1600
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# Config mtu on Eth1/3 (switched interface)
- nxos_mtu:
interface: Ethernet1/3
mtu: 9216
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# Unconfigure mtu on a given interface
- nxos_mtu:
interface: Ethernet1/3
mtu: 9216
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
state: absent
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"mtu": "1700"}
existing:
description:
- k/v pairs of existing mtu/sysmtu on the interface/system
type: dict
sample: {"mtu": "1600", "sysmtu": "9216"}
end_state:
description: k/v pairs of mtu/sysmtu values after module execution
returned: always
type: dict
sample: {"mtu": "1700", sysmtu": "9216"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["interface vlan10", "mtu 1700"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.nxos import load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
def execute_show_command(command, module, command_type='cli_show'):
if module.params['transport'] == 'cli':
if 'show run' not in command:
command += ' | json'
cmds = [command]
body = run_commands(module, cmds)
elif module.params['transport'] == 'nxapi':
cmds = [command]
body = run_commands(module, cmds)
return body
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_mtu(interface, module):
command = 'show interface {0}'.format(interface)
mtu = {}
body = execute_show_command(command, module)
try:
mtu_table = body[0]['TABLE_interface']['ROW_interface']
mtu['mtu'] = str(
mtu_table.get('eth_mtu',
mtu_table.get('svi_mtu', 'unreadable_via_api')))
mtu['sysmtu'] = get_system_mtu(module)['sysmtu']
except KeyError:
mtu = {}
return mtu
def get_system_mtu(module):
command = 'show run all | inc jumbomtu'
sysmtu = ''
body = execute_show_command(command, module, command_type='cli_show_ascii')
if body:
sysmtu = str(body[0].split(' ')[-1])
try:
sysmtu = int(sysmtu)
    except ValueError:
sysmtu = ""
return dict(sysmtu=str(sysmtu))
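# Illustrative return shapes (a sketch, not exhaustive): get_system_mtu()
# yields e.g. {'sysmtu': '9216'} when "system jumbomtu 9216" appears in the
# running config, and {'sysmtu': ''} when no parseable value is found.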
def get_commands_config_mtu(delta, interface):
CONFIG_ARGS = {
'mtu': 'mtu {mtu}',
'sysmtu': 'system jumbomtu {sysmtu}',
}
commands = []
for param, value in delta.items():
command = CONFIG_ARGS.get(param, 'DNE').format(**delta)
if command and command != 'DNE':
commands.append(command)
command = None
mtu_check = delta.get('mtu', None)
if mtu_check:
commands.insert(0, 'interface {0}'.format(interface))
return commands
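# Illustrative sketch of the generated CLI:
# get_commands_config_mtu({'mtu': '1600'}, 'Ethernet1/1') returns
# ['interface Ethernet1/1', 'mtu 1600'], while
# get_commands_config_mtu({'sysmtu': '9216'}, None) returns
# ['system jumbomtu 9216'] with no interface line.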
def get_commands_remove_mtu(delta, interface):
CONFIG_ARGS = {
'mtu': 'no mtu {mtu}',
'sysmtu': 'no system jumbomtu {sysmtu}',
}
commands = []
for param, value in delta.items():
command = CONFIG_ARGS.get(param, 'DNE').format(**delta)
if command and command != 'DNE':
commands.append(command)
command = None
mtu_check = delta.get('mtu', None)
if mtu_check:
commands.insert(0, 'interface {0}'.format(interface))
return commands
def get_interface_type(interface):
if interface.upper().startswith('ET'):
return 'ethernet'
elif interface.upper().startswith('VL'):
return 'svi'
elif interface.upper().startswith('LO'):
return 'loopback'
elif interface.upper().startswith('MG'):
return 'management'
elif interface.upper().startswith('MA'):
return 'management'
elif interface.upper().startswith('PO'):
return 'portchannel'
else:
return 'unknown'
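# For example (a sketch): get_interface_type('Ethernet1/1') -> 'ethernet',
# get_interface_type('Vlan10') -> 'svi',
# get_interface_type('port-channel10') -> 'portchannel'.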
def is_default(interface, module):
command = 'show run interface {0}'.format(interface)
try:
body = execute_show_command(
command, module, command_type='cli_show_ascii')[0]
if body == 'DNE':
return 'DNE'
else:
raw_list = body.split('\n')
if raw_list[-1].startswith('interface'):
return True
else:
return False
except (KeyError):
return 'DNE'
def get_interface_mode(interface, intf_type, module):
command = 'show interface {0}'.format(interface)
mode = 'unknown'
interface_table = {}
body = execute_show_command(command, module)
try:
interface_table = body[0]['TABLE_interface']['ROW_interface']
except (KeyError, AttributeError, IndexError):
return mode
if intf_type in ['ethernet', 'portchannel']:
mode = str(interface_table.get('eth_mode', 'layer3'))
if mode in ['access', 'trunk']:
mode = 'layer2'
elif mode == 'routed':
mode = 'layer3'
elif intf_type in ['loopback', 'svi']:
mode = 'layer3'
return mode
def main():
argument_spec = dict(
mtu=dict(type='str'),
interface=dict(type='str'),
sysmtu=dict(type='str'),
state=dict(choices=['absent', 'present'], default='present'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
required_together=[['mtu', 'interface']],
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
interface = module.params['interface']
mtu = module.params['mtu']
sysmtu = module.params['sysmtu']
state = module.params['state']
if sysmtu and (interface or mtu):
module.fail_json(msg='Proper usage-- either just use the sysmtu param '
'or use interface AND mtu params')
if interface:
intf_type = get_interface_type(interface)
if intf_type != 'ethernet':
if is_default(interface, module) == 'DNE':
module.fail_json(msg='Invalid interface. It does not exist '
'on the switch.')
existing = get_mtu(interface, module)
else:
existing = get_system_mtu(module)
if interface and mtu:
if intf_type == 'loopback':
module.fail_json(msg='Cannot set MTU for loopback interface.')
mode = get_interface_mode(interface, intf_type, module)
if mode == 'layer2':
if intf_type in ['ethernet', 'portchannel']:
if mtu not in [existing['sysmtu'], '1500']:
module.fail_json(msg='MTU on L2 interfaces can only be set'
' to the system default (1500) or '
'existing sysmtu value which is '
' {0}'.format(existing['sysmtu']))
elif mode == 'layer3':
if intf_type in ['ethernet', 'portchannel', 'svi']:
if ((int(mtu) < 576 or int(mtu) > 9216) or
((int(mtu) % 2) != 0)):
                    module.fail_json(msg='Invalid MTU for Layer 3 interface; '
                                         'needs to be an even number between '
                                         '576 and 9216')
if sysmtu:
if ((int(sysmtu) < 576 or int(sysmtu) > 9216 or
((int(sysmtu) % 2) != 0))):
module.fail_json(msg='Invalid MTU- needs to be an even '
'number between 576 and 9216')
args = dict(mtu=mtu, sysmtu=sysmtu)
proposed = dict((k, v) for k, v in args.items() if v is not None)
delta = dict(set(proposed.items()).difference(existing.items()))
changed = False
end_state = existing
commands = []
if state == 'present':
if delta:
command = get_commands_config_mtu(delta, interface)
commands.append(command)
elif state == 'absent':
common = set(proposed.items()).intersection(existing.items())
if common:
command = get_commands_remove_mtu(dict(common), interface)
commands.append(command)
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
changed = True
load_config(module, cmds)
if interface:
end_state = get_mtu(interface, module)
else:
end_state = get_system_mtu(module)
if 'configure' in cmds:
cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['end_state'] = end_state
results['updates'] = cmds
results['changed'] = changed
results['warnings'] = warnings
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 |
drufat/sympy | sympy/physics/mechanics/kane.py | 6 | 37043 | from __future__ import print_function, division
from sympy import zeros, Matrix, diff, solve_linear_system_LU, eye
from sympy.core.compatibility import range
from sympy.utilities import default_sort_key
from sympy.physics.vector import (ReferenceFrame, dynamicsymbols,
partial_velocity)
from sympy.physics.mechanics.particle import Particle
from sympy.physics.mechanics.rigidbody import RigidBody
from sympy.physics.mechanics.functions import (msubs, find_dynamicsymbols,
_f_list_parser)
from sympy.physics.mechanics.linearize import Linearizer
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.iterables import iterable
__all__ = ['KanesMethod']
class KanesMethod(object):
"""Kane's method object.
This object is used to do the "book-keeping" as you go through and form
equations of motion in the way Kane presents in:
Kane, T., Levinson, D. Dynamics Theory and Applications. 1985 McGraw-Hill
The attributes are for equations in the form [M] udot = forcing.
Attributes
==========
q, u : Matrix
Matrices of the generalized coordinates and speeds
bodylist : iterable
Iterable of Point and RigidBody objects in the system.
forcelist : iterable
Iterable of (Point, vector) or (ReferenceFrame, vector) tuples
describing the forces on the system.
auxiliary : Matrix
If applicable, the set of auxiliary Kane's
equations used to solve for non-contributing
forces.
mass_matrix : Matrix
The system's mass matrix
forcing : Matrix
The system's forcing vector
mass_matrix_full : Matrix
The "mass matrix" for the u's and q's
forcing_full : Matrix
The "forcing vector" for the u's and q's
Examples
========
This is a simple example for a one degree of freedom translational
spring-mass-damper.
In this example, we first need to do the kinematics.
This involves creating generalized speeds and coordinates and their
derivatives.
Then we create a point and set its velocity in a frame.
>>> from sympy import symbols
>>> from sympy.physics.mechanics import dynamicsymbols, ReferenceFrame
>>> from sympy.physics.mechanics import Point, Particle, KanesMethod
>>> q, u = dynamicsymbols('q u')
>>> qd, ud = dynamicsymbols('q u', 1)
>>> m, c, k = symbols('m c k')
>>> N = ReferenceFrame('N')
>>> P = Point('P')
>>> P.set_vel(N, u * N.x)
Next we need to arrange/store information in the way that KanesMethod
    requires. The kinematic differential equations need to be stored in a
    list. A list of forces/torques must be constructed, where each entry in
the list is a (Point, Vector) or (ReferenceFrame, Vector) tuple, where the
Vectors represent the Force or Torque.
Next a particle needs to be created, and it needs to have a point and mass
assigned to it.
Finally, a list of all bodies and particles needs to be created.
>>> kd = [qd - u]
>>> FL = [(P, (-k * q - c * u) * N.x)]
>>> pa = Particle('pa', P, m)
>>> BL = [pa]
Finally we can generate the equations of motion.
First we create the KanesMethod object and supply an inertial frame,
coordinates, generalized speeds, and the kinematic differential equations.
Additional quantities such as configuration and motion constraints,
dependent coordinates and speeds, and auxiliary speeds are also supplied
here (see the online documentation).
Next we form FR* and FR to complete: Fr + Fr* = 0.
We have the equations of motion at this point.
    It makes sense to rearrange them though, so we calculate the mass matrix and
the forcing terms, for E.o.M. in the form: [MM] udot = forcing, where MM is
the mass matrix, udot is a vector of the time derivatives of the
generalized speeds, and forcing is a vector representing "forcing" terms.
>>> KM = KanesMethod(N, q_ind=[q], u_ind=[u], kd_eqs=kd)
>>> (fr, frstar) = KM.kanes_equations(BL, FL)
>>> MM = KM.mass_matrix
>>> forcing = KM.forcing
>>> rhs = MM.inv() * forcing
>>> rhs
Matrix([[(-c*u(t) - k*q(t))/m]])
>>> KM.linearize(A_and_B=True, new_method=True)[0]
Matrix([
[ 0, 1],
[-k/m, -c/m]])
Please look at the documentation pages for more information on how to
perform linearization and how to deal with dependent coordinates & speeds,
    and how to deal with bringing non-contributing forces into evidence.
"""
def __init__(self, frame, q_ind, u_ind, kd_eqs=None, q_dependent=None,
configuration_constraints=None, u_dependent=None,
velocity_constraints=None, acceleration_constraints=None,
u_auxiliary=None):
"""Please read the online documentation. """
if not isinstance(frame, ReferenceFrame):
            raise TypeError('An inertial ReferenceFrame must be supplied')
self._inertial = frame
self._fr = None
self._frstar = None
self._forcelist = None
self._bodylist = None
self._initialize_vectors(q_ind, q_dependent, u_ind, u_dependent,
u_auxiliary)
self._initialize_kindiffeq_matrices(kd_eqs)
self._initialize_constraint_matrices(configuration_constraints,
velocity_constraints, acceleration_constraints)
def _initialize_vectors(self, q_ind, q_dep, u_ind, u_dep, u_aux):
"""Initialize the coordinate and speed vectors."""
none_handler = lambda x: Matrix(x) if x else Matrix()
# Initialize generalized coordinates
q_dep = none_handler(q_dep)
if not iterable(q_ind):
raise TypeError('Generalized coordinates must be an iterable.')
if not iterable(q_dep):
raise TypeError('Dependent coordinates must be an iterable.')
q_ind = Matrix(q_ind)
self._qdep = q_dep
self._q = Matrix([q_ind, q_dep])
self._qdot = self.q.diff(dynamicsymbols._t)
# Initialize generalized speeds
u_dep = none_handler(u_dep)
if not iterable(u_ind):
raise TypeError('Generalized speeds must be an iterable.')
if not iterable(u_dep):
raise TypeError('Dependent speeds must be an iterable.')
u_ind = Matrix(u_ind)
self._udep = u_dep
self._u = Matrix([u_ind, u_dep])
self._udot = self.u.diff(dynamicsymbols._t)
self._uaux = none_handler(u_aux)
def _initialize_constraint_matrices(self, config, vel, acc):
"""Initializes constraint matrices."""
# Define vector dimensions
o = len(self.u)
m = len(self._udep)
p = o - m
none_handler = lambda x: Matrix(x) if x else Matrix()
# Initialize configuration constraints
config = none_handler(config)
if len(self._qdep) != len(config):
raise ValueError('There must be an equal number of dependent '
'coordinates and configuration constraints.')
self._f_h = none_handler(config)
# Initialize velocity and acceleration constraints
vel = none_handler(vel)
acc = none_handler(acc)
if len(vel) != m:
raise ValueError('There must be an equal number of dependent '
'speeds and velocity constraints.')
if acc and (len(acc) != m):
raise ValueError('There must be an equal number of dependent '
'speeds and acceleration constraints.')
if vel:
u_zero = dict((i, 0) for i in self.u)
udot_zero = dict((i, 0) for i in self._udot)
# When calling kanes_equations, another class instance will be
# created if auxiliary u's are present. In this case, the
            # computation of kinematic differential equation matrices will be
            # skipped, as these were already computed for the original
            # KanesMethod object and the qdot_u_map will not be available.
if self._qdot_u_map is not None:
vel = msubs(vel, self._qdot_u_map)
self._f_nh = msubs(vel, u_zero)
self._k_nh = (vel - self._f_nh).jacobian(self.u)
# If no acceleration constraints given, calculate them.
if not acc:
self._f_dnh = (self._k_nh.diff(dynamicsymbols._t) * self.u +
self._f_nh.diff(dynamicsymbols._t))
self._k_dnh = self._k_nh
else:
if self._qdot_u_map is not None:
acc = msubs(acc, self._qdot_u_map)
self._f_dnh = msubs(acc, udot_zero)
self._k_dnh = (acc - self._f_dnh).jacobian(self._udot)
# Form of non-holonomic constraints is B*u + C = 0.
# We partition B into independent and dependent columns:
# Ars is then -B_dep.inv() * B_ind, and it relates dependent speeds
# to independent speeds as: udep = Ars*uind, neglecting the C term.
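            # As a sketch of that relation: writing B = [B_ind | B_dep], the
            # constraint reads B_ind*u_ind + B_dep*u_dep + C = 0, so
            #     u_dep = -B_dep**-1 * B_ind * u_ind - B_dep**-1 * C,
            # and the -B_dep**-1 * B_ind factor is the Ars computed below via
            # LUsolve.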
B_ind = self._k_nh[:, :p]
B_dep = self._k_nh[:, p:o]
self._Ars = -B_dep.LUsolve(B_ind)
else:
self._f_nh = Matrix()
self._k_nh = Matrix()
self._f_dnh = Matrix()
self._k_dnh = Matrix()
self._Ars = Matrix()
def _initialize_kindiffeq_matrices(self, kdeqs):
"""Initialize the kinematic differential equation matrices."""
if kdeqs:
if len(self.q) != len(kdeqs):
raise ValueError('There must be an equal number of kinematic '
'differential equations and coordinates.')
kdeqs = Matrix(kdeqs)
u = self.u
qdot = self._qdot
# Dictionaries setting things to zero
u_zero = dict((i, 0) for i in u)
uaux_zero = dict((i, 0) for i in self._uaux)
qdot_zero = dict((i, 0) for i in qdot)
f_k = msubs(kdeqs, u_zero, qdot_zero)
k_ku = (msubs(kdeqs, qdot_zero) - f_k).jacobian(u)
k_kqdot = (msubs(kdeqs, u_zero) - f_k).jacobian(qdot)
f_k = k_kqdot.LUsolve(f_k)
k_ku = k_kqdot.LUsolve(k_ku)
k_kqdot = eye(len(qdot))
self._qdot_u_map = solve_linear_system_LU(
Matrix([k_kqdot.T, -(k_ku * u + f_k).T]).T, qdot)
self._f_k = msubs(f_k, uaux_zero)
self._k_ku = msubs(k_ku, uaux_zero)
self._k_kqdot = k_kqdot
else:
self._qdot_u_map = None
self._f_k = Matrix()
self._k_ku = Matrix()
self._k_kqdot = Matrix()
def _form_fr(self, fl):
"""Form the generalized active force."""
        if fl is not None and (not iterable(fl) or len(fl) == 0):
            raise ValueError('Force pairs must be supplied in a '
                             'non-empty iterable or None.')
N = self._inertial
# pull out relevant velocities for constructing partial velocities
vel_list, f_list = _f_list_parser(fl, N)
vel_list = [msubs(i, self._qdot_u_map) for i in vel_list]
# Fill Fr with dot product of partial velocities and forces
o = len(self.u)
b = len(f_list)
FR = zeros(o, 1)
partials = partial_velocity(vel_list, self.u, N)
for i in range(o):
FR[i] = sum(partials[j][i] & f_list[j] for j in range(b))
# In case there are dependent speeds
if self._udep:
p = o - len(self._udep)
FRtilde = FR[:p, 0]
FRold = FR[p:o, 0]
FRtilde += self._Ars.T * FRold
FR = FRtilde
self._forcelist = fl
self._fr = FR
return FR
def _form_frstar(self, bl):
"""Form the generalized inertia force."""
if not iterable(bl):
raise TypeError('Bodies must be supplied in an iterable.')
t = dynamicsymbols._t
N = self._inertial
# Dicts setting things to zero
udot_zero = dict((i, 0) for i in self._udot)
uaux_zero = dict((i, 0) for i in self._uaux)
uauxdot = [diff(i, t) for i in self._uaux]
uauxdot_zero = dict((i, 0) for i in uauxdot)
# Dictionary of q' and q'' to u and u'
q_ddot_u_map = dict((k.diff(t), v.diff(t)) for (k, v) in
self._qdot_u_map.items())
q_ddot_u_map.update(self._qdot_u_map)
# Fill up the list of partials: format is a list with num elements
# equal to number of entries in body list. Each of these elements is a
# list - either of length 1 for the translational components of
# particles or of length 2 for the translational and rotational
# components of rigid bodies. The inner most list is the list of
# partial velocities.
def get_partial_velocity(body):
if isinstance(body, RigidBody):
vlist = [body.masscenter.vel(N), body.frame.ang_vel_in(N)]
elif isinstance(body, Particle):
vlist = [body.point.vel(N),]
else:
raise TypeError('The body list may only contain either '
'RigidBody or Particle as list elements.')
v = [msubs(vel, self._qdot_u_map) for vel in vlist]
return partial_velocity(v, self.u, N)
partials = [get_partial_velocity(body) for body in bl]
# Compute fr_star in two components:
# fr_star = -(MM*u' + nonMM)
o = len(self.u)
MM = zeros(o, o)
nonMM = zeros(o, 1)
zero_uaux = lambda expr: msubs(expr, uaux_zero)
zero_udot_uaux = lambda expr: msubs(msubs(expr, udot_zero), uaux_zero)
for i, body in enumerate(bl):
if isinstance(body, RigidBody):
M = zero_uaux(body.mass)
I = zero_uaux(body.central_inertia)
vel = zero_uaux(body.masscenter.vel(N))
omega = zero_uaux(body.frame.ang_vel_in(N))
acc = zero_udot_uaux(body.masscenter.acc(N))
inertial_force = (M.diff(t) * vel + M * acc)
inertial_torque = zero_uaux((I.dt(body.frame) & omega) +
msubs(I & body.frame.ang_acc_in(N), udot_zero) +
(omega ^ (I & omega)))
for j in range(o):
tmp_vel = zero_uaux(partials[i][0][j])
tmp_ang = zero_uaux(I & partials[i][1][j])
for k in range(o):
# translational
MM[j, k] += M * (tmp_vel & partials[i][0][k])
# rotational
MM[j, k] += (tmp_ang & partials[i][1][k])
nonMM[j] += inertial_force & partials[i][0][j]
nonMM[j] += inertial_torque & partials[i][1][j]
else:
M = zero_uaux(body.mass)
vel = zero_uaux(body.point.vel(N))
acc = zero_udot_uaux(body.point.acc(N))
inertial_force = (M.diff(t) * vel + M * acc)
for j in range(o):
temp = zero_uaux(partials[i][0][j])
for k in range(o):
MM[j, k] += M * (temp & partials[i][0][k])
nonMM[j] += inertial_force & partials[i][0][j]
# Compose fr_star out of MM and nonMM
MM = zero_uaux(msubs(MM, q_ddot_u_map))
nonMM = msubs(msubs(nonMM, q_ddot_u_map),
udot_zero, uauxdot_zero, uaux_zero)
fr_star = -(MM * msubs(Matrix(self._udot), uauxdot_zero) + nonMM)
# If there are dependent speeds, we need to find fr_star_tilde
if self._udep:
p = o - len(self._udep)
fr_star_ind = fr_star[:p, 0]
fr_star_dep = fr_star[p:o, 0]
fr_star = fr_star_ind + (self._Ars.T * fr_star_dep)
# Apply the same to MM
MMi = MM[:p, :]
MMd = MM[p:o, :]
MM = MMi + (self._Ars.T * MMd)
self._bodylist = bl
self._frstar = fr_star
self._k_d = MM
self._f_d = -msubs(self._fr + self._frstar, udot_zero)
return fr_star
def to_linearizer(self):
"""Returns an instance of the Linearizer class, initiated from the
data in the KanesMethod class. This may be more desirable than using
the linearize class method, as the Linearizer object will allow more
efficient recalculation (i.e. about varying operating points)."""
if (self._fr is None) or (self._frstar is None):
raise ValueError('Need to compute Fr, Fr* first.')
# Get required equation components. The Kane's method class breaks
# these into pieces. Need to reassemble
f_c = self._f_h
if self._f_nh and self._k_nh:
f_v = self._f_nh + self._k_nh*Matrix(self.u)
else:
f_v = Matrix()
if self._f_dnh and self._k_dnh:
f_a = self._f_dnh + self._k_dnh*Matrix(self._udot)
else:
f_a = Matrix()
# Dicts to sub to zero, for splitting up expressions
u_zero = dict((i, 0) for i in self.u)
ud_zero = dict((i, 0) for i in self._udot)
qd_zero = dict((i, 0) for i in self._qdot)
qd_u_zero = dict((i, 0) for i in Matrix([self._qdot, self.u]))
# Break the kinematic differential eqs apart into f_0 and f_1
f_0 = msubs(self._f_k, u_zero) + self._k_kqdot*Matrix(self._qdot)
f_1 = msubs(self._f_k, qd_zero) + self._k_ku*Matrix(self.u)
# Break the dynamic differential eqs into f_2 and f_3
f_2 = msubs(self._frstar, qd_u_zero)
f_3 = msubs(self._frstar, ud_zero) + self._fr
f_4 = zeros(len(f_2), 1)
# Get the required vector components
q = self.q
u = self.u
if self._qdep:
q_i = q[:-len(self._qdep)]
else:
q_i = q
q_d = self._qdep
if self._udep:
u_i = u[:-len(self._udep)]
else:
u_i = u
u_d = self._udep
# Form dictionary to set auxiliary speeds & their derivatives to 0.
uaux = self._uaux
uauxdot = uaux.diff(dynamicsymbols._t)
uaux_zero = dict((i, 0) for i in Matrix([uaux, uauxdot]))
# Checking for dynamic symbols outside the dynamic differential
# equations; throws error if there is.
sym_list = set(Matrix([q, self._qdot, u, self._udot, uaux, uauxdot]))
if any(find_dynamicsymbols(i, sym_list) for i in [self._k_kqdot,
self._k_ku, self._f_k, self._k_dnh, self._f_dnh, self._k_d]):
raise ValueError('Cannot have dynamicsymbols outside dynamic \
forcing vector.')
# Find all other dynamic symbols, forming the forcing vector r.
# Sort r to make it canonical.
r = list(find_dynamicsymbols(msubs(self._f_d, uaux_zero), sym_list))
r.sort(key=default_sort_key)
# Check for any derivatives of variables in r that are also found in r.
for i in r:
if diff(i, dynamicsymbols._t) in r:
raise ValueError('Cannot have derivatives of specified \
quantities when linearizing forcing terms.')
return Linearizer(f_0, f_1, f_2, f_3, f_4, f_c, f_v, f_a, q, u, q_i,
q_d, u_i, u_d, r)
def linearize(self, **kwargs):
""" Linearize the equations of motion about a symbolic operating point.
If kwarg A_and_B is False (default), returns M, A, B, r for the
linearized form, M*[q', u']^T = A*[q_ind, u_ind]^T + B*r.
If kwarg A_and_B is True, returns A, B, r for the linearized form
dx = A*x + B*r, where x = [q_ind, u_ind]^T. Note that this is
computationally intensive if there are many symbolic parameters. For
this reason, it may be more desirable to use the default A_and_B=False,
returning M, A, and B. Values may then be substituted in to these
matrices, and the state space form found as
A = P.T*M.inv()*A, B = P.T*M.inv()*B, where P = Linearizer.perm_mat.
In both cases, r is found as all dynamicsymbols in the equations of
motion that are not part of q, u, q', or u'. They are sorted in
canonical form.
The operating points may be also entered using the ``op_point`` kwarg.
This takes a dictionary of {symbol: value}, or a an iterable of such
dictionaries. The values may be numberic or symbolic. The more values
you can specify beforehand, the faster this computation will run.
As part of the deprecation cycle, the new method will not be used unless
the kwarg ``new_method`` is set to True. If the kwarg is missing, or set
to false, the old linearization method will be used. After next release
the need for this kwarg will be removed.
For more documentation, please see the ``Linearizer`` class."""
if 'new_method' not in kwargs or not kwargs['new_method']:
# User is still using old code.
SymPyDeprecationWarning('The linearize class method has changed '
'to a new interface, the old method is deprecated. To '
'use the new method, set the kwarg `new_method=True`. '
'For more information, read the docstring '
'of `linearize`.').warn()
return self._old_linearize()
# Remove the new method flag, before passing kwargs to linearize
kwargs.pop('new_method')
linearizer = self.to_linearizer()
result = linearizer.linearize(**kwargs)
return result + (linearizer.r,)
def _old_linearize(self):
"""Old method to linearize the equations of motion. Returns a tuple of
(f_lin_A, f_lin_B, y) for forming [M]qudot = [f_lin_A]qu + [f_lin_B]y.
Deprecated in favor of new method using Linearizer class. Please change
your code to use the new `linearize` method."""
if (self._fr is None) or (self._frstar is None):
raise ValueError('Need to compute Fr, Fr* first.')
        # Note that this is now unnecessary, and it should never be
# encountered; I still think it should be in here in case the user
# manually sets these matrices incorrectly.
for i in self.q:
if self._k_kqdot.diff(i) != 0 * self._k_kqdot:
raise ValueError('Matrix K_kqdot must not depend on any q.')
t = dynamicsymbols._t
uaux = self._uaux
uauxdot = [diff(i, t) for i in uaux]
# dictionary of auxiliary speeds & derivatives which are equal to zero
subdict = dict(zip(uaux[:] + uauxdot[:],
[0] * (len(uaux) + len(uauxdot))))
# Checking for dynamic symbols outside the dynamic differential
# equations; throws error if there is.
insyms = set(self.q[:] + self._qdot[:] + self.u[:] + self._udot[:] +
uaux[:] + uauxdot)
if any(find_dynamicsymbols(i, insyms) for i in [self._k_kqdot,
self._k_ku, self._f_k, self._k_dnh, self._f_dnh, self._k_d]):
raise ValueError('Cannot have dynamicsymbols outside dynamic \
forcing vector.')
other_dyns = list(find_dynamicsymbols(msubs(self._f_d, subdict), insyms))
# make it canonically ordered so the jacobian is canonical
other_dyns.sort(key=default_sort_key)
for i in other_dyns:
if diff(i, dynamicsymbols._t) in other_dyns:
raise ValueError('Cannot have derivatives of specified '
'quantities when linearizing forcing terms.')
o = len(self.u) # number of speeds
n = len(self.q) # number of coordinates
l = len(self._qdep) # number of configuration constraints
m = len(self._udep) # number of motion constraints
qi = Matrix(self.q[: n - l]) # independent coords
qd = Matrix(self.q[n - l: n]) # dependent coords; could be empty
ui = Matrix(self.u[: o - m]) # independent speeds
ud = Matrix(self.u[o - m: o]) # dependent speeds; could be empty
qdot = Matrix(self._qdot) # time derivatives of coordinates
# with equations in the form MM udot = forcing, expand that to:
# MM_full [q,u].T = forcing_full. This combines coordinates and
# speeds together for the linearization, which is necessary for the
# linearization process, due to dependent coordinates. f1 is the rows
# from the kinematic differential equations, f2 is the rows from the
# dynamic differential equations (and differentiated non-holonomic
# constraints).
f1 = self._k_ku * Matrix(self.u) + self._f_k
f2 = self._f_d
# Only want to do this if these matrices have been filled in, which
# occurs when there are dependent speeds
if m != 0:
f2 = self._f_d.col_join(self._f_dnh)
fnh = self._f_nh + self._k_nh * Matrix(self.u)
f1 = msubs(f1, subdict)
f2 = msubs(f2, subdict)
fh = msubs(self._f_h, subdict)
fku = msubs(self._k_ku * Matrix(self.u), subdict)
fkf = msubs(self._f_k, subdict)
# In the code below, we are applying the chain rule by hand on these
# things. All the matrices have been changed into vectors (by
# multiplying the dynamic symbols which it is paired with), so we can
# take the jacobian of them. The basic operation is take the jacobian
# of the f1, f2 vectors wrt all of the q's and u's. f1 is a function of
# q, u, and t; f2 is a function of q, qdot, u, and t. In the code
# below, we are not considering perturbations in t. So if f1 is a
# function of the q's, u's but some of the q's or u's could be
# dependent on other q's or u's (qd's might be dependent on qi's, ud's
# might be dependent on ui's or qi's), so what we do is take the
# jacobian of the f1 term wrt qi's and qd's, the jacobian wrt the qd's
# gets multiplied by the jacobian of qd wrt qi, this is extended for
# the ud's as well. dqd_dqi is computed by taking a taylor expansion of
# the holonomic constraint equations about q*, treating q* - q as dq,
        # separating into dqd (dependent q's) and dqi (independent q's) and the
# rearranging for dqd/dqi. This is again extended for the speeds.
# First case: configuration and motion constraints
if (l != 0) and (m != 0):
fh_jac_qi = fh.jacobian(qi)
fh_jac_qd = fh.jacobian(qd)
fnh_jac_qi = fnh.jacobian(qi)
fnh_jac_qd = fnh.jacobian(qd)
fnh_jac_ui = fnh.jacobian(ui)
fnh_jac_ud = fnh.jacobian(ud)
fku_jac_qi = fku.jacobian(qi)
fku_jac_qd = fku.jacobian(qd)
fku_jac_ui = fku.jacobian(ui)
fku_jac_ud = fku.jacobian(ud)
fkf_jac_qi = fkf.jacobian(qi)
fkf_jac_qd = fkf.jacobian(qd)
f1_jac_qi = f1.jacobian(qi)
f1_jac_qd = f1.jacobian(qd)
f1_jac_ui = f1.jacobian(ui)
f1_jac_ud = f1.jacobian(ud)
f2_jac_qi = f2.jacobian(qi)
f2_jac_qd = f2.jacobian(qd)
f2_jac_ui = f2.jacobian(ui)
f2_jac_ud = f2.jacobian(ud)
f2_jac_qdot = f2.jacobian(qdot)
dqd_dqi = - fh_jac_qd.LUsolve(fh_jac_qi)
dud_dqi = fnh_jac_ud.LUsolve(fnh_jac_qd * dqd_dqi - fnh_jac_qi)
dud_dui = - fnh_jac_ud.LUsolve(fnh_jac_ui)
dqdot_dui = - self._k_kqdot.inv() * (fku_jac_ui +
fku_jac_ud * dud_dui)
dqdot_dqi = - self._k_kqdot.inv() * (fku_jac_qi + fkf_jac_qi +
(fku_jac_qd + fkf_jac_qd) * dqd_dqi + fku_jac_ud * dud_dqi)
f1_q = f1_jac_qi + f1_jac_qd * dqd_dqi + f1_jac_ud * dud_dqi
f1_u = f1_jac_ui + f1_jac_ud * dud_dui
f2_q = (f2_jac_qi + f2_jac_qd * dqd_dqi + f2_jac_qdot * dqdot_dqi +
f2_jac_ud * dud_dqi)
f2_u = f2_jac_ui + f2_jac_ud * dud_dui + f2_jac_qdot * dqdot_dui
# Second case: configuration constraints only
elif l != 0:
dqd_dqi = - fh.jacobian(qd).LUsolve(fh.jacobian(qi))
dqdot_dui = - self._k_kqdot.inv() * fku.jacobian(ui)
dqdot_dqi = - self._k_kqdot.inv() * (fku.jacobian(qi) +
fkf.jacobian(qi) + (fku.jacobian(qd) + fkf.jacobian(qd)) *
dqd_dqi)
f1_q = (f1.jacobian(qi) + f1.jacobian(qd) * dqd_dqi)
f1_u = f1.jacobian(ui)
f2_jac_qdot = f2.jacobian(qdot)
f2_q = (f2.jacobian(qi) + f2.jacobian(qd) * dqd_dqi +
                    f2_jac_qdot * dqdot_dqi)
f2_u = f2.jacobian(ui) + f2_jac_qdot * dqdot_dui
# Third case: motion constraints only
elif m != 0:
dud_dqi = fnh.jacobian(ud).LUsolve(- fnh.jacobian(qi))
dud_dui = - fnh.jacobian(ud).LUsolve(fnh.jacobian(ui))
dqdot_dui = - self._k_kqdot.inv() * (fku.jacobian(ui) +
fku.jacobian(ud) * dud_dui)
dqdot_dqi = - self._k_kqdot.inv() * (fku.jacobian(qi) +
fkf.jacobian(qi) + fku.jacobian(ud) * dud_dqi)
f1_jac_ud = f1.jacobian(ud)
f2_jac_qdot = f2.jacobian(qdot)
f2_jac_ud = f2.jacobian(ud)
f1_q = f1.jacobian(qi) + f1_jac_ud * dud_dqi
f1_u = f1.jacobian(ui) + f1_jac_ud * dud_dui
f2_q = (f2.jacobian(qi) + f2_jac_qdot * dqdot_dqi + f2_jac_ud
* dud_dqi)
f2_u = (f2.jacobian(ui) + f2_jac_ud * dud_dui + f2_jac_qdot *
dqdot_dui)
# Fourth case: No constraints
else:
dqdot_dui = - self._k_kqdot.inv() * fku.jacobian(ui)
dqdot_dqi = - self._k_kqdot.inv() * (fku.jacobian(qi) +
fkf.jacobian(qi))
f1_q = f1.jacobian(qi)
f1_u = f1.jacobian(ui)
f2_jac_qdot = f2.jacobian(qdot)
f2_q = f2.jacobian(qi) + f2_jac_qdot * dqdot_dqi
f2_u = f2.jacobian(ui) + f2_jac_qdot * dqdot_dui
f_lin_A = -(f1_q.row_join(f1_u)).col_join(f2_q.row_join(f2_u))
if other_dyns:
f1_oths = f1.jacobian(other_dyns)
f2_oths = f2.jacobian(other_dyns)
f_lin_B = -f1_oths.col_join(f2_oths)
else:
f_lin_B = Matrix()
return (f_lin_A, f_lin_B, Matrix(other_dyns))
def kanes_equations(self, bodies, loads=None):
""" Method to form Kane's equations, Fr + Fr* = 0.
Returns (Fr, Fr*). In the case where auxiliary generalized speeds are
present (say, s auxiliary speeds, o generalized speeds, and m motion
constraints) the length of the returned vectors will be o - m + s in
length. The first o - m equations will be the constrained Kane's
equations, then the s auxiliary Kane's equations. These auxiliary
        equations can be accessed via the auxiliary_eqs property.
Parameters
==========
bodies : iterable
An iterable of all RigidBody's and Particle's in the system.
A system must have at least one body.
loads : iterable
Takes in an iterable of (Particle, Vector) or (ReferenceFrame, Vector)
tuples which represent the force at a point or torque on a frame.
            Must be either a non-empty iterable of tuples or None, which corresponds
            to a system with no applied loads.
"""
        if (bodies is None and loads is not None) or isinstance(bodies[0], tuple):
# This switches the order if they use the old way.
bodies, loads = loads, bodies
SymPyDeprecationWarning(value='The API for kanes_equations() has changed such '
'that the loads (forces and torques) are now the second argument '
'and is optional with None being the default.',
feature='The kanes_equation() argument order',
                useinstead='the switched argument order to update your code. For example: '
                'kanes_equations(loads, bodies) -> kanes_equations(bodies, loads).',
issue=10945, deprecated_since_version="1.1").warn()
if not self._k_kqdot:
raise AttributeError('Create an instance of KanesMethod with '
'kinematic differential equations to use this method.')
fr = self._form_fr(loads)
frstar = self._form_frstar(bodies)
if self._uaux:
if not self._udep:
km = KanesMethod(self._inertial, self.q, self._uaux,
u_auxiliary=self._uaux)
else:
km = KanesMethod(self._inertial, self.q, self._uaux,
u_auxiliary=self._uaux, u_dependent=self._udep,
velocity_constraints=(self._k_nh * self.u +
self._f_nh))
km._qdot_u_map = self._qdot_u_map
self._km = km
fraux = km._form_fr(loads)
frstaraux = km._form_frstar(bodies)
self._aux_eq = fraux + frstaraux
self._fr = fr.col_join(fraux)
self._frstar = frstar.col_join(frstaraux)
return (self._fr, self._frstar)
def rhs(self, inv_method=None):
"""Returns the system's equations of motion in first order form. The
output is the right hand side of::
x' = |q'| =: f(q, u, r, p, t)
|u'|
The right hand side is what is needed by most numerical ODE
integrators.
Parameters
==========
inv_method : str
The specific sympy inverse matrix calculation method to use. For a
list of valid methods, see
:meth:`~sympy.matrices.matrices.MatrixBase.inv`
"""
rhs = zeros(len(self.q) + len(self.u), c=1)
kdes = self.kindiffdict()
for i, q_i in enumerate(self.q):
rhs[i] = kdes[q_i.diff()]
if inv_method is None:
rhs[len(self.q):, 0] = self.mass_matrix.LUsolve(self.forcing)
else:
rhs[len(self.q):, 0] = (self.mass_matrix.inv(inv_method,
try_block_diag=True) *
self.forcing)
return rhs
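    # Hedged illustration (assuming the one-particle sketch in kanes_equations()
    # above has been built as `km`): rhs() stacks the kinematic equation q' = u
    # on top of u' = mass_matrix**-1 * forcing, ready for lambdify/odeint.
    #
    #   km.rhs()       # expected: Matrix([[u(t)], [F/m]])
    #   km.rhs('LU')   # same result, forcing an LU-based mass matrix inverse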
def kindiffdict(self):
"""Returns a dictionary mapping q' to u."""
if not self._qdot_u_map:
raise AttributeError('Create an instance of KanesMethod with '
'kinematic differential equations to use this method.')
return self._qdot_u_map
@property
def auxiliary_eqs(self):
"""A matrix containing the auxiliary equations."""
if not self._fr or not self._frstar:
raise ValueError('Need to compute Fr, Fr* first.')
if not self._uaux:
raise ValueError('No auxiliary speeds have been declared.')
return self._aux_eq
@property
def mass_matrix(self):
"""The mass matrix of the system."""
if not self._fr or not self._frstar:
raise ValueError('Need to compute Fr, Fr* first.')
return Matrix([self._k_d, self._k_dnh])
@property
def mass_matrix_full(self):
"""The mass matrix of the system, augmented by the kinematic
differential equations."""
if not self._fr or not self._frstar:
raise ValueError('Need to compute Fr, Fr* first.')
o = len(self.u)
n = len(self.q)
return ((self._k_kqdot).row_join(zeros(n, o))).col_join((zeros(o,
n)).row_join(self.mass_matrix))
@property
def forcing(self):
"""The forcing vector of the system."""
if not self._fr or not self._frstar:
raise ValueError('Need to compute Fr, Fr* first.')
return -Matrix([self._f_d, self._f_dnh])
@property
def forcing_full(self):
"""The forcing vector of the system, augmented by the kinematic
differential equations."""
if not self._fr or not self._frstar:
raise ValueError('Need to compute Fr, Fr* first.')
f1 = self._k_ku * Matrix(self.u) + self._f_k
return -Matrix([f1, self._f_d, self._f_dnh])
@property
def q(self):
return self._q
@property
def u(self):
return self._u
@property
def bodylist(self):
return self._bodylist
@property
def forcelist(self):
return self._forcelist
| bsd-3-clause |
kevin8909/xjerp | openerp/addons/account/account_invoice.py | 9 | 96259 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
import openerp.addons.decimal_precision as dp
import openerp.exceptions
from openerp import netsvc
from openerp import pooler
from openerp.osv import fields, osv, orm
from openerp.tools.translate import _
class account_invoice(osv.osv):
def _amount_all(self, cr, uid, ids, name, args, context=None):
res = {}
for invoice in self.browse(cr, uid, ids, context=context):
res[invoice.id] = {
'amount_untaxed': 0.0,
'amount_tax': 0.0,
'amount_total': 0.0
}
for line in invoice.invoice_line:
res[invoice.id]['amount_untaxed'] += line.price_subtotal
for line in invoice.tax_line:
res[invoice.id]['amount_tax'] += line.amount
res[invoice.id]['amount_total'] = res[invoice.id]['amount_tax'] + res[invoice.id]['amount_untaxed']
return res
def _get_journal(self, cr, uid, context=None):
if context is None:
context = {}
type_inv = context.get('type', 'out_invoice')
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
company_id = context.get('company_id', user.company_id.id)
type2journal = {'out_invoice': 'sale', 'in_invoice': 'purchase', 'out_refund': 'sale_refund', 'in_refund': 'purchase_refund'}
journal_obj = self.pool.get('account.journal')
domain = [('company_id', '=', company_id)]
if isinstance(type_inv, list):
domain.append(('type', 'in', [type2journal.get(type) for type in type_inv if type2journal.get(type)]))
else:
domain.append(('type', '=', type2journal.get(type_inv, 'sale')))
res = journal_obj.search(cr, uid, domain, limit=1)
return res and res[0] or False
def _get_currency(self, cr, uid, context=None):
res = False
journal_id = self._get_journal(cr, uid, context=context)
if journal_id:
journal = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context)
res = journal.currency and journal.currency.id or journal.company_id.currency_id.id
return res
def _get_journal_analytic(self, cr, uid, type_inv, context=None):
type2journal = {'out_invoice': 'sale', 'in_invoice': 'purchase', 'out_refund': 'sale', 'in_refund': 'purchase'}
tt = type2journal.get(type_inv, 'sale')
result = self.pool.get('account.analytic.journal').search(cr, uid, [('type','=',tt)], context=context)
if not result:
raise osv.except_osv(_('No Analytic Journal!'),_("You must define an analytic journal of type '%s'!") % (tt,))
return result[0]
def _get_type(self, cr, uid, context=None):
if context is None:
context = {}
return context.get('type', 'out_invoice')
def _reconciled(self, cr, uid, ids, name, args, context=None):
res = {}
wf_service = netsvc.LocalService("workflow")
for inv in self.browse(cr, uid, ids, context=context):
res[inv.id] = self.test_paid(cr, uid, [inv.id])
if not res[inv.id] and inv.state == 'paid':
wf_service.trg_validate(uid, 'account.invoice', inv.id, 'open_test', cr)
return res
def _get_reference_type(self, cr, uid, context=None):
return [('none', _('Free Reference'))]
def _amount_residual(self, cr, uid, ids, name, args, context=None):
"""Function of the field residua. It computes the residual amount (balance) for each invoice"""
if context is None:
context = {}
ctx = context.copy()
result = {}
currency_obj = self.pool.get('res.currency')
for invoice in self.browse(cr, uid, ids, context=context):
nb_inv_in_partial_rec = max_invoice_id = 0
result[invoice.id] = 0.0
if invoice.move_id:
for aml in invoice.move_id.line_id:
if aml.account_id.type in ('receivable','payable'):
if aml.currency_id and aml.currency_id.id == invoice.currency_id.id:
result[invoice.id] += aml.amount_residual_currency
else:
ctx['date'] = aml.date
result[invoice.id] += currency_obj.compute(cr, uid, aml.company_id.currency_id.id, invoice.currency_id.id, aml.amount_residual, context=ctx)
if aml.reconcile_partial_id.line_partial_ids:
#we check if the invoice is partially reconciled and if there are other invoices
#involved in this partial reconciliation (and we sum these invoices)
for line in aml.reconcile_partial_id.line_partial_ids:
if line.invoice and invoice.type == line.invoice.type:
nb_inv_in_partial_rec += 1
#store the max invoice id as for this invoice we will make a balance instead of a simple division
max_invoice_id = max(max_invoice_id, line.invoice.id)
if nb_inv_in_partial_rec:
#if there are several invoices in a partial reconciliation, we split the residual by the number
                #of invoices to have a sum of residual amounts that matches the partner balance
new_value = currency_obj.round(cr, uid, invoice.currency_id, result[invoice.id] / nb_inv_in_partial_rec)
if invoice.id == max_invoice_id:
                    #if it's the last invoice of the bunch of invoices partially reconciled together, we make a
#balance to avoid rounding errors
result[invoice.id] = result[invoice.id] - ((nb_inv_in_partial_rec - 1) * new_value)
else:
result[invoice.id] = new_value
#prevent the residual amount on the invoice to be less than 0
result[invoice.id] = max(result[invoice.id], 0.0)
return result
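    # Hedged numeric illustration of the splitting above (figures are invented):
    # if three invoices with ids 7, 8 and 9 share one partial reconciliation and
    # the residual computed for each of them is 100.0, nb_inv_in_partial_rec is 3,
    # so invoices 7 and 8 each get round(100.0 / 3) = 33.33 while invoice 9, having
    # the max id, takes the balance 100.0 - 2 * 33.33 = 33.34, absorbing the
    # rounding difference.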
# Give Journal Items related to the payment reconciled to this invoice
# Return ids of partial and total payments related to the selected invoices
def _get_lines(self, cr, uid, ids, name, arg, context=None):
res = {}
for invoice in self.browse(cr, uid, ids, context=context):
id = invoice.id
res[id] = []
if not invoice.move_id:
continue
data_lines = [x for x in invoice.move_id.line_id if x.account_id.id == invoice.account_id.id]
partial_ids = []
for line in data_lines:
ids_line = []
if line.reconcile_id:
ids_line = line.reconcile_id.line_id
elif line.reconcile_partial_id:
ids_line = line.reconcile_partial_id.line_partial_ids
l = map(lambda x: x.id, ids_line)
partial_ids.append(line.id)
            res[id] = [x for x in l if x <> line.id and x not in partial_ids]
return res
def _get_invoice_line(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('account.invoice.line').browse(cr, uid, ids, context=context):
result[line.invoice_id.id] = True
return result.keys()
def _get_invoice_tax(self, cr, uid, ids, context=None):
result = {}
for tax in self.pool.get('account.invoice.tax').browse(cr, uid, ids, context=context):
result[tax.invoice_id.id] = True
return result.keys()
def _compute_lines(self, cr, uid, ids, name, args, context=None):
result = {}
for invoice in self.browse(cr, uid, ids, context=context):
src = []
lines = []
if invoice.move_id:
for m in invoice.move_id.line_id:
temp_lines = []
if m.reconcile_id:
temp_lines = map(lambda x: x.id, m.reconcile_id.line_id)
elif m.reconcile_partial_id:
temp_lines = map(lambda x: x.id, m.reconcile_partial_id.line_partial_ids)
lines += [x for x in temp_lines if x not in lines]
src.append(m.id)
lines = filter(lambda x: x not in src, lines)
result[invoice.id] = lines
return result
def _get_invoice_from_line(self, cr, uid, ids, context=None):
move = {}
for line in self.pool.get('account.move.line').browse(cr, uid, ids, context=context):
if line.reconcile_partial_id:
for line2 in line.reconcile_partial_id.line_partial_ids:
move[line2.move_id.id] = True
if line.reconcile_id:
for line2 in line.reconcile_id.line_id:
move[line2.move_id.id] = True
invoice_ids = []
if move:
invoice_ids = self.pool.get('account.invoice').search(cr, uid, [('move_id','in',move.keys())], context=context)
return invoice_ids
def _get_invoice_from_reconcile(self, cr, uid, ids, context=None):
move = {}
for r in self.pool.get('account.move.reconcile').browse(cr, uid, ids, context=context):
for line in r.line_partial_ids:
move[line.move_id.id] = True
for line in r.line_id:
move[line.move_id.id] = True
invoice_ids = []
if move:
invoice_ids = self.pool.get('account.invoice').search(cr, uid, [('move_id','in',move.keys())], context=context)
return invoice_ids
_name = "account.invoice"
_inherit = ['mail.thread']
_description = 'Invoice'
_order = "id desc"
_track = {
'type': {
},
'state': {
'account.mt_invoice_paid': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'paid' and obj['type'] in ('out_invoice', 'out_refund'),
'account.mt_invoice_validated': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'open' and obj['type'] in ('out_invoice', 'out_refund'),
},
}
_columns = {
'name': fields.char('Description', size=64, select=True, readonly=True, states={'draft':[('readonly',False)]}),
'origin': fields.char('Source Document', size=64, help="Reference of the document that produced this invoice.", readonly=True, states={'draft':[('readonly',False)]}),
'supplier_invoice_number': fields.char('Supplier Invoice Number', size=64, help="The reference of this invoice as provided by the supplier.", readonly=True, states={'draft':[('readonly',False)]}),
'type': fields.selection([
('out_invoice','Customer Invoice'),
('in_invoice','Supplier Invoice'),
('out_refund','Customer Refund'),
('in_refund','Supplier Refund'),
],'Type', readonly=True, select=True, change_default=True, track_visibility='always'),
'number': fields.related('move_id','name', type='char', readonly=True, size=64, relation='account.move', store=True, string='Number'),
'internal_number': fields.char('Invoice Number', size=32, readonly=True, help="Unique number of the invoice, computed automatically when the invoice is created."),
'reference': fields.char('Invoice Reference', size=64, help="The partner reference of this invoice."),
'reference_type': fields.selection(_get_reference_type, 'Payment Reference',
required=True, readonly=True, states={'draft':[('readonly',False)]}),
'comment': fields.text('Additional Information'),
'state': fields.selection([
('draft','Draft'),
('proforma','Pro-forma'),
('proforma2','Pro-forma'),
('open','Open'),
('paid','Paid'),
('cancel','Cancelled'),
],'Status', select=True, readonly=True, track_visibility='onchange',
help=' * The \'Draft\' status is used when a user is encoding a new and unconfirmed Invoice. \
            \n* The \'Pro-forma\' status is used when the invoice is in Pro-forma state; it does not have an invoice number yet. \
            \n* The \'Open\' status is used once the user validates the invoice; an invoice number is generated and the invoice stays open until the user pays it. \
\n* The \'Paid\' status is set automatically when the invoice is paid. Its related journal entries may or may not be reconciled. \
            \n* The \'Cancelled\' status is used when the user cancels the invoice.'),
'sent': fields.boolean('Sent', readonly=True, help="It indicates that the invoice has been sent."),
'date_invoice': fields.date('Invoice Date', readonly=True, states={'draft':[('readonly',False)]}, select=True, help="Keep empty to use the current date"),
'date_due': fields.date('Due Date', readonly=True, states={'draft':[('readonly',False)]}, select=True,
help="If you use payment terms, the due date will be computed automatically at the generation "\
"of accounting entries. The payment term may compute several due dates, for example 50% now and 50% in one month, but if you want to force a due date, make sure that the payment term is not set on the invoice. If you keep the payment term and the due date empty, it means direct payment."),
'partner_id': fields.many2one('res.partner', 'Partner', change_default=True, readonly=True, required=True, states={'draft':[('readonly',False)]}, track_visibility='always'),
'payment_term': fields.many2one('account.payment.term', 'Payment Terms',readonly=True, states={'draft':[('readonly',False)]},
help="If you use payment terms, the due date will be computed automatically at the generation "\
"of accounting entries. If you keep the payment term and the due date empty, it means direct payment. "\
"The payment term may compute several due dates, for example 50% now, 50% in one month."),
'period_id': fields.many2one('account.period', 'Force Period', domain=[('state','<>','done')], help="Keep empty to use the period of the validation(invoice) date.", readonly=True, states={'draft':[('readonly',False)]}),
'account_id': fields.many2one('account.account', 'Account', required=True, readonly=True, states={'draft':[('readonly',False)]}, help="The partner account used for this invoice."),
'invoice_line': fields.one2many('account.invoice.line', 'invoice_id', 'Invoice Lines', readonly=True, states={'draft':[('readonly',False)]}),
'tax_line': fields.one2many('account.invoice.tax', 'invoice_id', 'Tax Lines', readonly=True, states={'draft':[('readonly',False)]}),
'move_id': fields.many2one('account.move', 'Journal Entry', readonly=True, select=1, ondelete='restrict', help="Link to the automatically generated Journal Items."),
'amount_untaxed': fields.function(_amount_all, digits_compute=dp.get_precision('Account'), string='Subtotal', track_visibility='always',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
'account.invoice.tax': (_get_invoice_tax, None, 20),
'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount','invoice_id'], 20),
},
multi='all'),
'amount_tax': fields.function(_amount_all, digits_compute=dp.get_precision('Account'), string='Tax',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
'account.invoice.tax': (_get_invoice_tax, None, 20),
'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount','invoice_id'], 20),
},
multi='all'),
'amount_total': fields.function(_amount_all, digits_compute=dp.get_precision('Account'), string='Total',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
'account.invoice.tax': (_get_invoice_tax, None, 20),
'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount','invoice_id'], 20),
},
multi='all'),
'currency_id': fields.many2one('res.currency', 'Currency', required=True, readonly=True, states={'draft':[('readonly',False)]}, track_visibility='always'),
'journal_id': fields.many2one('account.journal', 'Journal', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'company_id': fields.many2one('res.company', 'Company', required=True, change_default=True, readonly=True, states={'draft':[('readonly',False)]}),
'check_total': fields.float('Verification Total', digits_compute=dp.get_precision('Account'), readonly=True, states={'draft':[('readonly',False)]}),
'reconciled': fields.function(_reconciled, string='Paid/Reconciled', type='boolean',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, None, 50), # Check if we can remove ?
'account.move.line': (_get_invoice_from_line, None, 50),
'account.move.reconcile': (_get_invoice_from_reconcile, None, 50),
}, help="It indicates that the invoice has been paid and the journal entry of the invoice has been reconciled with one or several journal entries of payment."),
'partner_bank_id': fields.many2one('res.partner.bank', 'Bank Account',
help='Bank Account Number to which the invoice will be paid. A Company bank account if this is a Customer Invoice or Supplier Refund, otherwise a Partner bank account number.', readonly=True, states={'draft':[('readonly',False)]}),
'move_lines':fields.function(_get_lines, type='many2many', relation='account.move.line', string='Entry Lines'),
'residual': fields.function(_amount_residual, digits_compute=dp.get_precision('Account'), string='Balance',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line','move_id'], 50),
'account.invoice.tax': (_get_invoice_tax, None, 50),
'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount','invoice_id'], 50),
'account.move.line': (_get_invoice_from_line, None, 50),
'account.move.reconcile': (_get_invoice_from_reconcile, None, 50),
},
help="Remaining amount due."),
'payment_ids': fields.function(_compute_lines, relation='account.move.line', type="many2many", string='Payments'),
'move_name': fields.char('Journal Entry', size=64, readonly=True, states={'draft':[('readonly',False)]}),
'user_id': fields.many2one('res.users', 'Salesperson', readonly=True, track_visibility='onchange', states={'draft':[('readonly',False)]}),
'fiscal_position': fields.many2one('account.fiscal.position', 'Fiscal Position', readonly=True, states={'draft':[('readonly',False)]})
}
_defaults = {
'type': _get_type,
'state': 'draft',
'journal_id': _get_journal,
'currency_id': _get_currency,
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.invoice', context=c),
'reference_type': 'none',
'check_total': 0.0,
'internal_number': False,
'user_id': lambda s, cr, u, c: u,
'sent': False,
}
_sql_constraints = [
('number_uniq', 'unique(number, company_id, journal_id, type)', 'Invoice Number must be unique per Company!'),
]
def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False):
journal_obj = self.pool.get('account.journal')
if context is None:
context = {}
if context.get('active_model', '') in ['res.partner'] and context.get('active_ids', False) and context['active_ids']:
partner = self.pool.get(context['active_model']).read(cr, uid, context['active_ids'], ['supplier','customer'])[0]
if not view_type:
view_id = self.pool.get('ir.ui.view').search(cr, uid, [('name', '=', 'account.invoice.tree')])
view_type = 'tree'
if view_type == 'form':
if partner['supplier'] and not partner['customer']:
view_id = self.pool.get('ir.ui.view').search(cr,uid,[('name', '=', 'account.invoice.supplier.form')])
elif partner['customer'] and not partner['supplier']:
view_id = self.pool.get('ir.ui.view').search(cr,uid,[('name', '=', 'account.invoice.form')])
if view_id and isinstance(view_id, (list, tuple)):
view_id = view_id[0]
res = super(account_invoice,self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
type = context.get('journal_type', False)
for field in res['fields']:
if field == 'journal_id' and type:
journal_select = journal_obj._name_search(cr, uid, '', [('type', '=', type)], context=context, limit=None, name_get_uid=1)
res['fields'][field]['selection'] = journal_select
doc = etree.XML(res['arch'])
if context.get('type', False):
for node in doc.xpath("//field[@name='partner_bank_id']"):
if context['type'] == 'in_refund':
node.set('domain', "[('partner_id.ref_companies', 'in', [company_id])]")
elif context['type'] == 'out_refund':
node.set('domain', "[('partner_id', '=', partner_id)]")
res['arch'] = etree.tostring(doc)
if view_type == 'search':
if context.get('type', 'in_invoice') in ('out_invoice', 'out_refund'):
for node in doc.xpath("//group[@name='extended filter']"):
doc.remove(node)
res['arch'] = etree.tostring(doc)
if view_type == 'tree':
partner_string = _('Customer')
if context.get('type', 'out_invoice') in ('in_invoice', 'in_refund'):
partner_string = _('Supplier')
for node in doc.xpath("//field[@name='reference']"):
node.set('invisible', '0')
for node in doc.xpath("//field[@name='partner_id']"):
node.set('string', partner_string)
res['arch'] = etree.tostring(doc)
return res
def get_log_context(self, cr, uid, context=None):
if context is None:
context = {}
res = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account', 'invoice_form')
view_id = res and res[1] or False
context['view_id'] = view_id
return context
def invoice_print(self, cr, uid, ids, context=None):
'''
        This function prints the invoice and marks it as sent, so that we can more easily see the next step of the workflow
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time.'
self.write(cr, uid, ids, {'sent': True}, context=context)
datas = {
'ids': ids,
'model': 'account.invoice',
'form': self.read(cr, uid, ids[0], context=context)
}
return {
'type': 'ir.actions.report.xml',
'report_name': 'account.invoice',
'datas': datas,
'nodestroy' : True
}
def action_invoice_sent(self, cr, uid, ids, context=None):
'''
This function opens a window to compose an email, with the edi invoice template message loaded by default
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time.'
ir_model_data = self.pool.get('ir.model.data')
try:
template_id = ir_model_data.get_object_reference(cr, uid, 'account', 'email_template_edi_invoice')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference(cr, uid, 'mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
ctx = dict(context)
ctx.update({
'default_model': 'account.invoice',
'default_res_id': ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
'mark_invoice_as_sent': True,
})
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
}
def confirm_paid(self, cr, uid, ids, context=None):
if context is None:
context = {}
self.write(cr, uid, ids, {'state':'paid'}, context=context)
return True
def unlink(self, cr, uid, ids, context=None):
if context is None:
context = {}
invoices = self.read(cr, uid, ids, ['state','internal_number'], context=context)
unlink_ids = []
for t in invoices:
if t['state'] not in ('draft', 'cancel'):
raise openerp.exceptions.Warning(_('You cannot delete an invoice which is not draft or cancelled. You should refund it instead.'))
elif t['internal_number']:
raise openerp.exceptions.Warning(_('You cannot delete an invoice after it has been validated (and received a number). You can set it back to "Draft" state and modify its content, then re-confirm it.'))
else:
unlink_ids.append(t['id'])
osv.osv.unlink(self, cr, uid, unlink_ids, context=context)
return True
def onchange_partner_id(self, cr, uid, ids, type, partner_id,\
date_invoice=False, payment_term=False, partner_bank_id=False, company_id=False):
partner_payment_term = False
acc_id = False
bank_id = False
fiscal_position = False
opt = [('uid', str(uid))]
if partner_id:
opt.insert(0, ('id', partner_id))
p = self.pool.get('res.partner').browse(cr, uid, partner_id)
if company_id:
if (p.property_account_receivable.company_id and (p.property_account_receivable.company_id.id != company_id)) and (p.property_account_payable.company_id and (p.property_account_payable.company_id.id != company_id)):
property_obj = self.pool.get('ir.property')
rec_pro_id = property_obj.search(cr,uid,[('name','=','property_account_receivable'),('res_id','=','res.partner,'+str(partner_id)+''),('company_id','=',company_id)])
pay_pro_id = property_obj.search(cr,uid,[('name','=','property_account_payable'),('res_id','=','res.partner,'+str(partner_id)+''),('company_id','=',company_id)])
if not rec_pro_id:
rec_pro_id = property_obj.search(cr,uid,[('name','=','property_account_receivable'),('company_id','=',company_id)])
if not pay_pro_id:
pay_pro_id = property_obj.search(cr,uid,[('name','=','property_account_payable'),('company_id','=',company_id)])
rec_line_data = property_obj.read(cr,uid,rec_pro_id,['name','value_reference','res_id'])
pay_line_data = property_obj.read(cr,uid,pay_pro_id,['name','value_reference','res_id'])
rec_res_id = rec_line_data and rec_line_data[0].get('value_reference',False) and int(rec_line_data[0]['value_reference'].split(',')[1]) or False
pay_res_id = pay_line_data and pay_line_data[0].get('value_reference',False) and int(pay_line_data[0]['value_reference'].split(',')[1]) or False
if not rec_res_id and not pay_res_id:
raise osv.except_osv(_('Configuration Error!'),
_('Cannot find a chart of accounts for this company, you should create one.'))
account_obj = self.pool.get('account.account')
rec_obj_acc = account_obj.browse(cr, uid, [rec_res_id])
pay_obj_acc = account_obj.browse(cr, uid, [pay_res_id])
p.property_account_receivable = rec_obj_acc[0]
p.property_account_payable = pay_obj_acc[0]
if type in ('out_invoice', 'out_refund'):
acc_id = p.property_account_receivable.id
partner_payment_term = p.property_payment_term and p.property_payment_term.id or False
else:
acc_id = p.property_account_payable.id
partner_payment_term = p.property_supplier_payment_term and p.property_supplier_payment_term.id or False
fiscal_position = p.property_account_position and p.property_account_position.id or False
if p.bank_ids:
bank_id = p.bank_ids[0].id
result = {'value': {
'account_id': acc_id,
'payment_term': partner_payment_term,
'fiscal_position': fiscal_position
}
}
if type in ('in_invoice', 'in_refund'):
result['value']['partner_bank_id'] = bank_id
if payment_term != partner_payment_term:
if partner_payment_term:
to_update = self.onchange_payment_term_date_invoice(
cr, uid, ids, partner_payment_term, date_invoice)
result['value'].update(to_update['value'])
else:
result['value']['date_due'] = False
if partner_bank_id != bank_id:
to_update = self.onchange_partner_bank(cr, uid, ids, bank_id)
result['value'].update(to_update['value'])
return result
def onchange_journal_id(self, cr, uid, ids, journal_id=False, context=None):
result = {}
if journal_id:
journal = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context)
currency_id = journal.currency and journal.currency.id or journal.company_id.currency_id.id
company_id = journal.company_id.id
result = {'value': {
'currency_id': currency_id,
'company_id': company_id,
}
}
return result
def onchange_payment_term_date_invoice(self, cr, uid, ids, payment_term_id, date_invoice):
res = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not date_invoice:
date_invoice = time.strftime('%Y-%m-%d')
if not payment_term_id:
inv = self.browse(cr, uid, ids[0])
            #Make sure the due date entered by the user is kept when there is no payment term defined
return {'value':{'date_due': inv.date_due and inv.date_due or date_invoice}}
pterm_list = self.pool.get('account.payment.term').compute(cr, uid, payment_term_id, value=1, date_ref=date_invoice)
if pterm_list:
pterm_list = [line[0] for line in pterm_list]
pterm_list.sort()
res = {'value':{'date_due': pterm_list[-1]}}
else:
raise osv.except_osv(_('Insufficient Data!'), _('The payment term of supplier does not have a payment term line.'))
return res
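    # Hedged example of the computation above (dates and amounts are invented):
    # a "50% now, 50% in 30 days" payment term computed against an invoice dated
    # 2014-01-01 would typically make account.payment.term.compute() return
    #   [('2014-01-01', 500.0), ('2014-01-31', 500.0)]
    # and the due date is then set to the latest date, here '2014-01-31'.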
def onchange_invoice_line(self, cr, uid, ids, lines):
return {}
def onchange_partner_bank(self, cursor, user, ids, partner_bank_id=False):
return {'value': {}}
def onchange_company_id(self, cr, uid, ids, company_id, part_id, type, invoice_line, currency_id):
#TODO: add the missing context parameter when forward-porting in trunk so we can remove
# this hack!
context = self.pool['res.users'].context_get(cr, uid)
val = {}
dom = {}
obj_journal = self.pool.get('account.journal')
account_obj = self.pool.get('account.account')
inv_line_obj = self.pool.get('account.invoice.line')
if company_id and part_id and type:
acc_id = False
partner_obj = self.pool.get('res.partner').browse(cr,uid,part_id)
if partner_obj.property_account_payable and partner_obj.property_account_receivable:
if partner_obj.property_account_payable.company_id.id != company_id and partner_obj.property_account_receivable.company_id.id != company_id:
property_obj = self.pool.get('ir.property')
rec_pro_id = property_obj.search(cr, uid, [('name','=','property_account_receivable'),('res_id','=','res.partner,'+str(part_id)+''),('company_id','=',company_id)])
pay_pro_id = property_obj.search(cr, uid, [('name','=','property_account_payable'),('res_id','=','res.partner,'+str(part_id)+''),('company_id','=',company_id)])
if not rec_pro_id:
rec_pro_id = property_obj.search(cr, uid, [('name','=','property_account_receivable'),('company_id','=',company_id)])
if not pay_pro_id:
pay_pro_id = property_obj.search(cr, uid, [('name','=','property_account_payable'),('company_id','=',company_id)])
rec_line_data = property_obj.read(cr, uid, rec_pro_id, ['name','value_reference','res_id'])
pay_line_data = property_obj.read(cr, uid, pay_pro_id, ['name','value_reference','res_id'])
rec_res_id = rec_line_data and rec_line_data[0].get('value_reference',False) and int(rec_line_data[0]['value_reference'].split(',')[1]) or False
pay_res_id = pay_line_data and pay_line_data[0].get('value_reference',False) and int(pay_line_data[0]['value_reference'].split(',')[1]) or False
if not rec_res_id and not pay_res_id:
raise osv.except_osv(_('Configuration Error!'),
_('Cannot find a chart of account, you should create one from Settings\Configuration\Accounting menu.'))
if type in ('out_invoice', 'out_refund'):
acc_id = rec_res_id
else:
acc_id = pay_res_id
val= {'account_id': acc_id}
if ids:
if company_id:
inv_obj = self.browse(cr,uid,ids)
for line in inv_obj[0].invoice_line:
if line.account_id:
if line.account_id.company_id.id != company_id:
result_id = account_obj.search(cr, uid, [('name','=',line.account_id.name),('company_id','=',company_id)])
if not result_id:
raise osv.except_osv(_('Configuration Error!'),
_('Cannot find a chart of account, you should create one from Settings\Configuration\Accounting menu.'))
inv_line_obj.write(cr, uid, [line.id], {'account_id': result_id[-1]})
else:
if invoice_line:
for inv_line in invoice_line:
obj_l = account_obj.browse(cr, uid, inv_line[2]['account_id'])
if obj_l.company_id.id != company_id:
raise osv.except_osv(_('Configuration Error!'),
_('Invoice line account\'s company and invoice\'s company does not match.'))
else:
continue
if company_id and type:
journal_mapping = {
'out_invoice': 'sale',
'out_refund': 'sale_refund',
'in_refund': 'purchase_refund',
'in_invoice': 'purchase',
}
journal_type = journal_mapping[type]
journal_ids = obj_journal.search(cr, uid, [('company_id','=',company_id), ('type', '=', journal_type)])
if journal_ids:
val['journal_id'] = journal_ids[0]
ir_values_obj = self.pool.get('ir.values')
res_journal_default = ir_values_obj.get(cr, uid, 'default', 'type=%s' % (type), ['account.invoice'])
for r in res_journal_default:
if r[1] == 'journal_id' and r[2] in journal_ids:
val['journal_id'] = r[2]
if not val.get('journal_id', False):
journal_type_map = dict(obj_journal._columns['type'].selection)
journal_type_label = self.pool['ir.translation']._get_source(cr, uid, None, ('code','selection'),
context.get('lang'),
journal_type_map.get(journal_type))
raise osv.except_osv(_('Configuration Error!'),
_('Cannot find any account journal of %s type for this company.\n\nYou can create one in the menu: \nConfiguration\Journals\Journals.') % ('"%s"' % journal_type_label))
dom = {'journal_id': [('id', 'in', journal_ids)]}
else:
journal_ids = obj_journal.search(cr, uid, [])
return {'value': val, 'domain': dom}
# go from canceled state to draft state
def action_cancel_draft(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {'state':'draft'})
wf_service = netsvc.LocalService("workflow")
for inv_id in ids:
wf_service.trg_delete(uid, 'account.invoice', inv_id, cr)
wf_service.trg_create(uid, 'account.invoice', inv_id, cr)
return True
# Workflow stuff
#################
# return the ids of the move lines which has the same account than the invoice
# whose id is in ids
def move_line_id_payment_get(self, cr, uid, ids, *args):
if not ids: return []
result = self.move_line_id_payment_gets(cr, uid, ids, *args)
return result.get(ids[0], [])
def move_line_id_payment_gets(self, cr, uid, ids, *args):
res = {}
if not ids: return res
cr.execute('SELECT i.id, l.id '\
'FROM account_move_line l '\
'LEFT JOIN account_invoice i ON (i.move_id=l.move_id) '\
'WHERE i.id IN %s '\
'AND l.account_id=i.account_id',
(tuple(ids),))
for r in cr.fetchall():
res.setdefault(r[0], [])
res[r[0]].append( r[1] )
return res
def copy(self, cr, uid, id, default=None, context=None):
default = default or {}
default.update({
'state':'draft',
'number':False,
'move_id':False,
'move_name':False,
'internal_number': False,
'period_id': False,
'sent': False,
})
if 'date_invoice' not in default:
default.update({
'date_invoice':False
})
if 'date_due' not in default:
default.update({
'date_due':False
})
return super(account_invoice, self).copy(cr, uid, id, default, context)
def test_paid(self, cr, uid, ids, *args):
res = self.move_line_id_payment_get(cr, uid, ids)
if not res:
return False
ok = True
for id in res:
cr.execute('select reconcile_id from account_move_line where id=%s', (id,))
ok = ok and bool(cr.fetchone()[0])
return ok
def button_reset_taxes(self, cr, uid, ids, context=None):
if context is None:
context = {}
ctx = context.copy()
ait_obj = self.pool.get('account.invoice.tax')
for id in ids:
cr.execute("DELETE FROM account_invoice_tax WHERE invoice_id=%s AND manual is False", (id,))
partner = self.browse(cr, uid, id, context=ctx).partner_id
if partner.lang:
ctx.update({'lang': partner.lang})
for taxe in ait_obj.compute(cr, uid, id, context=ctx).values():
ait_obj.create(cr, uid, taxe)
# Update the stored value (fields.function), so we write to trigger recompute
self.pool.get('account.invoice').write(cr, uid, ids, {'invoice_line':[]}, context=ctx)
return True
def button_compute(self, cr, uid, ids, context=None, set_total=False):
self.button_reset_taxes(cr, uid, ids, context)
for inv in self.browse(cr, uid, ids, context=context):
if set_total:
self.pool.get('account.invoice').write(cr, uid, [inv.id], {'check_total': inv.amount_total})
return True
def _convert_ref(self, cr, uid, ref):
return (ref or '').replace('/','')
def _get_analytic_lines(self, cr, uid, id, context=None):
if context is None:
context = {}
inv = self.browse(cr, uid, id)
cur_obj = self.pool.get('res.currency')
company_currency = self.pool['res.company'].browse(cr, uid, inv.company_id.id).currency_id.id
if inv.type in ('out_invoice', 'in_refund'):
sign = 1
else:
sign = -1
iml = self.pool.get('account.invoice.line').move_line_get(cr, uid, inv.id, context=context)
for il in iml:
if il['account_analytic_id']:
if inv.type in ('in_invoice', 'in_refund'):
ref = inv.reference
else:
ref = self._convert_ref(cr, uid, inv.number)
if not inv.journal_id.analytic_journal_id:
raise osv.except_osv(_('No Analytic Journal!'),_("You have to define an analytic journal on the '%s' journal!") % (inv.journal_id.name,))
il['analytic_lines'] = [(0,0, {
'name': il['name'],
'date': inv['date_invoice'],
'account_id': il['account_analytic_id'],
'unit_amount': il['quantity'],
'amount': cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, il['price'], context={'date': inv.date_invoice}) * sign,
'product_id': il['product_id'],
'product_uom_id': il['uos_id'],
'general_account_id': il['account_id'],
'journal_id': inv.journal_id.analytic_journal_id.id,
'ref': ref,
})]
return iml
def action_date_assign(self, cr, uid, ids, *args):
for inv in self.browse(cr, uid, ids):
res = self.onchange_payment_term_date_invoice(cr, uid, inv.id, inv.payment_term.id, inv.date_invoice)
if res and res['value']:
self.write(cr, uid, [inv.id], res['value'])
return True
def finalize_invoice_move_lines(self, cr, uid, invoice_browse, move_lines):
"""finalize_invoice_move_lines(cr, uid, invoice, move_lines) -> move_lines
Hook method to be overridden in additional modules to verify and possibly alter the
move lines to be created by an invoice, for special cases.
:param invoice_browse: browsable record of the invoice that is generating the move lines
:param move_lines: list of dictionaries with the account.move.lines (as for create())
:return: the (possibly updated) final move_lines to create for this invoice
"""
return move_lines
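    # Hedged override sketch (not part of this module; the addon class name is an
    # assumption): a custom module could hook into finalize_invoice_move_lines(),
    # e.g. to prefix every generated journal item name. Each element received is a
    # (0, 0, vals) one2many create command.
    #
    #   class account_invoice_custom(osv.osv):
    #       _inherit = 'account.invoice'
    #
    #       def finalize_invoice_move_lines(self, cr, uid, invoice_browse, move_lines):
    #           move_lines = super(account_invoice_custom, self).finalize_invoice_move_lines(
    #               cr, uid, invoice_browse, move_lines)
    #           return [(x, y, dict(vals, name='[%s] %s' % (invoice_browse.number or '/', vals['name'])))
    #                   for x, y, vals in move_lines]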
def check_tax_lines(self, cr, uid, inv, compute_taxes, ait_obj):
company_currency = self.pool['res.company'].browse(cr, uid, inv.company_id.id).currency_id
if not inv.tax_line:
for tax in compute_taxes.values():
ait_obj.create(cr, uid, tax)
else:
tax_key = []
for tax in inv.tax_line:
if tax.manual:
continue
key = (tax.tax_code_id.id, tax.base_code_id.id, tax.account_id.id, tax.account_analytic_id.id)
tax_key.append(key)
if not key in compute_taxes:
raise osv.except_osv(_('Warning!'), _('Global taxes defined, but they are not in invoice lines !'))
base = compute_taxes[key]['base']
if abs(base - tax.base) > company_currency.rounding:
raise osv.except_osv(_('Warning!'), _('Tax base different!\nClick on compute to update the tax base.'))
for key in compute_taxes:
if not key in tax_key:
raise osv.except_osv(_('Warning!'), _('Taxes are missing!\nClick on compute button.'))
def compute_invoice_totals(self, cr, uid, inv, company_currency, ref, invoice_move_lines, context=None):
if context is None:
context={}
total = 0
total_currency = 0
cur_obj = self.pool.get('res.currency')
for i in invoice_move_lines:
if inv.currency_id.id != company_currency:
context.update({'date': inv.date_invoice or time.strftime('%Y-%m-%d')})
i['currency_id'] = inv.currency_id.id
i['amount_currency'] = i['price']
i['price'] = cur_obj.compute(cr, uid, inv.currency_id.id,
company_currency, i['price'],
context=context)
else:
i['amount_currency'] = False
i['currency_id'] = False
i['ref'] = ref
if inv.type in ('out_invoice','in_refund'):
total += i['price']
total_currency += i['amount_currency'] or i['price']
i['price'] = - i['price']
else:
total -= i['price']
total_currency -= i['amount_currency'] or i['price']
return total, total_currency, invoice_move_lines
def inv_line_characteristic_hashcode(self, invoice, invoice_line):
"""Overridable hashcode generation for invoice lines. Lines having the same hashcode
will be grouped together if the journal has the 'group line' option. Of course a module
can add fields to invoice lines that would need to be tested too before merging lines
or not."""
return "%s-%s-%s-%s-%s"%(
invoice_line['account_id'],
invoice_line.get('tax_code_id',"False"),
invoice_line.get('product_id',"False"),
invoice_line.get('analytic_account_id',"False"),
invoice_line.get('date_maturity',"False"))
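    # Hedged illustration: a line on account 123, with no tax code, product 45 and
    # no analytic account or maturity date, would hash to "123-False-45-False-False";
    # group_lines() below merges move lines that share the same string when the
    # journal has the 'group invoice lines' option enabled.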
def group_lines(self, cr, uid, iml, line, inv):
"""Merge account move lines (and hence analytic lines) if invoice line hashcodes are equals"""
if inv.journal_id.group_invoice_lines:
line2 = {}
for x, y, l in line:
tmp = self.inv_line_characteristic_hashcode(inv, l)
if tmp in line2:
am = line2[tmp]['debit'] - line2[tmp]['credit'] + (l['debit'] - l['credit'])
line2[tmp]['debit'] = (am > 0) and am or 0.0
line2[tmp]['credit'] = (am < 0) and -am or 0.0
line2[tmp]['tax_amount'] += l['tax_amount']
line2[tmp]['analytic_lines'] += l['analytic_lines']
else:
line2[tmp] = l
line = []
for key, val in line2.items():
line.append((0,0,val))
return line
def action_move_create(self, cr, uid, ids, context=None):
"""Creates invoice related analytics and financial move lines"""
ait_obj = self.pool.get('account.invoice.tax')
cur_obj = self.pool.get('res.currency')
period_obj = self.pool.get('account.period')
payment_term_obj = self.pool.get('account.payment.term')
journal_obj = self.pool.get('account.journal')
move_obj = self.pool.get('account.move')
if context is None:
context = {}
for inv in self.browse(cr, uid, ids, context=context):
if not inv.journal_id.sequence_id:
raise osv.except_osv(_('Error!'), _('Please define sequence on the journal related to this invoice.'))
if not inv.invoice_line:
raise osv.except_osv(_('No Invoice Lines!'), _('Please create some invoice lines.'))
if inv.move_id:
continue
ctx = context.copy()
ctx.update({'lang': inv.partner_id.lang})
if not inv.date_invoice:
self.write(cr, uid, [inv.id], {'date_invoice': fields.date.context_today(self,cr,uid,context=context)}, context=ctx)
company_currency = self.pool['res.company'].browse(cr, uid, inv.company_id.id).currency_id.id
# create the analytical lines
# one move line per invoice line
iml = self._get_analytic_lines(cr, uid, inv.id, context=ctx)
# check if taxes are all computed
compute_taxes = ait_obj.compute(cr, uid, inv.id, context=ctx)
self.check_tax_lines(cr, uid, inv, compute_taxes, ait_obj)
            # the check_total feature is disabled by default; it is only enforced for users in the 'check total' group
group_check_total_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account', 'group_supplier_inv_check_total')[1]
group_check_total = self.pool.get('res.groups').browse(cr, uid, group_check_total_id, context=context)
if group_check_total and uid in [x.id for x in group_check_total.users]:
if (inv.type in ('in_invoice', 'in_refund') and abs(inv.check_total - inv.amount_total) >= (inv.currency_id.rounding/2.0)):
raise osv.except_osv(_('Bad Total!'), _('Please verify the price of the invoice!\nThe encoded total does not match the computed total.'))
if inv.payment_term:
total_fixed = total_percent = 0
for line in inv.payment_term.line_ids:
if line.value == 'fixed':
total_fixed += line.value_amount
if line.value == 'procent':
total_percent += line.value_amount
total_fixed = (total_fixed * 100) / (inv.amount_total or 1.0)
if (total_fixed + total_percent) > 100:
raise osv.except_osv(_('Error!'), _("Cannot create the invoice.\nThe related payment term is probably misconfigured as it gives a computed amount greater than the total invoiced amount. In order to avoid rounding issues, the latest line of your payment term must be of type 'balance'."))
# one move line per tax line
iml += ait_obj.move_line_get(cr, uid, inv.id)
entry_type = ''
if inv.type in ('in_invoice', 'in_refund'):
ref = inv.reference
entry_type = 'journal_pur_voucher'
if inv.type == 'in_refund':
entry_type = 'cont_voucher'
else:
ref = self._convert_ref(cr, uid, inv.number)
entry_type = 'journal_sale_vou'
if inv.type == 'out_refund':
entry_type = 'cont_voucher'
diff_currency_p = inv.currency_id.id <> company_currency
# create one move line for the total and possibly adjust the other lines amount
total = 0
total_currency = 0
total, total_currency, iml = self.compute_invoice_totals(cr, uid, inv, company_currency, ref, iml, context=ctx)
acc_id = inv.account_id.id
name = inv['name'] or inv['supplier_invoice_number'] or '/'
totlines = False
if inv.payment_term:
totlines = payment_term_obj.compute(cr,
uid, inv.payment_term.id, total, inv.date_invoice or False, context=ctx)
if totlines:
res_amount_currency = total_currency
i = 0
ctx.update({'date': inv.date_invoice})
for t in totlines:
if inv.currency_id.id != company_currency:
amount_currency = cur_obj.compute(cr, uid, company_currency, inv.currency_id.id, t[1], context=ctx)
else:
amount_currency = False
# last line add the diff
res_amount_currency -= amount_currency or 0
i += 1
if i == len(totlines):
amount_currency += res_amount_currency
iml.append({
'type': 'dest',
'name': name,
'price': t[1],
'account_id': acc_id,
'date_maturity': t[0],
'amount_currency': diff_currency_p \
and amount_currency or False,
'currency_id': diff_currency_p \
and inv.currency_id.id or False,
'ref': ref,
})
else:
iml.append({
'type': 'dest',
'name': name,
'price': total,
'account_id': acc_id,
'date_maturity': inv.date_due or False,
'amount_currency': diff_currency_p \
and total_currency or False,
'currency_id': diff_currency_p \
and inv.currency_id.id or False,
'ref': ref
})
date = inv.date_invoice or time.strftime('%Y-%m-%d')
part = self.pool.get("res.partner")._find_accounting_partner(inv.partner_id)
line = map(lambda x:(0,0,self.line_get_convert(cr, uid, x, part.id, date, context=ctx)),iml)
line = self.group_lines(cr, uid, iml, line, inv)
journal_id = inv.journal_id.id
journal = journal_obj.browse(cr, uid, journal_id, context=ctx)
if journal.centralisation:
raise osv.except_osv(_('User Error!'),
_('You cannot create an invoice on a centralized journal. Uncheck the centralized counterpart box in the related journal from the configuration menu.'))
line = self.finalize_invoice_move_lines(cr, uid, inv, line)
move = {
'ref': inv.reference and inv.reference or inv.name,
'line_id': line,
'journal_id': journal_id,
'date': date,
'narration': inv.comment,
'company_id': inv.company_id.id,
}
period_id = inv.period_id and inv.period_id.id or False
ctx.update(company_id=inv.company_id.id,
account_period_prefer_normal=True)
if not period_id:
period_ids = period_obj.find(cr, uid, inv.date_invoice, context=ctx)
period_id = period_ids and period_ids[0] or False
if period_id:
move['period_id'] = period_id
for i in line:
i[2]['period_id'] = period_id
ctx.update(invoice=inv)
move_id = move_obj.create(cr, uid, move, context=ctx)
new_move_name = move_obj.browse(cr, uid, move_id, context=ctx).name
# make the invoice point to that move
self.write(cr, uid, [inv.id], {'move_id': move_id,'period_id':period_id, 'move_name':new_move_name}, context=ctx)
# Pass invoice in context in method post: used if you want to get the same
# account move reference when creating the same invoice after a cancelled one:
move_obj.post(cr, uid, [move_id], context=ctx)
self._log_event(cr, uid, ids)
return True
def invoice_validate(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state':'open'}, context=context)
return True
def line_get_convert(self, cr, uid, x, part, date, context=None):
return {
'date_maturity': x.get('date_maturity', False),
'partner_id': part,
'name': x['name'][:64],
'date': date,
'debit': x['price']>0 and x['price'],
'credit': x['price']<0 and -x['price'],
'account_id': x['account_id'],
'analytic_lines': x.get('analytic_lines', []),
'amount_currency': x['price']>0 and abs(x.get('amount_currency', False)) or -abs(x.get('amount_currency', False)),
'currency_id': x.get('currency_id', False),
'tax_code_id': x.get('tax_code_id', False),
'tax_amount': x.get('tax_amount', False),
'ref': x.get('ref', False),
'quantity': x.get('quantity',1.00),
'product_id': x.get('product_id', False),
'product_uom_id': x.get('uos_id', False),
'analytic_account_id': x.get('account_analytic_id', False),
}
def action_number(self, cr, uid, ids, context=None):
if context is None:
context = {}
        #TODO: not a correct fix, but a refresh of the values is required before reading them.
self.write(cr, uid, ids, {})
for obj_inv in self.browse(cr, uid, ids, context=context):
invtype = obj_inv.type
number = obj_inv.number
move_id = obj_inv.move_id and obj_inv.move_id.id or False
reference = obj_inv.reference or ''
self.write(cr, uid, ids, {'internal_number': number})
if invtype in ('in_invoice', 'in_refund'):
if not reference:
ref = self._convert_ref(cr, uid, number)
else:
ref = reference
else:
ref = self._convert_ref(cr, uid, number)
cr.execute('UPDATE account_move SET ref=%s ' \
'WHERE id=%s AND (ref is null OR ref = \'\')',
(ref, move_id))
cr.execute('UPDATE account_move_line SET ref=%s ' \
'WHERE move_id=%s AND (ref is null OR ref = \'\')',
(ref, move_id))
cr.execute('UPDATE account_analytic_line SET ref=%s ' \
'FROM account_move_line ' \
'WHERE account_move_line.move_id = %s ' \
'AND account_analytic_line.move_id = account_move_line.id',
(ref, move_id))
return True
def action_cancel(self, cr, uid, ids, context=None):
if context is None:
context = {}
account_move_obj = self.pool.get('account.move')
invoices = self.read(cr, uid, ids, ['move_id', 'payment_ids'])
move_ids = [] # ones that we will need to remove
for i in invoices:
if i['move_id']:
move_ids.append(i['move_id'][0])
if i['payment_ids']:
account_move_line_obj = self.pool.get('account.move.line')
pay_ids = account_move_line_obj.browse(cr, uid, i['payment_ids'])
for move_line in pay_ids:
if move_line.reconcile_partial_id and move_line.reconcile_partial_id.line_partial_ids:
raise osv.except_osv(_('Error!'), _('You cannot cancel an invoice which is partially paid. You need to unreconcile related payment entries first.'))
# First, set the invoices as cancelled and detach the move ids
self.write(cr, uid, ids, {'state':'cancel', 'move_id':False})
if move_ids:
# second, invalidate the move(s)
account_move_obj.button_cancel(cr, uid, move_ids, context=context)
# delete the move this invoice was pointing to
# Note that the corresponding move_lines and move_reconciles
# will be automatically deleted too
account_move_obj.unlink(cr, uid, move_ids, context=context)
self._log_event(cr, uid, ids, -1.0, 'Cancel Invoice')
return True
###################
def list_distinct_taxes(self, cr, uid, ids):
invoices = self.browse(cr, uid, ids)
taxes = {}
for inv in invoices:
for tax in inv.tax_line:
if not tax['name'] in taxes:
taxes[tax['name']] = {'name': tax['name']}
return taxes.values()
def _log_event(self, cr, uid, ids, factor=1.0, name='Open Invoice'):
#TODO: implement messages system
return True
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
types = {
'out_invoice': _('Invoice'),
'in_invoice': _('Supplier Invoice'),
'out_refund': _('Refund'),
'in_refund': _('Supplier Refund'),
}
return [(r['id'], '%s %s' % (r['number'] or types[r['type']], r['name'] or '')) for r in self.read(cr, uid, ids, ['type', 'number', 'name'], context, load='_classic_write')]
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
if context is None:
context = {}
ids = []
if name:
ids = self.search(cr, user, [('number','=',name)] + args, limit=limit, context=context)
if not ids:
ids = self.search(cr, user, [('name',operator,name)] + args, limit=limit, context=context)
return self.name_get(cr, user, ids, context)
def _refund_cleanup_lines(self, cr, uid, lines, context=None):
"""Convert records to dict of values suitable for one2many line creation
:param list(browse_record) lines: records to convert
        :return: list of command tuples for one2many line creation [(0, 0, dict of values), ...]
"""
clean_lines = []
for line in lines:
clean_line = {}
for field in line._all_columns.keys():
if line._all_columns[field].column._type == 'many2one':
clean_line[field] = line[field].id
elif line._all_columns[field].column._type not in ['many2many','one2many']:
clean_line[field] = line[field]
elif field == 'invoice_line_tax_id':
tax_list = []
for tax in line[field]:
tax_list.append(tax.id)
clean_line[field] = [(6,0, tax_list)]
clean_lines.append(clean_line)
return map(lambda x: (0,0,x), clean_lines)
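    # Hedged example of the returned structure (field values are invented): each
    # element is a one2many "create" command, e.g.
    #   [(0, 0, {'name': 'Service', 'quantity': 1.0, 'price_unit': 100.0,
    #            'account_id': 123, 'invoice_line_tax_id': [(6, 0, [42])]})]
    # where the nested (6, 0, ids) command replaces the many2many tax links on the
    # new line.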
def _prepare_refund(self, cr, uid, invoice, date=None, period_id=None, description=None, journal_id=None, context=None):
"""Prepare the dict of values to create the new refund from the invoice.
This method may be overridden to implement custom
refund generation (making sure to call super() to establish
a clean extension chain).
        :param invoice: browse record of the invoice to refund
:param string date: refund creation date from the wizard
:param integer period_id: force account.period from the wizard
:param string description: description of the refund from the wizard
:param integer journal_id: account.journal from the wizard
:return: dict of value to create() the refund
"""
obj_journal = self.pool.get('account.journal')
type_dict = {
'out_invoice': 'out_refund', # Customer Invoice
'in_invoice': 'in_refund', # Supplier Invoice
'out_refund': 'out_invoice', # Customer Refund
'in_refund': 'in_invoice', # Supplier Refund
}
invoice_data = {}
for field in ['name', 'reference', 'comment', 'date_due', 'partner_id', 'company_id',
'account_id', 'currency_id', 'payment_term', 'user_id', 'fiscal_position']:
if invoice._all_columns[field].column._type == 'many2one':
invoice_data[field] = invoice[field].id
else:
invoice_data[field] = invoice[field] if invoice[field] else False
invoice_lines = self._refund_cleanup_lines(cr, uid, invoice.invoice_line, context=context)
tax_lines = filter(lambda l: l['manual'], invoice.tax_line)
tax_lines = self._refund_cleanup_lines(cr, uid, tax_lines, context=context)
if journal_id:
refund_journal_ids = [journal_id]
elif invoice['type'] == 'in_invoice':
refund_journal_ids = obj_journal.search(cr, uid, [('type','=','purchase_refund')], context=context)
else:
refund_journal_ids = obj_journal.search(cr, uid, [('type','=','sale_refund')], context=context)
if not date:
date = time.strftime('%Y-%m-%d')
invoice_data.update({
'type': type_dict[invoice['type']],
'date_invoice': date,
'state': 'draft',
'number': False,
'invoice_line': invoice_lines,
'tax_line': tax_lines,
'journal_id': refund_journal_ids and refund_journal_ids[0] or False,
})
if period_id:
invoice_data['period_id'] = period_id
if description:
invoice_data['name'] = description
return invoice_data
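    # Hedged usage sketch (identifiers and values are illustrative): called on a
    # customer invoice browse record, this returns the values of a draft refund,
    # which refund() below passes straight to create().
    #
    #   values = self._prepare_refund(cr, uid, invoice, date='2014-03-01',
    #                                 description='Returned goods', context=context)
    #   # values['type'] == 'out_refund', values['state'] == 'draft', and
    #   # values['journal_id'] points to a sale refund journal, if one exists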
def refund(self, cr, uid, ids, date=None, period_id=None, description=None, journal_id=None, context=None):
new_ids = []
for invoice in self.browse(cr, uid, ids, context=context):
invoice = self._prepare_refund(cr, uid, invoice,
date=date,
period_id=period_id,
description=description,
journal_id=journal_id,
context=context)
# create the new invoice
new_ids.append(self.create(cr, uid, invoice, context=context))
return new_ids
def pay_and_reconcile(self, cr, uid, ids, pay_amount, pay_account_id, period_id, pay_journal_id, writeoff_acc_id, writeoff_period_id, writeoff_journal_id, context=None, name=''):
if context is None:
context = {}
#TODO check if we can use different period for payment and the writeoff line
assert len(ids)==1, "Can only pay one invoice at a time."
invoice = self.browse(cr, uid, ids[0], context=context)
src_account_id = invoice.account_id.id
# Take the seq as name for move
types = {'out_invoice': -1, 'in_invoice': 1, 'out_refund': 1, 'in_refund': -1}
direction = types[invoice.type]
#take the chosen date
if 'date_p' in context and context['date_p']:
date=context['date_p']
else:
date=time.strftime('%Y-%m-%d')
# Take the amount in currency and the currency of the payment
if 'amount_currency' in context and context['amount_currency'] and 'currency_id' in context and context['currency_id']:
amount_currency = context['amount_currency']
currency_id = context['currency_id']
else:
amount_currency = False
currency_id = False
pay_journal = self.pool.get('account.journal').read(cr, uid, pay_journal_id, ['type'], context=context)
if invoice.type in ('in_invoice', 'out_invoice'):
if pay_journal['type'] == 'bank':
entry_type = 'bank_pay_voucher' # Bank payment
else:
entry_type = 'pay_voucher' # Cash payment
else:
entry_type = 'cont_voucher'
if invoice.type in ('in_invoice', 'in_refund'):
ref = invoice.reference
else:
ref = self._convert_ref(cr, uid, invoice.number)
partner = self.pool['res.partner']._find_accounting_partner(invoice.partner_id)
# Pay attention to the sign for both debit/credit AND amount_currency
l1 = {
'debit': direction * pay_amount>0 and direction * pay_amount,
'credit': direction * pay_amount<0 and - direction * pay_amount,
'account_id': src_account_id,
'partner_id': partner.id,
'ref':ref,
'date': date,
'currency_id':currency_id,
'amount_currency':amount_currency and direction * amount_currency or 0.0,
'company_id': invoice.company_id.id,
}
l2 = {
'debit': direction * pay_amount<0 and - direction * pay_amount,
'credit': direction * pay_amount>0 and direction * pay_amount,
'account_id': pay_account_id,
'partner_id': partner.id,
'ref':ref,
'date': date,
'currency_id':currency_id,
'amount_currency':amount_currency and - direction * amount_currency or 0.0,
'company_id': invoice.company_id.id,
}
if not name:
name = invoice.invoice_line and invoice.invoice_line[0].name or invoice.number
l1['name'] = name
l2['name'] = name
lines = [(0, 0, l1), (0, 0, l2)]
move = {'ref': ref, 'line_id': lines, 'journal_id': pay_journal_id, 'period_id': period_id, 'date': date}
move_id = self.pool.get('account.move').create(cr, uid, move, context=context)
line_ids = []
total = 0.0
line = self.pool.get('account.move.line')
move_ids = [move_id,]
if invoice.move_id:
move_ids.append(invoice.move_id.id)
cr.execute('SELECT id FROM account_move_line '\
'WHERE move_id IN %s',
((move_id, invoice.move_id.id),))
lines = line.browse(cr, uid, map(lambda x: x[0], cr.fetchall()) )
for l in lines+invoice.payment_ids:
if l.account_id.id == src_account_id:
line_ids.append(l.id)
total += (l.debit or 0.0) - (l.credit or 0.0)
inv_id, name = self.name_get(cr, uid, [invoice.id], context=context)[0]
if (not round(total,self.pool.get('decimal.precision').precision_get(cr, uid, 'Account'))) or writeoff_acc_id:
self.pool.get('account.move.line').reconcile(cr, uid, line_ids, 'manual', writeoff_acc_id, writeoff_period_id, writeoff_journal_id, context)
else:
code = invoice.currency_id.symbol
# TODO: use currency's formatting function
msg = _("Invoice partially paid: %s%s of %s%s (%s%s remaining).") % \
(pay_amount, code, invoice.amount_total, code, total, code)
self.message_post(cr, uid, [inv_id], body=msg, context=context)
self.pool.get('account.move.line').reconcile_partial(cr, uid, line_ids, 'manual', context)
# Update the stored value (fields.function), so we write to trigger recompute
self.pool.get('account.invoice').write(cr, uid, ids, {}, context=context)
return True
class account_invoice_line(osv.osv):
def _amount_line(self, cr, uid, ids, prop, unknow_none, unknow_dict):
res = {}
tax_obj = self.pool.get('account.tax')
cur_obj = self.pool.get('res.currency')
for line in self.browse(cr, uid, ids):
price = line.price_unit * (1-(line.discount or 0.0)/100.0)
taxes = tax_obj.compute_all(cr, uid, line.invoice_line_tax_id, price, line.quantity, product=line.product_id, partner=line.invoice_id.partner_id)
res[line.id] = taxes['total']
if line.invoice_id:
cur = line.invoice_id.currency_id
res[line.id] = cur_obj.round(cr, uid, cur, res[line.id])
return res
def _price_unit_default(self, cr, uid, context=None):
if context is None:
context = {}
if context.get('check_total', False):
t = context['check_total']
for l in context.get('invoice_line', {}):
if isinstance(l, (list, tuple)) and len(l) >= 3 and l[2]:
tax_obj = self.pool.get('account.tax')
p = l[2].get('price_unit', 0) * (1-l[2].get('discount', 0)/100.0)
t = t - (p * l[2].get('quantity'))
taxes = l[2].get('invoice_line_tax_id')
if len(taxes[0]) >= 3 and taxes[0][2]:
taxes = tax_obj.browse(cr, uid, list(taxes[0][2]))
for tax in tax_obj.compute_all(cr, uid, taxes, p,l[2].get('quantity'), l[2].get('product_id', False), context.get('partner_id', False))['taxes']:
t = t - tax['amount']
return t
return 0
_name = "account.invoice.line"
_description = "Invoice Line"
_order = "invoice_id,sequence,id"
_columns = {
'name': fields.text('Description', required=True),
'origin': fields.char('Source Document', size=256, help="Reference of the document that produced this invoice."),
'sequence': fields.integer('Sequence', help="Gives the sequence of this line when displaying the invoice."),
'invoice_id': fields.many2one('account.invoice', 'Invoice Reference', ondelete='cascade', select=True),
'uos_id': fields.many2one('product.uom', 'Unit of Measure', ondelete='set null', select=True),
'product_id': fields.many2one('product.product', 'Product', ondelete='set null', select=True),
'account_id': fields.many2one('account.account', 'Account', required=True, domain=[('type','<>','view'), ('type', '<>', 'closed')], help="The income or expense account related to the selected product."),
'price_unit': fields.float('Unit Price', required=True, digits_compute= dp.get_precision('Product Price')),
'price_subtotal': fields.function(_amount_line, string='Amount', type="float",
digits_compute= dp.get_precision('Account'), store=True),
'quantity': fields.float('Quantity', digits_compute= dp.get_precision('Product Unit of Measure'), required=True),
'discount': fields.float('Discount (%)', digits_compute= dp.get_precision('Discount')),
'invoice_line_tax_id': fields.many2many('account.tax', 'account_invoice_line_tax', 'invoice_line_id', 'tax_id', 'Taxes', domain=[('parent_id','=',False)]),
'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic Account'),
'company_id': fields.related('invoice_id','company_id',type='many2one',relation='res.company',string='Company', store=True, readonly=True),
'partner_id': fields.related('invoice_id','partner_id',type='many2one',relation='res.partner',string='Partner',store=True)
}
def _default_account_id(self, cr, uid, context=None):
# XXX this gets the default account for the user's company,
# it should get the default account for the invoice's company
# however, the invoice's company does not reach this point
if context is None:
context = {}
if context.get('type') in ('out_invoice','out_refund'):
prop = self.pool.get('ir.property').get(cr, uid, 'property_account_income_categ', 'product.category', context=context)
else:
prop = self.pool.get('ir.property').get(cr, uid, 'property_account_expense_categ', 'product.category', context=context)
return prop and prop.id or False
_defaults = {
'quantity': 1,
'discount': 0.0,
'price_unit': _price_unit_default,
'account_id': _default_account_id,
'sequence': 10,
}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
if context is None:
context = {}
res = super(account_invoice_line,self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
if context.get('type', False):
doc = etree.XML(res['arch'])
for node in doc.xpath("//field[@name='product_id']"):
if context['type'] in ('in_invoice', 'in_refund'):
node.set('domain', "[('purchase_ok', '=', True)]")
else:
node.set('domain', "[('sale_ok', '=', True)]")
res['arch'] = etree.tostring(doc)
return res
def product_id_change(self, cr, uid, ids, product, uom_id, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, currency_id=False, context=None, company_id=None):
if context is None:
context = {}
company_id = company_id if company_id != None else context.get('company_id',False)
context = dict(context)
context.update({'company_id': company_id, 'force_company': company_id})
if not partner_id:
raise osv.except_osv(_('No Partner Defined!'),_("You must first select a partner!") )
if not product:
if type in ('in_invoice', 'in_refund'):
return {'value': {}, 'domain':{'product_uom':[]}}
else:
return {'value': {'price_unit': 0.0}, 'domain':{'product_uom':[]}}
part = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
fpos_obj = self.pool.get('account.fiscal.position')
fpos = fposition_id and fpos_obj.browse(cr, uid, fposition_id, context=context) or False
if part.lang:
context.update({'lang': part.lang})
result = {}
res = self.pool.get('product.product').browse(cr, uid, product, context=context)
if type in ('out_invoice','out_refund'):
a = res.property_account_income.id
if not a:
a = res.categ_id.property_account_income_categ.id
else:
a = res.property_account_expense.id
if not a:
a = res.categ_id.property_account_expense_categ.id
a = fpos_obj.map_account(cr, uid, fpos, a)
if a:
result['account_id'] = a
if type in ('out_invoice', 'out_refund'):
taxes = res.taxes_id and res.taxes_id or (a and self.pool.get('account.account').browse(cr, uid, a, context=context).tax_ids or False)
else:
taxes = res.supplier_taxes_id and res.supplier_taxes_id or (a and self.pool.get('account.account').browse(cr, uid, a, context=context).tax_ids or False)
tax_id = fpos_obj.map_tax(cr, uid, fpos, taxes)
if type in ('in_invoice', 'in_refund'):
result.update( {'price_unit': price_unit or res.standard_price,'invoice_line_tax_id': tax_id} )
else:
result.update({'price_unit': res.list_price, 'invoice_line_tax_id': tax_id})
result['name'] = res.partner_ref
result['uos_id'] = uom_id or res.uom_id.id
if res.description:
result['name'] += '\n'+res.description
domain = {'uos_id':[('category_id','=',res.uom_id.category_id.id)]}
res_final = {'value':result, 'domain':domain}
if not company_id or not currency_id:
return res_final
company = self.pool.get('res.company').browse(cr, uid, company_id, context=context)
currency = self.pool.get('res.currency').browse(cr, uid, currency_id, context=context)
if company.currency_id.id != currency.id:
if type in ('in_invoice', 'in_refund'):
res_final['value']['price_unit'] = res.standard_price
new_price = res_final['value']['price_unit'] * currency.rate
res_final['value']['price_unit'] = new_price
if result['uos_id'] and result['uos_id'] != res.uom_id.id:
selected_uom = self.pool.get('product.uom').browse(cr, uid, result['uos_id'], context=context)
new_price = self.pool.get('product.uom')._compute_price(cr, uid, res.uom_id.id, res_final['value']['price_unit'], result['uos_id'])
res_final['value']['price_unit'] = new_price
return res_final
def uos_id_change(self, cr, uid, ids, product, uom, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, currency_id=False, context=None, company_id=None):
if context is None:
context = {}
company_id = company_id if company_id != None else context.get('company_id',False)
context = dict(context)
context.update({'company_id': company_id})
warning = {}
res = self.product_id_change(cr, uid, ids, product, uom, qty, name, type, partner_id, fposition_id, price_unit, currency_id, context=context)
if not uom:
res['value']['price_unit'] = 0.0
if product and uom:
prod = self.pool.get('product.product').browse(cr, uid, product, context=context)
prod_uom = self.pool.get('product.uom').browse(cr, uid, uom, context=context)
if prod.uom_id.category_id.id != prod_uom.category_id.id:
warning = {
'title': _('Warning!'),
'message': _('The selected unit of measure is not compatible with the unit of measure of the product.')
}
res['value'].update({'uos_id': prod.uom_id.id})
return {'value': res['value'], 'warning': warning}
return res
def move_line_get(self, cr, uid, invoice_id, context=None):
res = []
tax_obj = self.pool.get('account.tax')
cur_obj = self.pool.get('res.currency')
if context is None:
context = {}
inv = self.pool.get('account.invoice').browse(cr, uid, invoice_id, context=context)
company_currency = self.pool['res.company'].browse(cr, uid, inv.company_id.id).currency_id.id
for line in inv.invoice_line:
mres = self.move_line_get_item(cr, uid, line, context)
if not mres:
continue
res.append(mres)
tax_code_found= False
for tax in tax_obj.compute_all(cr, uid, line.invoice_line_tax_id,
(line.price_unit * (1.0 - (line['discount'] or 0.0) / 100.0)),
line.quantity, line.product_id,
inv.partner_id)['taxes']:
if inv.type in ('out_invoice', 'in_invoice'):
tax_code_id = tax['base_code_id']
tax_amount = line.price_subtotal * tax['base_sign']
else:
tax_code_id = tax['ref_base_code_id']
tax_amount = line.price_subtotal * tax['ref_base_sign']
if tax_code_found:
if not tax_code_id:
continue
res.append(self.move_line_get_item(cr, uid, line, context))
res[-1]['price'] = 0.0
res[-1]['account_analytic_id'] = False
elif not tax_code_id:
continue
tax_code_found = True
res[-1]['tax_code_id'] = tax_code_id
res[-1]['tax_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, tax_amount, context={'date': inv.date_invoice})
return res
def move_line_get_item(self, cr, uid, line, context=None):
return {
'type':'src',
'name': line.name.split('\n')[0][:64],
'price_unit':line.price_unit,
'quantity':line.quantity,
'price':line.price_subtotal,
'account_id':line.account_id.id,
'product_id':line.product_id.id,
'uos_id':line.uos_id.id,
'account_analytic_id':line.account_analytic_id.id,
'taxes':line.invoice_line_tax_id,
}
#
# Set the tax field according to the account and the fiscal position
#
def onchange_account_id(self, cr, uid, ids, product_id, partner_id, inv_type, fposition_id, account_id):
if not account_id:
return {}
unique_tax_ids = []
fpos = fposition_id and self.pool.get('account.fiscal.position').browse(cr, uid, fposition_id) or False
account = self.pool.get('account.account').browse(cr, uid, account_id)
if not product_id:
taxes = account.tax_ids
unique_tax_ids = self.pool.get('account.fiscal.position').map_tax(cr, uid, fpos, taxes)
else:
product_change_result = self.product_id_change(cr, uid, ids, product_id, False, type=inv_type,
partner_id=partner_id, fposition_id=fposition_id,
company_id=account.company_id.id)
if product_change_result and 'value' in product_change_result and 'invoice_line_tax_id' in product_change_result['value']:
unique_tax_ids = product_change_result['value']['invoice_line_tax_id']
return {'value':{'invoice_line_tax_id': unique_tax_ids}}
account_invoice_line()
class account_invoice_tax(osv.osv):
_name = "account.invoice.tax"
_description = "Invoice Tax"
def _count_factor(self, cr, uid, ids, name, args, context=None):
res = {}
for invoice_tax in self.browse(cr, uid, ids, context=context):
res[invoice_tax.id] = {
'factor_base': 1.0,
'factor_tax': 1.0,
}
if invoice_tax.amount <> 0.0:
factor_tax = invoice_tax.tax_amount / invoice_tax.amount
res[invoice_tax.id]['factor_tax'] = factor_tax
if invoice_tax.base <> 0.0:
factor_base = invoice_tax.base_amount / invoice_tax.base
res[invoice_tax.id]['factor_base'] = factor_base
return res
_columns = {
'invoice_id': fields.many2one('account.invoice', 'Invoice Line', ondelete='cascade', select=True),
'name': fields.char('Tax Description', size=64, required=True),
'account_id': fields.many2one('account.account', 'Tax Account', required=True, domain=[('type','<>','view'),('type','<>','income'), ('type', '<>', 'closed')]),
'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic account'),
'base': fields.float('Base', digits_compute=dp.get_precision('Account')),
'amount': fields.float('Amount', digits_compute=dp.get_precision('Account')),
'manual': fields.boolean('Manual'),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of invoice tax."),
'base_code_id': fields.many2one('account.tax.code', 'Base Code', help="The account basis of the tax declaration."),
'base_amount': fields.float('Base Code Amount', digits_compute=dp.get_precision('Account')),
'tax_code_id': fields.many2one('account.tax.code', 'Tax Code', help="The tax basis of the tax declaration."),
'tax_amount': fields.float('Tax Code Amount', digits_compute=dp.get_precision('Account')),
'company_id': fields.related('account_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
'factor_base': fields.function(_count_factor, string='Multiplication factor for Base code', type='float', multi="all"),
'factor_tax': fields.function(_count_factor, string='Multiplication factor for Tax code', type='float', multi="all")
}
def base_change(self, cr, uid, ids, base, currency_id=False, company_id=False, date_invoice=False):
cur_obj = self.pool.get('res.currency')
company_obj = self.pool.get('res.company')
company_currency = False
factor = 1
if ids:
factor = self.read(cr, uid, ids[0], ['factor_base'])['factor_base']
if company_id:
company_currency = company_obj.read(cr, uid, [company_id], ['currency_id'])[0]['currency_id'][0]
if currency_id and company_currency:
base = cur_obj.compute(cr, uid, currency_id, company_currency, base*factor, context={'date': date_invoice or time.strftime('%Y-%m-%d')}, round=False)
return {'value': {'base_amount':base}}
def amount_change(self, cr, uid, ids, amount, currency_id=False, company_id=False, date_invoice=False):
cur_obj = self.pool.get('res.currency')
company_obj = self.pool.get('res.company')
company_currency = False
factor = 1
if ids:
factor = self.read(cr, uid, ids[0], ['factor_tax'])['factor_tax']
if company_id:
company_currency = company_obj.read(cr, uid, [company_id], ['currency_id'])[0]['currency_id'][0]
if currency_id and company_currency:
amount = cur_obj.compute(cr, uid, currency_id, company_currency, amount*factor, context={'date': date_invoice or time.strftime('%Y-%m-%d')}, round=False)
return {'value': {'tax_amount': amount}}
_order = 'sequence'
_defaults = {
'manual': 1,
'base_amount': 0.0,
'tax_amount': 0.0,
}
def compute(self, cr, uid, invoice_id, context=None):
tax_grouped = {}
tax_obj = self.pool.get('account.tax')
cur_obj = self.pool.get('res.currency')
inv = self.pool.get('account.invoice').browse(cr, uid, invoice_id, context=context)
cur = inv.currency_id
company_currency = self.pool['res.company'].browse(cr, uid, inv.company_id.id).currency_id.id
for line in inv.invoice_line:
for tax in tax_obj.compute_all(cr, uid, line.invoice_line_tax_id, (line.price_unit* (1-(line.discount or 0.0)/100.0)), line.quantity, line.product_id, inv.partner_id)['taxes']:
val={}
val['invoice_id'] = inv.id
val['name'] = tax['name']
val['amount'] = tax['amount']
val['manual'] = False
val['sequence'] = tax['sequence']
val['base'] = cur_obj.round(cr, uid, cur, tax['price_unit'] * line['quantity'])
if inv.type in ('out_invoice','in_invoice'):
val['base_code_id'] = tax['base_code_id']
val['tax_code_id'] = tax['tax_code_id']
val['base_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, val['base'] * tax['base_sign'], context={'date': inv.date_invoice or time.strftime('%Y-%m-%d')}, round=False)
val['tax_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, val['amount'] * tax['tax_sign'], context={'date': inv.date_invoice or time.strftime('%Y-%m-%d')}, round=False)
val['account_id'] = tax['account_collected_id'] or line.account_id.id
val['account_analytic_id'] = tax['account_analytic_collected_id']
else:
val['base_code_id'] = tax['ref_base_code_id']
val['tax_code_id'] = tax['ref_tax_code_id']
val['base_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, val['base'] * tax['ref_base_sign'], context={'date': inv.date_invoice or time.strftime('%Y-%m-%d')}, round=False)
val['tax_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, val['amount'] * tax['ref_tax_sign'], context={'date': inv.date_invoice or time.strftime('%Y-%m-%d')}, round=False)
val['account_id'] = tax['account_paid_id'] or line.account_id.id
val['account_analytic_id'] = tax['account_analytic_paid_id']
key = (val['tax_code_id'], val['base_code_id'], val['account_id'], val['account_analytic_id'])
if not key in tax_grouped:
tax_grouped[key] = val
else:
tax_grouped[key]['amount'] += val['amount']
tax_grouped[key]['base'] += val['base']
tax_grouped[key]['base_amount'] += val['base_amount']
tax_grouped[key]['tax_amount'] += val['tax_amount']
for t in tax_grouped.values():
t['base'] = cur_obj.round(cr, uid, cur, t['base'])
t['amount'] = cur_obj.round(cr, uid, cur, t['amount'])
t['base_amount'] = cur_obj.round(cr, uid, cur, t['base_amount'])
t['tax_amount'] = cur_obj.round(cr, uid, cur, t['tax_amount'])
return tax_grouped
def move_line_get(self, cr, uid, invoice_id):
res = []
cr.execute('SELECT * FROM account_invoice_tax WHERE invoice_id=%s', (invoice_id,))
for t in cr.dictfetchall():
if not t['amount'] \
and not t['tax_code_id'] \
and not t['tax_amount']:
continue
res.append({
'type':'tax',
'name':t['name'],
'price_unit': t['amount'],
'quantity': 1,
'price': t['amount'] or 0.0,
'account_id': t['account_id'],
'tax_code_id': t['tax_code_id'],
'tax_amount': t['tax_amount'],
'account_analytic_id': t['account_analytic_id'],
})
return res
class res_partner(osv.osv):
""" Inherits partner and adds invoice information in the partner form """
_inherit = 'res.partner'
_columns = {
'invoice_ids': fields.one2many('account.invoice.line', 'partner_id', 'Invoices', readonly=True),
}
def _find_accounting_partner(self, partner):
'''
Find the partner for which the accounting entries will be created
'''
# FIXME: after 7.0, to replace by function field partner.commercial_partner_id
#if the chosen partner is not a company and has a parent company, use the parent for the journal entries
#because you want to invoice 'Agrolait, accounting department' but the journal items are for 'Agrolait'
while not partner.is_company and partner.parent_id:
partner = partner.parent_id
return partner
def copy(self, cr, uid, id, default=None, context=None):
default = default or {}
default.update({'invoice_ids' : []})
return super(res_partner, self).copy(cr, uid, id, default, context)
class mail_compose_message(osv.Model):
_inherit = 'mail.compose.message'
def send_mail(self, cr, uid, ids, context=None):
context = context or {}
if context.get('default_model') == 'account.invoice' and context.get('default_res_id') and context.get('mark_invoice_as_sent'):
context = dict(context, mail_post_autofollow=True)
self.pool.get('account.invoice').write(cr, uid, [context['default_res_id']], {'sent': True}, context=context)
return super(mail_compose_message, self).send_mail(cr, uid, ids, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
lakewood999/Ciphers | rcipherx.py | 1 | 5693 | '''
=================================================================================================
R Cipher Suite
Includes all variants of the R cipher
=================================================================================================
Developed by: ProgramRandom, a division of RandomCorporations
A Page For This Project Will Be Created Soon On lakewood999.github.io
Visit my webpage at: https://lakewood999.github.io -- Note that this is my personal page, not the RandomCorporations page
=================================================================================================
What is the R cipher: This is just a random cipher that I came up with. I will not claim this is a good or perfect cipher; it's just something I decided to make. The R cipher is an improved version of the Caesar cipher
Root of the name: R cipher
-Well, cipher is just what it is, and R stands for random, or things being randomly generated
=================================================================================================
License:
You are free to use this script free of charge; however, I am not responsible for any problems caused by this script. By using this program, you agree not to hold me liable for any charges related to this program.
You are free to modify and distribute this software (free of charge), but you are NOT allowed to commercialize (sell) this software. Please attribute this program to me if you are sharing or re-distributing it.
=================================================================================================
Status:
This project is currently a WIP
-Variant "i" of the R cipher coming up
Version: Version 1: The X Update
R Cipher X - Progress: 100%
=================================================================================================
'''
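# A worked example of the scheme described above: every letter gets its own
# random shift, so with shifts [3, 5] the text "hi" encrypts to "kn"
# (h=8 -> 8+3=11 -> k, i=9 -> 9+5=14 -> n), the key is printed as "3, 5",
# and decryption simply subtracts the same shifts again.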
import random
def letterout(x):
out = ""
x = str(x)
if x == "1":
out = "a"
elif x == "2":
out = "b"
elif x == "3":
out = "c"
elif x == "4":
out = "d"
elif x == "5":
out = "e"
elif x == "6":
out = "f"
elif x == "7":
out = "g"
elif x == "8":
out = "h"
elif x == "9":
out = "i"
elif x == "10":
out = "j"
elif x == "11":
out = "k"
elif x == "12":
out = "l"
elif x == "13":
out = "m"
elif x == "14":
out = "n"
elif x == "15":
out = "o"
elif x == "16":
out = "p"
elif x == "17":
out = "q"
elif x == "18":
out = "r"
elif x == "19":
out = "s"
elif x == "20":
out = "t"
elif x == "21":
out = "u"
elif x == "22":
out = "v"
elif x == "23":
out = "w"
elif x == "24":
out = "x"
elif x == "25":
out = "y"
elif x == "26":
out = "z"
return out
#This script just returns the number depending on the input -- WIP: need to alternate
def numberout(x):
out = ""
if x == "a":
out = "1"
elif x == "":
out = "0"
elif x == "b":
out = "2"
elif x == "c":
out = "3"
elif x == "d":
out = "4"
elif x == "e":
out = "5"
elif x == "f":
out = "6"
elif x == "g":
out = "7"
elif x == "h":
out = "8"
elif x == "i":
out = "9"
elif x == "j":
out = "10"
elif x == "k":
out = "11"
elif x == "l":
out = "12"
elif x == "m":
out = "13"
elif x == "n":
out = "14"
elif x == "o":
out = "15"
elif x == "p":
out = "16"
elif x == "q":
out = "17"
elif x == "r":
out = "18"
elif x == "s":
out = "19"
elif x == "t":
out = "20"
elif x == "u":
out = "21"
elif x == "v":
out = "22"
elif x == "w":
out = "23"
elif x == "x":
out = "24"
elif x == "y":
out = "25"
elif x == "z":
out = "26"
return out
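# Equivalent sketch: for a lowercase letter x, numberout(x) is simply
# str(ord(x) - ord('a') + 1); the explicit table above just keeps the mapping
# easy to read and tweak.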
def rcipherx(x):
#This script just returns the letter depending on the input
#This is the function that encrypts the text
def encrypt(text):
encrypted = ""
key = ""
totalscan = len(text)
scan = 0
while scan < totalscan:
prekey = random.randint(1, 26)
letter = text[scan]
letternum = numberout(letter)
encryptout = ""
if letternum == "":
encryptout = " "
prekey = ""
else:
lettersum = prekey+int(letternum)
if lettersum > 26:
lettersum = lettersum % 26
encryptout = letterout(lettersum)
if key != "":
if prekey == "":
key = key
else:
key = key + ", " + str(prekey)
else:
if prekey == "":
key = key
else:
key = key + str(prekey)
encrypted += encryptout
scan += 1
print("Your encrypted message: "+encrypted)
print("Here is your key: "+key)
def decrypt(text):
decrypted = ""
key = input("What is the key(Key Numbers Must Be Separated By Commas With Spaces, e.g. 1, 2, 4): ")
keylist = key.split(', ')
print("Warning: Your key length must be equal to the number of characters in the text your are trying to decrypt, or this decryption will be unsuccessful")
totalscan = len(text)
scan = 0
keyscan = 0
while scan < totalscan:
letter = text[scan]
letternum = numberout(letter)
decryptout = ""
if letternum == "":
decryptout = " "
scan = scan +1
else:
decryptout = int(letternum) - int(keylist[keyscan])
if decryptout <= 0: # a difference of 0 must also wrap around to 'z'
decryptout = letterout(26-abs(decryptout))
else:
decryptout = letterout(decryptout)
scan = scan + 1
keyscan = keyscan+1
decrypted += str(decryptout)
print("Your decrpyted message is: "+decrypted)
print("This message was decrypted with a key of: "+key)
if x == "encrypt":
encrypt(input("Please type in the text you would like to encrypt: "))
elif x == "decrypt":
decrypt(input("Please type in the text you would like to decrypt: "))
#encrypt(input("Please type in the text you would like to encrypt: "))
#decrypt(input("Please type in the text you would like to decrypt: "))
#rcipherx()
| mit |
eahneahn/free | lib/python2.7/site-packages/pygments/styles/vim.py | 364 | 1976 | # -*- coding: utf-8 -*-
"""
pygments.styles.vim
~~~~~~~~~~~~~~~~~~~
A highlighting style for Pygments, inspired by vim.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Token
class VimStyle(Style):
"""
Styles somewhat like vim 7.0
"""
background_color = "#000000"
highlight_color = "#222222"
default_style = "#cccccc"
styles = {
Token: "#cccccc",
Whitespace: "",
Comment: "#000080",
Comment.Preproc: "",
Comment.Special: "bold #cd0000",
Keyword: "#cdcd00",
Keyword.Declaration: "#00cd00",
Keyword.Namespace: "#cd00cd",
Keyword.Pseudo: "",
Keyword.Type: "#00cd00",
Operator: "#3399cc",
Operator.Word: "#cdcd00",
Name: "",
Name.Class: "#00cdcd",
Name.Builtin: "#cd00cd",
Name.Exception: "bold #666699",
Name.Variable: "#00cdcd",
String: "#cd0000",
Number: "#cd00cd",
Generic.Heading: "bold #000080",
Generic.Subheading: "bold #800080",
Generic.Deleted: "#cd0000",
Generic.Inserted: "#00cd00",
Generic.Error: "#FF0000",
Generic.Emph: "italic",
Generic.Strong: "bold",
Generic.Prompt: "bold #000080",
Generic.Output: "#888",
Generic.Traceback: "#04D",
Error: "border:#FF0000"
}
| agpl-3.0 |
SnappyDataInc/spark | examples/src/main/python/mllib/fpgrowth_example.py | 158 | 1280 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $example on$
from pyspark.mllib.fpm import FPGrowth
# $example off$
from pyspark import SparkContext
if __name__ == "__main__":
sc = SparkContext(appName="FPGrowth")
# $example on$
data = sc.textFile("data/mllib/sample_fpgrowth.txt")
transactions = data.map(lambda line: line.strip().split(' '))
model = FPGrowth.train(transactions, minSupport=0.2, numPartitions=10)
result = model.freqItemsets().collect()
for fi in result:
print(fi)
# $example off$
| apache-2.0 |
webjunkie/python-social-auth | social/backends/slack.py | 68 | 2414 | """
Slack OAuth2 backend, docs at:
http://psa.matiasaguirre.net/docs/backends/slack.html
https://api.slack.com/docs/oauth
"""
import re
from social.backends.oauth import BaseOAuth2
class SlackOAuth2(BaseOAuth2):
"""Slack OAuth authentication backend"""
name = 'slack'
AUTHORIZATION_URL = 'https://slack.com/oauth/authorize'
ACCESS_TOKEN_URL = 'https://slack.com/api/oauth.access'
ACCESS_TOKEN_METHOD = 'POST'
SCOPE_SEPARATOR = ','
REDIRECT_STATE = False
EXTRA_DATA = [
('id', 'id'),
('name', 'name'),
('real_name', 'real_name')
]
def get_user_details(self, response):
"""Return user details from Slack account"""
# Build the username with the team $username@$team_url
# Necessary to get unique names for all of slack
username = response.get('user')
if self.setting('USERNAME_WITH_TEAM', True):
match = re.search(r'//([^.]+)\.slack\.com', response['url'])
username = '{0}@{1}'.format(username, match.group(1))
out = {'username': username}
if 'profile' in response:
out.update({
'email': response['profile'].get('email'),
'fullname': response['profile'].get('real_name'),
'first_name': response['profile'].get('first_name'),
'last_name': response['profile'].get('last_name')
})
return out
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
# Has to be two calls, because the users.info requires a username,
# And we want the team information. Check auth.test details at:
# https://api.slack.com/methods/auth.test
auth_test = self.get_json('https://slack.com/api/auth.test', params={
'token': access_token
})
# https://api.slack.com/methods/users.info
user_info = self.get_json('https://slack.com/api/users.info', params={
'token': access_token,
'user': auth_test.get('user_id')
})
if user_info.get('user'):
# Capture the user data, if available based on the scope
auth_test.update(user_info['user'])
# Clean up user_id vs id
auth_test['id'] = auth_test['user_id']
auth_test.pop('ok', None)
auth_test.pop('user_id', None)
return auth_test
| bsd-3-clause |
DESHRAJ/crowdsource-platform | crowdsourcing/models.py | 4 | 22804 | from django.contrib.auth.models import User
from django.db import models
from django.utils import timezone
from oauth2client.django_orm import FlowField, CredentialsField
from crowdsourcing.utils import get_delimiter
import pandas as pd
import os
class RegistrationModel(models.Model):
user = models.OneToOneField(User)
activation_key = models.CharField(max_length=40)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class PasswordResetModel(models.Model):
user = models.OneToOneField(User)
reset_key = models.CharField(max_length=40)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Region(models.Model):
name = models.CharField(max_length=64, error_messages={'required': 'Please specify the region!', })
code = models.CharField(max_length=16, error_messages={'required': 'Please specify the region code!', })
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Country(models.Model):
name = models.CharField(max_length=64, error_messages={'required': 'Please specify the country!', })
code = models.CharField(max_length=8, error_messages={'required': 'Please specify the country code!', })
region = models.ForeignKey(Region)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
def __unicode__(self):
return u'%s' % (self.name)
class City(models.Model):
name = models.CharField(max_length=64, error_messages={'required': 'Please specify the city!', })
country = models.ForeignKey(Country)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
def __unicode__(self):
return u'%s' % (self.name)
class Address(models.Model):
street = models.CharField(max_length=128, error_messages={'required': 'Please specify the street name!', })
country = models.ForeignKey(Country)
city = models.ForeignKey(City)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
def __unicode__(self):
return u'%s, %s, %s' % (self.street, self.city, self.country)
class Role(models.Model):
name = models.CharField(max_length=32, unique=True, error_messages={'required': 'Please specify the role name!',
'unique': 'The role %(value)r already exists. Please provide another name!'})
is_active = models.BooleanField(default=True)
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Language(models.Model):
name = models.CharField(max_length=64, error_messages={'required': 'Please specify the language!'})
iso_code = models.CharField(max_length=8)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class UserProfile(models.Model):
user = models.OneToOneField(User)
gender_choices = (('M', 'Male'), ('F', 'Female'))
gender = models.CharField(max_length=1, choices=gender_choices)
address = models.ForeignKey(Address, null=True)
birthday = models.DateField(null=True, error_messages={'invalid': "Please enter a correct date format"})
nationality = models.ManyToManyField(Country, through='UserCountry')
verified = models.BooleanField(default=False)
picture = models.BinaryField(null=True)
friends = models.ManyToManyField('self', through='Friendship',
symmetrical=False)
roles = models.ManyToManyField(Role, through='UserRole')
deleted = models.BooleanField(default=False)
languages = models.ManyToManyField(Language, through='UserLanguage')
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class UserCountry(models.Model):
country = models.ForeignKey(Country)
user = models.ForeignKey(UserProfile)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Skill(models.Model):
name = models.CharField(max_length=128, error_messages={'required': "Please enter the skill name!"})
description = models.CharField(max_length=512, error_messages={'required': "Please enter the skill description!"})
verified = models.BooleanField(default=False)
parent = models.ForeignKey('self', null=True)
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Worker(models.Model):
profile = models.OneToOneField(UserProfile)
skills = models.ManyToManyField(Skill, through='WorkerSkill')
deleted = models.BooleanField(default=False)
alias = models.CharField(max_length=32, error_messages={'required': "Please enter an alias!"})
class WorkerSkill(models.Model):
worker = models.ForeignKey(Worker)
skill = models.ForeignKey(Skill)
level = models.IntegerField(null=True)
verified = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
unique_together = ('worker', 'skill')
class Requester(models.Model):
profile = models.OneToOneField(UserProfile)
alias = models.CharField(max_length=32, error_messages={'required': "Please enter an alias!"})
class UserRole(models.Model):
user_profile = models.ForeignKey(UserProfile)
role = models.ForeignKey(Role)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Friendship(models.Model):
user_source = models.ForeignKey(UserProfile, related_name='user_source')
user_target = models.ForeignKey(UserProfile, related_name='user_target')
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Category(models.Model):
name = models.CharField(max_length=128, error_messages={'required': "Please enter the category name!"})
parent = models.ForeignKey('self', null=True)
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Project(models.Model):
name = models.CharField(max_length=128, error_messages={'required': "Please enter the project name!"})
start_date = models.DateTimeField(auto_now_add=True, auto_now=False)
end_date = models.DateTimeField(auto_now_add=True, auto_now=False)
owner = models.ForeignKey(Requester, related_name='project_owner')
description = models.CharField(max_length=1024, default='')
collaborators = models.ManyToManyField(Requester, through='ProjectRequester')
keywords = models.TextField(null=True)
save_to_drive = models.BooleanField(default=False)
deleted = models.BooleanField(default=False)
categories = models.ManyToManyField(Category, through='ProjectCategory')
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class ProjectRequester(models.Model):
"""
Tracks the list of requesters that collaborate on a specific project
"""
requester = models.ForeignKey(Requester)
project = models.ForeignKey(Project)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
unique_together = ('requester', 'project')
class Template(models.Model):
name = models.CharField(max_length=128, error_messages={'required': "Please enter the template name!"})
owner = models.ForeignKey(UserProfile)
source_html = models.TextField(default=None, null=True)
price = models.FloatField(default=0)
share_with_others = models.BooleanField(default=False)
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Module(models.Model):
"""
aka Milestone
This is a group of similar tasks of the same kind.
Fields:
- repetition: number of times a task needs to be performed
"""
name = models.CharField(max_length=128, error_messages={'required': "Please enter the module name!"})
description = models.TextField(error_messages={'required': "Please enter the module description!"})
owner = models.ForeignKey(Requester)
project = models.ForeignKey(Project, related_name='modules')
categories = models.ManyToManyField(Category, through='ModuleCategory')
keywords = models.TextField(null=True)
# TODO: To be refined
statuses = ((1, "Created"),
(2, 'In Review'),
(3, 'In Progress'),
(4, 'Completed')
)
permission_types = ((1, "Others:Read+Write::Workers:Read+Write"),
(2, 'Others:Read::Workers:Read+Write'),
(3, 'Others:Read::Workers:Read'),
(4, 'Others:None::Workers:Read')
)
status = models.IntegerField(choices=statuses, default=1)
price = models.FloatField()
repetition = models.IntegerField(default=1)
module_timeout = models.IntegerField(default=0)
has_data_set = models.BooleanField(default=False)
data_set_location = models.CharField(max_length=256, default='No data set', null=True)
task_time = models.FloatField(default=0) # in minutes
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
template = models.ManyToManyField(Template, through='ModuleTemplate')
is_micro = models.BooleanField(default=True)
is_prototype = models.BooleanField(default=False)
min_rating = models.FloatField(default=0)
allow_feedback = models.BooleanField(default=True)
feedback_permissions = models.IntegerField(choices=permission_types, default=1)
class ModuleCategory(models.Model):
module = models.ForeignKey(Module)
category = models.ForeignKey(Category)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
unique_together = ('category', 'module')
class ProjectCategory(models.Model):
project = models.ForeignKey(Project)
category = models.ForeignKey(Category)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
unique_together = ('project', 'category')
class TemplateItem(models.Model):
name = models.CharField(max_length=128, error_messages={'required': "Please enter the name of the template item!"})
template = models.ForeignKey(Template, related_name='template_items')
id_string = models.CharField(max_length=128)
role = models.CharField(max_length=16)
icon = models.CharField(max_length=256, null=True)
data_source = models.CharField(max_length=256, null=True)
layout = models.CharField(max_length=16, default='column')
type = models.CharField(max_length=16)
sub_type = models.CharField(max_length=16)
values = models.TextField(null=True)
position = models.IntegerField()
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
ordering = ['position']
class ModuleTemplate(models.Model):
module = models.ForeignKey(Module)
template = models.ForeignKey(Template)
class TemplateItemProperties(models.Model):
template_item = models.ForeignKey(TemplateItem)
attribute = models.CharField(max_length=128)
operator = models.CharField(max_length=128)
value1 = models.CharField(max_length=128)
value2 = models.CharField(max_length=128)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Task(models.Model):
module = models.ForeignKey(Module, related_name='module_tasks')
# TODO: To be refined
statuses = ((1, "Created"),
(2, 'Accepted'),
(3, 'Assigned'),
(4, 'Finished')
)
status = models.IntegerField(choices=statuses, default=1)
data = models.TextField(null=True)
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
price = models.FloatField(default=0)
class TaskWorker(models.Model):
task = models.ForeignKey(Task, related_name='task_workers')
worker = models.ForeignKey(Worker)
statuses = ((1, 'In Progress'),
(2, 'Submitted'),
(3, 'Accepted'),
(4, 'Rejected'),
(5, 'Returned'),
(6, 'Skipped')
)
task_status = models.IntegerField(choices=statuses, default=1)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
is_paid = models.BooleanField(default=False)
class TaskWorkerResult(models.Model):
task_worker = models.ForeignKey(TaskWorker, related_name='task_worker_results')
result = models.TextField(null=True)
template_item = models.ForeignKey(TemplateItem)
# TODO: To be refined
statuses = ((1, 'Created'),
(2, 'Accepted'),
(3, 'Rejected')
)
status = models.IntegerField(choices=statuses, default=1)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class WorkerModuleApplication(models.Model):
worker = models.ForeignKey(Worker)
module = models.ForeignKey(Module)
# TODO: To be refined
statuses = ((1, "Created"),
(2, 'Accepted'),
(3, 'Rejected')
)
status = models.IntegerField(choices=statuses, default=1)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class ActivityLog(models.Model):
"""
Track all user's activities: Create, Update and Delete
"""
activity = models.CharField(max_length=512)
author = models.ForeignKey(User)
created_timestamp = models.DateTimeField(auto_now_add=False, auto_now=True)
class Qualification(models.Model):
module = models.ForeignKey(Module)
# TODO: To be refined
types = ((1, "Strict"),
(2, 'Flexible'))
type = models.IntegerField(choices=types, default=1)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class QualificationItem(models.Model):
qualification = models.ForeignKey(Qualification)
attribute = models.CharField(max_length=128)
operator = models.CharField(max_length=128)
value1 = models.CharField(max_length=128)
value2 = models.CharField(max_length=128)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class UserLanguage(models.Model):
language = models.ForeignKey(Language)
user = models.ForeignKey(UserProfile)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Currency(models.Model):
name = models.CharField(max_length=32)
iso_code = models.CharField(max_length=8)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class UserPreferences(models.Model):
user = models.OneToOneField(User)
language = models.ForeignKey(Language)
currency = models.ForeignKey(Currency)
login_alerts = models.SmallIntegerField(default=0)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class RequesterRanking(models.Model):
requester_name = models.CharField(max_length=128)
requester_payRank = models.FloatField()
requester_fairRank = models.FloatField()
requester_speedRank = models.FloatField()
requester_communicationRank = models.FloatField()
requester_numberofReviews = models.IntegerField(default=0)
class ModuleRating(models.Model):
worker = models.ForeignKey(Worker)
module = models.ForeignKey(Module)
value = models.IntegerField()
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
unique_together = ('worker', 'module')
class ModuleReview(models.Model):
worker = models.ForeignKey(Worker)
anonymous = models.BooleanField(default=False)
module = models.ForeignKey(Module)
comments = models.TextField()
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
unique_together = ('worker', 'module')
class FlowModel(models.Model):
id = models.OneToOneField(User, primary_key=True)
flow = FlowField()
class AccountModel(models.Model):
name = models.CharField(max_length=128)
type = models.CharField(max_length=16)
email = models.EmailField()
access_token = models.TextField(max_length=2048)
root = models.CharField(max_length=256)
is_active = models.IntegerField()
quota = models.BigIntegerField()
used_space = models.BigIntegerField()
assigned_space = models.BigIntegerField()
status = models.IntegerField(default=quota)
owner = models.ForeignKey(User)
class CredentialsModel(models.Model):
account = models.ForeignKey(AccountModel)
credential = CredentialsField()
class TemporaryFlowModel(models.Model):
user = models.ForeignKey(User)
type = models.CharField(max_length=16)
email = models.EmailField()
class BookmarkedProjects(models.Model):
profile = models.ForeignKey(UserProfile)
project = models.ForeignKey(Project)
class Conversation(models.Model):
subject = models.CharField(max_length=64)
sender = models.ForeignKey(User, related_name='sender')
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
deleted = models.BooleanField(default=False)
recipients = models.ManyToManyField(User, through='ConversationRecipient')
class Message(models.Model):
conversation = models.ForeignKey(Conversation, related_name='messages')
sender = models.ForeignKey(User)
body = models.TextField(max_length=8192)
deleted = models.BooleanField(default=False)
status = models.IntegerField(default=1) # 1:Sent 2:Delivered 3:Read
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class ConversationRecipient(models.Model):
recipient = models.ForeignKey(User, related_name='recipients')
conversation = models.ForeignKey(Conversation, related_name='conversation_recipient')
date_added = models.DateTimeField(auto_now_add=True, auto_now=False)
class UserMessage(models.Model):
message = models.ForeignKey(Message)
user = models.ForeignKey(User)
deleted = models.BooleanField(default=False)
class RequesterInputFile(models.Model):
# TODO: will need to save files on a server rather than in a temporary folder
file = models.FileField(upload_to='tmp/')
deleted = models.BooleanField(default=False)
def parse_csv(self):
delimiter = get_delimiter(self.file.name)
df = pd.DataFrame(pd.read_csv(self.file, sep=delimiter))
return df.to_dict(orient='records')
def delete(self, *args, **kwargs):
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
path = os.path.join(root, self.file.url[1:])
os.remove(path)
super(RequesterInputFile, self).delete(*args, **kwargs)
class WorkerRequesterRating(models.Model):
origin = models.ForeignKey(UserProfile, related_name='rating_origin')
target = models.ForeignKey(UserProfile, related_name='rating_target')
module = models.ForeignKey(Module, related_name='rating_module')
weight = models.FloatField(default=2)
origin_type = models.CharField(max_length=16)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Comment(models.Model):
sender = models.ForeignKey(UserProfile, related_name='comment_sender')
body = models.TextField(max_length=8192)
parent = models.ForeignKey('self', related_name='reply_to', null=True)
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
ordering = ['created_timestamp']
class ModuleComment(models.Model):
module = models.ForeignKey(Module, related_name='modulecomment_module')
comment = models.ForeignKey(Comment, related_name='modulecomment_comment')
deleted = models.BooleanField(default=False)
class TaskComment(models.Model):
task = models.ForeignKey(Task, related_name='taskcomment_task')
comment = models.ForeignKey(Comment, related_name='taskcomment_comment')
deleted = models.BooleanField(default=False) | mit |
andela-bojengwa/talk | venv/lib/python2.7/site-packages/pip/req/req_set.py | 79 | 24967 | from __future__ import absolute_import
import logging
import os
from pip._vendor import pkg_resources
from pip._vendor import requests
from pip.download import (url_to_path, unpack_url)
from pip.exceptions import (InstallationError, BestVersionAlreadyInstalled,
DistributionNotFound, PreviousBuildDirError)
from pip.locations import (PIP_DELETE_MARKER_FILENAME, build_prefix)
from pip.req.req_install import InstallRequirement
from pip.utils import (display_path, rmtree, dist_in_usersite,
_make_build_dir, normalize_path)
from pip.utils.logging import indent_log
from pip.vcs import vcs
from pip.wheel import wheel_ext
logger = logging.getLogger(__name__)
class Requirements(object):
def __init__(self):
self._keys = []
self._dict = {}
def keys(self):
return self._keys
def values(self):
return [self._dict[key] for key in self._keys]
def __contains__(self, item):
return item in self._keys
def __setitem__(self, key, value):
if key not in self._keys:
self._keys.append(key)
self._dict[key] = value
def __getitem__(self, key):
return self._dict[key]
def __repr__(self):
values = ['%s: %s' % (repr(k), repr(self[k])) for k in self.keys()]
return 'Requirements({%s})' % ', '.join(values)
class RequirementSet(object):
def __init__(self, build_dir, src_dir, download_dir, upgrade=False,
ignore_installed=False, as_egg=False, target_dir=None,
ignore_dependencies=False, force_reinstall=False,
use_user_site=False, session=None, pycompile=True,
isolated=False, wheel_download_dir=None):
if session is None:
raise TypeError(
"RequirementSet() missing 1 required keyword argument: "
"'session'"
)
self.build_dir = build_dir
self.src_dir = src_dir
self.download_dir = download_dir
self.upgrade = upgrade
self.ignore_installed = ignore_installed
self.force_reinstall = force_reinstall
self.requirements = Requirements()
# Mapping of alias: real_name
self.requirement_aliases = {}
self.unnamed_requirements = []
self.ignore_dependencies = ignore_dependencies
self.successfully_downloaded = []
self.successfully_installed = []
self.reqs_to_cleanup = []
self.as_egg = as_egg
self.use_user_site = use_user_site
self.target_dir = target_dir # set from --target option
self.session = session
self.pycompile = pycompile
self.isolated = isolated
if wheel_download_dir:
wheel_download_dir = normalize_path(wheel_download_dir)
self.wheel_download_dir = wheel_download_dir
def __str__(self):
reqs = [req for req in self.requirements.values()
if not req.comes_from]
reqs.sort(key=lambda req: req.name.lower())
return ' '.join([str(req.req) for req in reqs])
def add_requirement(self, install_req):
if not install_req.match_markers():
logger.debug("Ignore %s: markers %r don't match",
install_req.name, install_req.markers)
return
name = install_req.name
install_req.as_egg = self.as_egg
install_req.use_user_site = self.use_user_site
install_req.target_dir = self.target_dir
install_req.pycompile = self.pycompile
if not name:
# url or path requirement w/o an egg fragment
self.unnamed_requirements.append(install_req)
else:
if self.has_requirement(name):
raise InstallationError(
'Double requirement given: %s (already in %s, name=%r)'
% (install_req, self.get_requirement(name), name))
self.requirements[name] = install_req
# FIXME: what about other normalizations? E.g., _ vs. -?
if name.lower() != name:
self.requirement_aliases[name.lower()] = name
def has_requirement(self, project_name):
for name in project_name, project_name.lower():
if name in self.requirements or name in self.requirement_aliases:
return True
return False
@property
def has_requirements(self):
return list(self.requirements.values()) or self.unnamed_requirements
@property
def is_download(self):
if self.download_dir:
self.download_dir = os.path.expanduser(self.download_dir)
if os.path.exists(self.download_dir):
return True
else:
logger.critical('Could not find download directory')
raise InstallationError(
"Could not find or access download directory '%s'"
% display_path(self.download_dir))
return False
def get_requirement(self, project_name):
for name in project_name, project_name.lower():
if name in self.requirements:
return self.requirements[name]
if name in self.requirement_aliases:
return self.requirements[self.requirement_aliases[name]]
raise KeyError("No project with the name %r" % project_name)
def uninstall(self, auto_confirm=False):
for req in self.requirements.values():
req.uninstall(auto_confirm=auto_confirm)
req.commit_uninstall()
def locate_files(self):
# FIXME: duplicates code from prepare_files; relevant code should
# probably be factored out into a separate method
unnamed = list(self.unnamed_requirements)
reqs = list(self.requirements.values())
while reqs or unnamed:
if unnamed:
req_to_install = unnamed.pop(0)
else:
req_to_install = reqs.pop(0)
install_needed = True
if not self.ignore_installed and not req_to_install.editable:
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
if self.upgrade:
# don't uninstall conflict if user install and
# conflict is not user install
if not (self.use_user_site
and not dist_in_usersite(
req_to_install.satisfied_by
)):
req_to_install.conflicts_with = \
req_to_install.satisfied_by
req_to_install.satisfied_by = None
else:
install_needed = False
if req_to_install.satisfied_by:
logger.info(
'Requirement already satisfied (use --upgrade to '
'upgrade): %s',
req_to_install,
)
if req_to_install.editable:
if req_to_install.source_dir is None:
req_to_install.source_dir = req_to_install.build_location(
self.src_dir
)
elif install_needed:
req_to_install.source_dir = req_to_install.build_location(
self.build_dir,
)
if (req_to_install.source_dir is not None
and not os.path.isdir(req_to_install.source_dir)):
raise InstallationError(
'Could not install requirement %s because source folder %s'
' does not exist (perhaps --no-download was used without '
'first running an equivalent install with --no-install?)' %
(req_to_install, req_to_install.source_dir)
)
def prepare_files(self, finder):
"""
Prepare process. Create temp directories, download and/or unpack files.
"""
from pip.index import Link
unnamed = list(self.unnamed_requirements)
reqs = list(self.requirements.values())
while reqs or unnamed:
if unnamed:
req_to_install = unnamed.pop(0)
else:
req_to_install = reqs.pop(0)
install = True
best_installed = False
not_found = None
# ############################################# #
# # Search for archive to fulfill requirement # #
# ############################################# #
if not self.ignore_installed and not req_to_install.editable:
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
if self.upgrade:
if not self.force_reinstall and not req_to_install.url:
try:
url = finder.find_requirement(
req_to_install, self.upgrade)
except BestVersionAlreadyInstalled:
best_installed = True
install = False
except DistributionNotFound as exc:
not_found = exc
else:
# Avoid the need to call find_requirement again
req_to_install.url = url.url
if not best_installed:
# don't uninstall conflict if user install and
# conflict is not user install
if not (self.use_user_site
and not dist_in_usersite(
req_to_install.satisfied_by
)):
req_to_install.conflicts_with = \
req_to_install.satisfied_by
req_to_install.satisfied_by = None
else:
install = False
if req_to_install.satisfied_by:
if best_installed:
logger.info(
'Requirement already up-to-date: %s',
req_to_install,
)
else:
logger.info(
'Requirement already satisfied (use --upgrade to '
'upgrade): %s',
req_to_install,
)
if req_to_install.editable:
logger.info('Obtaining %s', req_to_install)
elif install:
if (req_to_install.url
and req_to_install.url.lower().startswith('file:')):
path = url_to_path(req_to_install.url)
logger.info('Processing %s', display_path(path))
else:
logger.info('Collecting %s', req_to_install)
with indent_log():
# ################################ #
# # vcs update or unpack archive # #
# ################################ #
is_wheel = False
if req_to_install.editable:
if req_to_install.source_dir is None:
location = req_to_install.build_location(self.src_dir)
req_to_install.source_dir = location
else:
location = req_to_install.source_dir
if not os.path.exists(self.build_dir):
_make_build_dir(self.build_dir)
req_to_install.update_editable(not self.is_download)
if self.is_download:
req_to_install.run_egg_info()
req_to_install.archive(self.download_dir)
else:
req_to_install.run_egg_info()
elif install:
# @@ if filesystem packages are not marked
                    # editable in a req, a non-deterministic error
# occurs when the script attempts to unpack the
# build directory
# NB: This call can result in the creation of a temporary
# build directory
location = req_to_install.build_location(
self.build_dir,
)
unpack = True
url = None
                    # If a checkout exists, it's unwise to keep going. Version
# inconsistencies are logged later, but do not fail the
# installation.
if os.path.exists(os.path.join(location, 'setup.py')):
raise PreviousBuildDirError(
"pip can't proceed with requirements '%s' due to a"
" pre-existing build directory (%s). This is "
"likely due to a previous installation that failed"
". pip is being responsible and not assuming it "
"can delete this. Please delete it and try again."
% (req_to_install, location)
)
else:
# FIXME: this won't upgrade when there's an existing
# package unpacked in `location`
if req_to_install.url is None:
if not_found:
raise not_found
url = finder.find_requirement(
req_to_install,
upgrade=self.upgrade,
)
else:
# FIXME: should req_to_install.url already be a
# link?
url = Link(req_to_install.url)
assert url
if url:
try:
if (
url.filename.endswith(wheel_ext)
and self.wheel_download_dir
):
# when doing 'pip wheel`
download_dir = self.wheel_download_dir
do_download = True
else:
download_dir = self.download_dir
do_download = self.is_download
unpack_url(
url, location, download_dir,
do_download, session=self.session,
)
except requests.HTTPError as exc:
logger.critical(
'Could not install requirement %s because '
'of error %s',
req_to_install,
exc,
)
raise InstallationError(
'Could not install requirement %s because '
'of HTTP error %s for URL %s' %
(req_to_install, exc, url)
)
else:
unpack = False
if unpack:
is_wheel = url and url.filename.endswith(wheel_ext)
if self.is_download:
req_to_install.source_dir = location
if not is_wheel:
# FIXME:https://github.com/pypa/pip/issues/1112
req_to_install.run_egg_info()
if url and url.scheme in vcs.all_schemes:
req_to_install.archive(self.download_dir)
elif is_wheel:
req_to_install.source_dir = location
req_to_install.url = url.url
else:
req_to_install.source_dir = location
req_to_install.run_egg_info()
req_to_install.assert_source_matches_version()
# req_to_install.req is only avail after unpack for URL
# pkgs repeat check_if_exists to uninstall-on-upgrade
# (#14)
if not self.ignore_installed:
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
if self.upgrade or self.ignore_installed:
# don't uninstall conflict if user install and
# conflict is not user install
if not (self.use_user_site
and not dist_in_usersite(
req_to_install.satisfied_by)):
req_to_install.conflicts_with = \
req_to_install.satisfied_by
req_to_install.satisfied_by = None
else:
logger.info(
'Requirement already satisfied (use '
'--upgrade to upgrade): %s',
req_to_install,
)
install = False
# ###################### #
# # parse dependencies # #
# ###################### #
if (req_to_install.extras):
logger.debug(
"Installing extra requirements: %r",
','.join(req_to_install.extras),
)
if is_wheel:
dist = list(
pkg_resources.find_distributions(location)
)[0]
else: # sdists
if req_to_install.satisfied_by:
dist = req_to_install.satisfied_by
else:
dist = req_to_install.get_dist()
# FIXME: shouldn't be globally added:
if dist.has_metadata('dependency_links.txt'):
finder.add_dependency_links(
dist.get_metadata_lines('dependency_links.txt')
)
if not self.ignore_dependencies:
for subreq in dist.requires(
req_to_install.extras):
if self.has_requirement(
subreq.project_name):
# FIXME: check for conflict
continue
subreq = InstallRequirement(
str(subreq),
req_to_install,
isolated=self.isolated,
)
reqs.append(subreq)
self.add_requirement(subreq)
if not self.has_requirement(req_to_install.name):
# 'unnamed' requirements will get added here
self.add_requirement(req_to_install)
# cleanup tmp src
if (self.is_download or
req_to_install._temp_build_dir is not None):
self.reqs_to_cleanup.append(req_to_install)
if install:
self.successfully_downloaded.append(req_to_install)
def cleanup_files(self):
"""Clean up files, remove builds."""
logger.debug('Cleaning up...')
with indent_log():
for req in self.reqs_to_cleanup:
req.remove_temporary_source()
if self._pip_has_created_build_dir():
logger.debug('Removing temporary dir %s...', self.build_dir)
rmtree(self.build_dir)
def _pip_has_created_build_dir(self):
return (
self.build_dir == build_prefix
and os.path.exists(
os.path.join(self.build_dir, PIP_DELETE_MARKER_FILENAME)
)
)
def install(self, install_options, global_options=(), *args, **kwargs):
"""
Install everything in this set (after having downloaded and unpacked
the packages)
"""
to_install = [r for r in self.requirements.values()[::-1]
if not r.satisfied_by]
# DISTRIBUTE TO SETUPTOOLS UPGRADE HACK (1 of 3 parts)
# move the distribute-0.7.X wrapper to the end because it does not
        # install a setuptools package. By moving it to the end, we ensure its
# setuptools dependency is handled first, which will provide the
# setuptools package
# TODO: take this out later
distribute_req = pkg_resources.Requirement.parse("distribute>=0.7")
for req in to_install:
if (req.name == 'distribute'
and req.installed_version is not None
and req.installed_version in distribute_req):
to_install.remove(req)
to_install.append(req)
if to_install:
logger.info(
'Installing collected packages: %s',
', '.join([req.name for req in to_install]),
)
with indent_log():
for requirement in to_install:
# DISTRIBUTE TO SETUPTOOLS UPGRADE HACK (1 of 3 parts)
# when upgrading from distribute-0.6.X to the new merged
# setuptools in py2, we need to force setuptools to uninstall
# distribute. In py3, which is always using distribute, this
# conversion is already happening in distribute's
# pkg_resources. It's ok *not* to check if setuptools>=0.7
                # because if someone were actually trying to upgrade from
                # distribute to setuptools 0.6.X, then all this could do is
                # actually help, although that upgrade path was certainly never
# "supported"
# TODO: remove this later
if requirement.name == 'setuptools':
try:
# only uninstall distribute<0.7. For >=0.7, setuptools
# will also be present, and that's what we need to
# uninstall
distribute_requirement = \
pkg_resources.Requirement.parse("distribute<0.7")
existing_distribute = \
pkg_resources.get_distribution("distribute")
if existing_distribute in distribute_requirement:
requirement.conflicts_with = existing_distribute
except pkg_resources.DistributionNotFound:
# distribute wasn't installed, so nothing to do
pass
if requirement.conflicts_with:
logger.info(
'Found existing installation: %s',
requirement.conflicts_with,
)
with indent_log():
requirement.uninstall(auto_confirm=True)
try:
requirement.install(
install_options,
global_options,
*args,
**kwargs
)
except:
# if install did not succeed, rollback previous uninstall
if (requirement.conflicts_with
and not requirement.install_succeeded):
requirement.rollback_uninstall()
raise
else:
if (requirement.conflicts_with
and requirement.install_succeeded):
requirement.commit_uninstall()
requirement.remove_temporary_source()
self.successfully_installed = to_install
| mit |
apurvbhartia/gnuradio-routing | gr-wxgui/grc/top_block_gui.py | 18 | 2250 | # Copyright 2008, 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import wx
from gnuradio import gr
import panel
default_gui_size = (200, 100)
class top_block_gui(gr.top_block):
"""gr top block with wx gui app and grid sizer."""
def __init__(self, title='', size=default_gui_size):
"""
Initialize the gr top block.
Create the wx gui elements.
@param title the main window title
@param size the main window size tuple in pixels
"""
#initialize
gr.top_block.__init__(self)
self._size = size
#create gui elements
self._app = wx.App()
self._frame = wx.Frame(None, title=title)
self._panel = panel.Panel(self._frame)
self.Add = self._panel.Add
self.GridAdd = self._panel.GridAdd
self.GetWin = self._panel.GetWin
def SetIcon(self, *args, **kwargs): self._frame.SetIcon(*args, **kwargs)
def Run(self, start=True):
"""
Setup the wx gui elements.
Start the gr top block.
Block with the wx main loop.
"""
#set minimal window size
self._frame.SetSizeHints(*self._size)
#create callback for quit
def _quit(event):
self.stop(); self.wait()
self._frame.Destroy()
#setup app
self._frame.Bind(wx.EVT_CLOSE, _quit)
self._sizer = wx.BoxSizer(wx.VERTICAL)
self._sizer.Add(self._panel, 0, wx.EXPAND)
self._frame.SetSizerAndFit(self._sizer)
self._frame.SetAutoLayout(True)
self._frame.Show(True)
self._app.SetTopWindow(self._frame)
#start flow graph
if start: self.start()
#blocking main loop
self._app.MainLoop()
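# Illustrative sketch, not part of GNU Radio: a flow graph GUI is typically
# built by instantiating (or subclassing) top_block_gui, registering the
# wx windows of GUI sinks via Add()/GridAdd(), and then calling Run(). The
# block wiring below is omitted and purely hypothetical.
def _example_usage():
    tb = top_block_gui(title='Example flow graph', size=(800, 600))
    # ... create GNU Radio blocks, tb.connect(...) them, and register their
    # GUI windows with tb.Add(...) or tb.GridAdd(...) ...
    tb.Run(start=True)  # starts the flow graph and blocks in the wx main loop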
| gpl-3.0 |
borisroman/vdsm | vdsm_hooks/ovs/ovs_after_network_setup_fail.py | 1 | 1636 | #!/usr/bin/env python
# Copyright 2015 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from functools import partial
import traceback
from vdsm import supervdsm
import hooking
import ovs_utils
log = partial(ovs_utils.log, tag='ovs_after_network_setup_fail: ')
def main():
setup_nets_config = hooking.read_json()
in_rollback = setup_nets_config['request']['options'].get('_inRollback')
if in_rollback:
log('Configuration failed with _inRollback=True.')
else:
log('Configuration failed. At this point, non-OVS rollback should be '
'done. Executing OVS rollback.')
supervdsm.getProxy().setupNetworks(
{}, {}, {'connectivityCheck': False, '_inRollback': True,
'_inOVSRollback': True})
if __name__ == '__main__':
try:
main()
except:
hooking.exit_hook(traceback.format_exc())
| gpl-2.0 |
40223220/2015_cdb_g7_40223220 | static/Brython3.1.1-20150328-091302/Lib/xml/sax/__init__.py | 637 | 3505 | """Simple API for XML (SAX) implementation for Python.
This module provides an implementation of the SAX 2 interface;
information about the Java version of the interface can be found at
http://www.megginson.com/SAX/. The Python version of the interface is
documented at <...>.
This package contains the following modules:
handler -- Base classes and constants which define the SAX 2 API for
the 'client-side' of SAX for Python.
saxutils -- Implementation of the convenience classes commonly used to
work with SAX.
xmlreader -- Base classes and constants which define the SAX 2 API for
the parsers used with SAX for Python.
expatreader -- Driver that allows use of the Expat parser with SAX.
"""
from .xmlreader import InputSource
from .handler import ContentHandler, ErrorHandler
from ._exceptions import SAXException, SAXNotRecognizedException, \
SAXParseException, SAXNotSupportedException, \
SAXReaderNotAvailable
def parse(source, handler, errorHandler=ErrorHandler()):
parser = make_parser()
parser.setContentHandler(handler)
parser.setErrorHandler(errorHandler)
parser.parse(source)
def parseString(string, handler, errorHandler=ErrorHandler()):
from io import BytesIO
if errorHandler is None:
errorHandler = ErrorHandler()
parser = make_parser()
parser.setContentHandler(handler)
parser.setErrorHandler(errorHandler)
inpsrc = InputSource()
inpsrc.setByteStream(BytesIO(string))
parser.parse(inpsrc)
# this is the parser list used by the make_parser function if no
# alternatives are given as parameters to the function
default_parser_list = ["xml.sax.expatreader"]
# tell modulefinder that importing sax potentially imports expatreader
_false = 0
if _false:
import xml.sax.expatreader
import os, sys
#if "PY_SAX_PARSER" in os.environ:
# default_parser_list = os.environ["PY_SAX_PARSER"].split(",")
del os
_key = "python.xml.sax.parser"
if sys.platform[:4] == "java" and sys.registry.containsKey(_key):
default_parser_list = sys.registry.getProperty(_key).split(",")
def make_parser(parser_list = []):
"""Creates and returns a SAX parser.
Creates the first parser it is able to instantiate of the ones
given in the list created by doing parser_list +
default_parser_list. The lists must contain the names of Python
modules containing both a SAX parser and a create_parser function."""
for parser_name in parser_list + default_parser_list:
try:
return _create_parser(parser_name)
except ImportError as e:
import sys
if parser_name in sys.modules:
# The parser module was found, but importing it
# failed unexpectedly, pass this exception through
raise
except SAXReaderNotAvailable:
# The parser module detected that it won't work properly,
# so try the next one
pass
raise SAXReaderNotAvailable("No parsers found", None)
# --- Internal utility methods used by make_parser
if sys.platform[ : 4] == "java":
def _create_parser(parser_name):
from org.python.core import imp
drv_module = imp.importName(parser_name, 0, globals())
return drv_module.create_parser()
else:
def _create_parser(parser_name):
drv_module = __import__(parser_name,{},{},['create_parser'])
return drv_module.create_parser()
del sys
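# Illustrative sketch, not part of the module: a minimal ContentHandler run
# through parseString(), assuming the default expatreader driver is available.
def _example_count_elements():
    class _TagCounter(ContentHandler):
        def __init__(self):
            ContentHandler.__init__(self)
            self.count = 0
        def startElement(self, name, attrs):
            self.count += 1
    handler = _TagCounter()
    parseString(b"<root><item/><item/></root>", handler)
    return handler.count  # 3: <root> plus the two <item> elements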
| gpl-3.0 |
tntnatbry/tensorflow | tensorflow/contrib/learn/python/learn/estimators/state_saving_rnn_estimator.py | 9 | 38410 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Estimator for State Saving RNNs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.contrib import layers
from tensorflow.contrib import metrics
from tensorflow.contrib import rnn as rnn_cell
from tensorflow.contrib.framework.python.framework import deprecated
from tensorflow.contrib.layers.python.layers import feature_column_ops
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.learn.python.learn.estimators import rnn_common
from tensorflow.contrib.rnn.python.ops import core_rnn
from tensorflow.contrib.training.python.training import sequence_queueing_state_saver as sqss
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.training import momentum as momentum_opt
from tensorflow.python.util import nest
def construct_state_saving_rnn(cell,
inputs,
num_label_columns,
state_saver,
state_name,
scope='rnn'):
"""Build a state saving RNN and apply a fully connected layer.
Args:
cell: An instance of `RNNCell`.
inputs: A length `T` list of inputs, each a `Tensor` of shape
`[batch_size, input_size, ...]`.
num_label_columns: The desired output dimension.
state_saver: A state saver object with methods `state` and `save_state`.
state_name: Python string or tuple of strings. The name to use with the
state_saver. If the cell returns tuples of states (i.e.,
`cell.state_size` is a tuple) then `state_name` should be a tuple of
strings having the same length as `cell.state_size`. Otherwise it should
be a single string.
scope: `VariableScope` for the created subgraph; defaults to "rnn".
Returns:
activations: The output of the RNN, projected to `num_label_columns`
dimensions, a `Tensor` of shape `[batch_size, T, num_label_columns]`.
final_state: The final state output by the RNN
"""
with ops.name_scope(scope):
rnn_outputs, final_state = core_rnn.static_state_saving_rnn(
cell=cell,
inputs=inputs,
state_saver=state_saver,
state_name=state_name,
scope=scope)
# Convert rnn_outputs from a list of time-major order Tensors to a single
# Tensor of batch-major order.
rnn_outputs = array_ops.stack(rnn_outputs, axis=1)
activations = layers.fully_connected(
inputs=rnn_outputs,
num_outputs=num_label_columns,
activation_fn=None,
trainable=True)
# Use `identity` to rename `final_state`.
final_state = array_ops.identity(
final_state, name=rnn_common.RNNKeys.FINAL_STATE_KEY)
return activations, final_state
# TODO(jtbates): As per cl/14156248, remove this function and switch from
# MetricSpec to metric ops.
def _mask_multivalue(sequence_length, metric):
"""Wrapper function that masks values by `sequence_length`.
Args:
sequence_length: A `Tensor` with shape `[batch_size]` and dtype `int32`
containing the length of each sequence in the batch. If `None`, sequences
are assumed to be unpadded.
metric: A metric function. Its signature must contain `predictions` and
`labels`.
Returns:
A metric function that masks `predictions` and `labels` using
`sequence_length` and then applies `metric` to the results.
"""
@functools.wraps(metric)
def _metric(predictions, labels, *args, **kwargs):
predictions, labels = rnn_common.mask_activations_and_labels(
predictions, labels, sequence_length)
return metric(predictions, labels, *args, **kwargs)
return _metric
def _get_default_metrics(problem_type, sequence_length):
"""Returns default `MetricSpec`s for `problem_type`.
Args:
problem_type: `ProblemType.CLASSIFICATION` or
`ProblemType.LINEAR_REGRESSION`.
sequence_length: A `Tensor` with shape `[batch_size]` and dtype `int32`
containing the length of each sequence in the batch. If `None`, sequences
are assumed to be unpadded.
Returns:
A `dict` mapping strings to `MetricSpec`s.
"""
default_metrics = {}
if problem_type == constants.ProblemType.CLASSIFICATION:
default_metrics['accuracy'] = metric_spec.MetricSpec(
metric_fn=_mask_multivalue(sequence_length, metrics.streaming_accuracy),
prediction_key=prediction_key.PredictionKey.CLASSES)
elif problem_type == constants.ProblemType.LINEAR_REGRESSION:
pass
return default_metrics
def _multi_value_loss(
activations, labels, sequence_length, target_column, features):
"""Maps `activations` from the RNN to loss for multi value models.
Args:
activations: Output from an RNN. Should have dtype `float32` and shape
`[batch_size, padded_length, ?]`.
labels: A `Tensor` with length `[batch_size, padded_length]`.
sequence_length: A `Tensor` with shape `[batch_size]` and dtype `int32`
containing the length of each sequence in the batch. If `None`, sequences
are assumed to be unpadded.
target_column: An initialized `TargetColumn`, calculate predictions.
features: A `dict` containing the input and (optionally) sequence length
information and initial state.
Returns:
A scalar `Tensor` containing the loss.
"""
with ops.name_scope('MultiValueLoss'):
activations_masked, labels_masked = rnn_common.mask_activations_and_labels(
activations, labels, sequence_length)
return target_column.loss(activations_masked, labels_masked, features)
def _get_name_or_parent_names(column):
"""Gets the name of a column or its parent columns' names.
Args:
column: A sequence feature column derived from `FeatureColumn`.
Returns:
A list of the name of `column` or the names of its parent columns,
if any exist.
"""
# pylint: disable=protected-access
parent_columns = feature_column_ops._get_parent_columns(column)
if parent_columns:
return [x.name for x in parent_columns]
return [column.name]
def _prepare_features_for_sqss(features, labels, mode,
sequence_feature_columns,
context_feature_columns):
"""Prepares features for batching by the SQSS.
In preparation for batching by the SQSS, this function:
- Extracts the input key from the features dict.
- Separates sequence and context features dicts from the features dict.
- Adds the labels tensor to the sequence features dict.
Args:
features: A dict of Python string to an iterable of `Tensor` or
`SparseTensor` of rank 2, the `features` argument of a TF.Learn model_fn.
labels: An iterable of `Tensor`.
mode: Defines whether this is training, evaluation or prediction.
See `ModeKeys`.
sequence_feature_columns: An iterable containing all the feature columns
describing sequence features. All items in the set should be instances
of classes derived from `FeatureColumn`.
context_feature_columns: An iterable containing all the feature columns
      describing context features, i.e., features that apply across all time
steps. All items in the set should be instances of classes derived from
`FeatureColumn`.
Returns:
sequence_features: A dict mapping feature names to sequence features.
context_features: A dict mapping feature names to context features.
Raises:
ValueError: If `features` does not contain a value for every key in
`sequence_feature_columns` or `context_feature_columns`.
"""
# Extract sequence features.
feature_column_ops._check_supported_sequence_columns(sequence_feature_columns) # pylint: disable=protected-access
sequence_features = {}
for column in sequence_feature_columns:
for name in _get_name_or_parent_names(column):
feature = features.get(name, None)
if feature is None:
raise ValueError('No key in features for sequence feature: ' + name)
sequence_features[name] = feature
# Extract context features.
context_features = {}
if context_feature_columns is not None:
for column in context_feature_columns:
name = column.name
feature = features.get(name, None)
if feature is None:
raise ValueError('No key in features for context feature: ' + name)
context_features[name] = feature
# Add labels to the resulting sequence features dict.
if mode != model_fn.ModeKeys.INFER:
sequence_features[rnn_common.RNNKeys.LABELS_KEY] = labels
return sequence_features, context_features
def _read_batch(cell,
features,
labels,
mode,
num_unroll,
num_rnn_layers,
batch_size,
sequence_feature_columns,
context_feature_columns=None,
num_threads=3,
queue_capacity=1000,
seed=None):
"""Reads a batch from a state saving sequence queue.
Args:
cell: An initialized `RNNCell` to be used in the RNN.
features: A dict of Python string to an iterable of `Tensor`, the
`features` argument of a TF.Learn model_fn.
labels: An iterable of `Tensor`, the `labels` argument of a
TF.Learn model_fn.
mode: Defines whether this is training, evaluation or prediction.
See `ModeKeys`.
num_unroll: Python integer, how many time steps to unroll at a time.
The input sequences of length `k` are then split into `k / num_unroll`
many segments.
num_rnn_layers: Python integer, number of layers in the RNN.
batch_size: Python integer, the size of the minibatch produced by the SQSS.
sequence_feature_columns: An iterable containing all the feature columns
describing sequence features. All items in the set should be instances
of classes derived from `FeatureColumn`.
context_feature_columns: An iterable containing all the feature columns
      describing context features, i.e., features that apply across all time
steps. All items in the set should be instances of classes derived from
`FeatureColumn`.
num_threads: The Python integer number of threads enqueuing input examples
into a queue. Defaults to 3.
queue_capacity: The max capacity of the queue in number of examples.
Needs to be at least `batch_size`. Defaults to 1000. When iterating
over the same input example multiple times reusing their keys the
`queue_capacity` must be smaller than the number of examples.
seed: Fixes the random seed used for generating input keys by the SQSS.
Returns:
batch: A `NextQueuedSequenceBatch` containing batch_size `SequenceExample`
values and their saved internal states.
"""
# Set batch_size=1 to initialize SQSS with cell's zero state.
values = cell.zero_state(batch_size=1, dtype=dtypes.float32)
# Set up stateful queue reader.
states = {}
state_names = _get_lstm_state_names(num_rnn_layers)
for i in range(num_rnn_layers):
states[state_names[i][0]] = array_ops.squeeze(values[i][0], axis=0)
states[state_names[i][1]] = array_ops.squeeze(values[i][1], axis=0)
sequences, context = _prepare_features_for_sqss(
features, labels, mode, sequence_feature_columns,
context_feature_columns)
return sqss.batch_sequences_with_states(
input_key='key',
input_sequences=sequences,
input_context=context,
input_length=None, # infer sequence lengths
initial_states=states,
num_unroll=num_unroll,
batch_size=batch_size,
pad=True, # pad to a multiple of num_unroll
make_keys_unique=True,
make_keys_unique_seed=seed,
num_threads=num_threads,
capacity=queue_capacity)
def _get_state_name(i):
"""Constructs the name string for state component `i`."""
return '{}_{}'.format(rnn_common.RNNKeys.STATE_PREFIX, i)
def state_tuple_to_dict(state):
"""Returns a dict containing flattened `state`.
Args:
state: A `Tensor` or a nested tuple of `Tensors`. All of the `Tensor`s must
have the same rank and agree on all dimensions except the last.
Returns:
A dict containing the `Tensor`s that make up `state`. The keys of the dict
are of the form "STATE_PREFIX_i" where `i` is the place of this `Tensor`
in a depth-first traversal of `state`.
"""
with ops.name_scope('state_tuple_to_dict'):
flat_state = nest.flatten(state)
state_dict = {}
for i, state_component in enumerate(flat_state):
state_name = _get_state_name(i)
state_value = (None if state_component is None else array_ops.identity(
state_component, name=state_name))
state_dict[state_name] = state_value
return state_dict
def _prepare_inputs_for_rnn(sequence_features, context_features,
sequence_feature_columns, num_unroll):
"""Prepares features batched by the SQSS for input to a state-saving RNN.
Args:
sequence_features: A dict of sequence feature name to `Tensor` or
`SparseTensor`, with `Tensor`s of shape `[batch_size, num_unroll, ...]`
or `SparseTensors` of dense shape `[batch_size, num_unroll, d]`.
context_features: A dict of context feature name to `Tensor`, with
tensors of shape `[batch_size, 1, ...]` and type float32.
sequence_feature_columns: An iterable containing all the feature columns
describing sequence features. All items in the set should be instances
of classes derived from `FeatureColumn`.
num_unroll: Python integer, how many time steps to unroll at a time.
The input sequences of length `k` are then split into `k / num_unroll`
many segments.
Returns:
features_by_time: A list of length `num_unroll` with `Tensor` entries of
shape `[batch_size, sum(sequence_features dimensions) +
sum(context_features dimensions)]` of type float32.
Context features are copied into each time step.
"""
def _tile(feature):
return array_ops.squeeze(
array_ops.tile(array_ops.expand_dims(feature, 1), [1, num_unroll, 1]),
axis=2)
for feature in sequence_features.values():
if isinstance(feature, sparse_tensor.SparseTensor):
# Explicitly set dense_shape's shape to 3 ([batch_size, num_unroll, d])
# since it can't be statically inferred.
feature.dense_shape.set_shape([3])
sequence_features = layers.sequence_input_from_feature_columns(
columns_to_tensors=sequence_features,
feature_columns=sequence_feature_columns,
weight_collections=None,
scope=None)
# Explicitly set shape along dimension 1 to num_unroll for the unstack op.
sequence_features.set_shape([None, num_unroll, None])
if not context_features:
return array_ops.unstack(sequence_features, axis=1)
# TODO(jtbates): Call layers.input_from_feature_columns for context features.
context_features = [
_tile(context_features[k]) for k in sorted(context_features)
]
return array_ops.unstack(
array_ops.concat(
[sequence_features, array_ops.stack(context_features, 2)], axis=2),
axis=1)
def _get_rnn_model_fn(target_column,
problem_type,
optimizer,
num_unroll,
num_units,
num_rnn_layers,
num_threads,
queue_capacity,
batch_size,
sequence_feature_columns,
context_feature_columns=None,
predict_probabilities=False,
learning_rate=None,
gradient_clipping_norm=None,
dropout_keep_probabilities=None,
name='StateSavingRNNModel',
seed=None):
"""Creates a state saving RNN model function for an `Estimator`.
Args:
target_column: An initialized `TargetColumn`, used to calculate prediction
and loss.
problem_type: `ProblemType.CLASSIFICATION` or
`ProblemType.LINEAR_REGRESSION`.
optimizer: A subclass of `Optimizer`, an instance of an `Optimizer` or a
string.
num_unroll: Python integer, how many time steps to unroll at a time.
The input sequences of length `k` are then split into `k / num_unroll`
many segments.
num_units: The number of units in the `RNNCell`.
num_rnn_layers: Python integer, number of layers in the RNN.
num_threads: The Python integer number of threads enqueuing input examples
into a queue.
queue_capacity: The max capacity of the queue in number of examples.
Needs to be at least `batch_size`. When iterating over the same input
example multiple times reusing their keys the `queue_capacity` must be
smaller than the number of examples.
batch_size: Python integer, the size of the minibatch produced by the SQSS.
sequence_feature_columns: An iterable containing all the feature columns
describing sequence features. All items in the set should be instances
of classes derived from `FeatureColumn`.
context_feature_columns: An iterable containing all the feature columns
      describing context features, i.e., features that apply across all time
steps. All items in the set should be instances of classes derived from
`FeatureColumn`.
predict_probabilities: A boolean indicating whether to predict probabilities
for all classes.
Must only be used with `ProblemType.CLASSIFICATION`.
learning_rate: Learning rate used for optimization. This argument has no
effect if `optimizer` is an instance of an `Optimizer`.
gradient_clipping_norm: A float. Gradients will be clipped to this value.
dropout_keep_probabilities: a list of dropout keep probabilities or `None`.
If given a list, it must have length `num_rnn_layers + 1`.
name: A string that will be used to create a scope for the RNN.
seed: Fixes the random seed used for generating input keys by the SQSS.
Returns:
A model function to be passed to an `Estimator`.
Raises:
ValueError: `problem_type` is not one of
`ProblemType.LINEAR_REGRESSION`
or `ProblemType.CLASSIFICATION`.
ValueError: `predict_probabilities` is `True` for `problem_type` other
than `ProblemType.CLASSIFICATION`.
ValueError: `num_unroll` is not positive.
"""
if problem_type not in (constants.ProblemType.CLASSIFICATION,
constants.ProblemType.LINEAR_REGRESSION):
raise ValueError(
'problem_type must be ProblemType.LINEAR_REGRESSION or '
'ProblemType.CLASSIFICATION; got {}'.
format(problem_type))
if (problem_type != constants.ProblemType.CLASSIFICATION and
predict_probabilities):
raise ValueError(
'predict_probabilities can only be set to True for problem_type'
' ProblemType.CLASSIFICATION; got {}.'.format(problem_type))
if num_unroll <= 0:
raise ValueError('num_unroll must be positive; got {}.'.format(num_unroll))
def _rnn_model_fn(features, labels, mode):
"""The model to be passed to an `Estimator`."""
with ops.name_scope(name):
dropout = (dropout_keep_probabilities
if mode == model_fn.ModeKeys.TRAIN
else None)
cell = lstm_cell(num_units, num_rnn_layers, dropout)
batch = _read_batch(
cell=cell,
features=features,
labels=labels,
mode=mode,
num_unroll=num_unroll,
num_rnn_layers=num_rnn_layers,
batch_size=batch_size,
sequence_feature_columns=sequence_feature_columns,
context_feature_columns=context_feature_columns,
num_threads=num_threads,
queue_capacity=queue_capacity,
seed=seed)
sequence_features = batch.sequences
context_features = batch.context
if mode != model_fn.ModeKeys.INFER:
labels = sequence_features.pop(rnn_common.RNNKeys.LABELS_KEY)
inputs = _prepare_inputs_for_rnn(sequence_features, context_features,
sequence_feature_columns, num_unroll)
state_name = _get_lstm_state_names(num_rnn_layers)
rnn_activations, final_state = construct_state_saving_rnn(
cell=cell,
inputs=inputs,
num_label_columns=target_column.num_label_columns,
state_saver=batch,
state_name=state_name)
loss = None # Created below for modes TRAIN and EVAL.
prediction_dict = rnn_common.multi_value_predictions(
rnn_activations, target_column, problem_type, predict_probabilities)
if mode != model_fn.ModeKeys.INFER:
loss = _multi_value_loss(rnn_activations, labels, batch.length,
target_column, features)
eval_metric_ops = None
if mode != model_fn.ModeKeys.INFER:
default_metrics = _get_default_metrics(problem_type, batch.length)
eval_metric_ops = estimator._make_metrics_ops( # pylint: disable=protected-access
default_metrics, features, labels, prediction_dict)
state_dict = state_tuple_to_dict(final_state)
prediction_dict.update(state_dict)
train_op = None
if mode == model_fn.ModeKeys.TRAIN:
train_op = optimizers.optimize_loss(
loss=loss,
global_step=None, # Get it internally.
learning_rate=learning_rate,
optimizer=optimizer,
clip_gradients=gradient_clipping_norm,
summaries=optimizers.OPTIMIZER_SUMMARIES)
return model_fn.ModelFnOps(mode=mode,
predictions=prediction_dict,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops)
return _rnn_model_fn
def _get_lstm_state_names(num_rnn_layers):
"""Returns a num_rnn_layers long list of lstm state name pairs.
Args:
num_rnn_layers: The number of layers in the RNN.
Returns:
A num_rnn_layers long list of lstm state name pairs of the form:
['lstm_state_cN', 'lstm_state_mN'] for all N from 0 to num_rnn_layers.
"""
return [['lstm_state_c' + str(i), 'lstm_state_m' + str(i)]
for i in range(num_rnn_layers)]
# TODO(jtbates): Allow users to specify cell types other than LSTM.
def lstm_cell(num_units, num_rnn_layers, dropout_keep_probabilities):
"""Constructs a `MultiRNNCell` with num_rnn_layers `BasicLSTMCell`s.
Args:
num_units: The number of units in the `RNNCell`.
num_rnn_layers: The number of layers in the RNN.
dropout_keep_probabilities: a list whose elements are either floats in
`[0.0, 1.0]` or `None`. It must have length `num_rnn_layers + 1`.
Returns:
    An initialized `MultiRNNCell`.
"""
cells = [
rnn_cell.BasicLSTMCell(num_units=num_units, state_is_tuple=True)
for _ in range(num_rnn_layers)
]
if dropout_keep_probabilities:
cells = rnn_common.apply_dropout(cells, dropout_keep_probabilities)
return rnn_cell.MultiRNNCell(cells)
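# Illustrative sketch, not part of TensorFlow: building a two-layer LSTM stack
# with lstm_cell() above. The dropout list must have num_rnn_layers + 1
# entries, as required by the docstring; the sizes here are hypothetical.
def _example_lstm_cell():
  return lstm_cell(num_units=32, num_rnn_layers=2,
                   dropout_keep_probabilities=[1.0, 0.8, 1.0])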
class StateSavingRnnEstimator(estimator.Estimator):
def __init__(self,
problem_type,
num_units,
num_unroll,
batch_size,
sequence_feature_columns,
context_feature_columns=None,
num_classes=None,
num_rnn_layers=1,
optimizer_type='SGD',
learning_rate=0.1,
predict_probabilities=False,
momentum=None,
gradient_clipping_norm=5.0,
dropout_keep_probabilities=None,
model_dir=None,
config=None,
feature_engineering_fn=None,
num_threads=3,
queue_capacity=1000,
seed=None):
"""Initializes a StateSavingRnnEstimator.
Args:
problem_type: `ProblemType.CLASSIFICATION` or
`ProblemType.LINEAR_REGRESSION`.
num_units: The size of the RNN cells.
num_unroll: Python integer, how many time steps to unroll at a time.
The input sequences of length `k` are then split into `k / num_unroll`
many segments.
batch_size: Python integer, the size of the minibatch.
sequence_feature_columns: An iterable containing all the feature columns
describing sequence features. All items in the set should be instances
of classes derived from `FeatureColumn`.
context_feature_columns: An iterable containing all the feature columns
        describing context features, i.e., features that apply across all time
steps. All items in the set should be instances of classes derived from
`FeatureColumn`.
num_classes: The number of classes for categorization. Used only and
required if `problem_type` is `ProblemType.CLASSIFICATION`
num_rnn_layers: Number of RNN layers.
optimizer_type: The type of optimizer to use. Either a subclass of
`Optimizer`, an instance of an `Optimizer` or a string. Strings must be
one of 'Adagrad', 'Adam', 'Ftrl', Momentum', 'RMSProp', or 'SGD'.
learning_rate: Learning rate. This argument has no effect if `optimizer`
is an instance of an `Optimizer`.
predict_probabilities: A boolean indicating whether to predict
probabilities for all classes. Used only if `problem_type` is
`ProblemType.CLASSIFICATION`.
momentum: Momentum value. Only used if `optimizer_type` is 'Momentum'.
gradient_clipping_norm: Parameter used for gradient clipping. If `None`,
then no clipping is performed.
dropout_keep_probabilities: a list of dropout keep probabilities or
`None`. If given a list, it must have length `num_rnn_layers + 1`.
model_dir: The directory in which to save and restore the model graph,
parameters, etc.
config: A `RunConfig` instance.
feature_engineering_fn: Takes features and labels which are the output of
`input_fn` and returns features and labels which will be fed into
`model_fn`. Please check `model_fn` for a definition of features and
labels.
num_threads: The Python integer number of threads enqueuing input examples
into a queue. Defaults to 3.
queue_capacity: The max capacity of the queue in number of examples.
Needs to be at least `batch_size`. Defaults to 1000. When iterating
over the same input example multiple times reusing their keys the
`queue_capacity` must be smaller than the number of examples.
seed: Fixes the random seed used for generating input keys by the SQSS.
Raises:
ValueError: `problem_type` is not one of
`ProblemType.LINEAR_REGRESSION` or `ProblemType.CLASSIFICATION`.
ValueError: `problem_type` is `ProblemType.CLASSIFICATION` but
`num_classes` is not specified.
"""
name = 'MultiValueStateSavingRNN'
if problem_type == constants.ProblemType.LINEAR_REGRESSION:
name += 'Regressor'
target_column = layers.regression_target()
elif problem_type == constants.ProblemType.CLASSIFICATION:
if not num_classes:
raise ValueError('For CLASSIFICATION problem_type, num_classes must be '
'specified.')
target_column = layers.multi_class_target(n_classes=num_classes)
name += 'Classifier'
else:
raise ValueError(
'problem_type must be either ProblemType.LINEAR_REGRESSION '
'or ProblemType.CLASSIFICATION; got {}'.format(
problem_type))
if optimizer_type == 'Momentum':
optimizer_type = momentum_opt.MomentumOptimizer(learning_rate, momentum)
rnn_model_fn = _get_rnn_model_fn(
target_column=target_column,
problem_type=problem_type,
optimizer=optimizer_type,
num_unroll=num_unroll,
num_units=num_units,
num_rnn_layers=num_rnn_layers,
num_threads=num_threads,
queue_capacity=queue_capacity,
batch_size=batch_size,
sequence_feature_columns=sequence_feature_columns,
context_feature_columns=context_feature_columns,
predict_probabilities=predict_probabilities,
learning_rate=learning_rate,
gradient_clipping_norm=gradient_clipping_norm,
dropout_keep_probabilities=dropout_keep_probabilities,
name=name,
seed=seed)
super(StateSavingRnnEstimator, self).__init__(
model_fn=rnn_model_fn,
model_dir=model_dir,
config=config,
feature_engineering_fn=feature_engineering_fn)
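# Illustrative sketch, not part of TensorFlow: constructing the estimator for a
# binary sequence classification problem. The feature column name and the
# hyperparameter values below are hypothetical placeholders.
def _example_estimator():
  seq_columns = [layers.real_valued_column('measurements', dimension=1)]
  return StateSavingRnnEstimator(
      problem_type=constants.ProblemType.CLASSIFICATION,
      num_units=32,
      num_unroll=10,
      batch_size=8,
      sequence_feature_columns=seq_columns,
      num_classes=2)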
@deprecated('2017-04-01', 'multi_value_rnn_regressor is deprecated. '
'Please construct a StateSavingRnnEstimator directly.')
def multi_value_rnn_regressor(num_units,
num_unroll,
batch_size,
sequence_feature_columns,
context_feature_columns=None,
num_rnn_layers=1,
optimizer_type='SGD',
learning_rate=0.1,
momentum=None,
gradient_clipping_norm=5.0,
dropout_keep_probabilities=None,
model_dir=None,
config=None,
feature_engineering_fn=None,
num_threads=3,
queue_capacity=1000,
seed=None):
"""Creates a RNN `Estimator` that predicts sequences of values.
Args:
num_units: The size of the RNN cells.
num_unroll: Python integer, how many time steps to unroll at a time.
The input sequences of length `k` are then split into `k / num_unroll`
many segments.
batch_size: Python integer, the size of the minibatch.
sequence_feature_columns: An iterable containing all the feature columns
describing sequence features. All items in the set should be instances
of classes derived from `FeatureColumn`.
context_feature_columns: An iterable containing all the feature columns
      describing context features, i.e., features that apply across all time
steps. All items in the set should be instances of classes derived from
`FeatureColumn`.
num_rnn_layers: Number of RNN layers. Leave this at its default value 1
if passing a `cell_type` that is already a MultiRNNCell.
optimizer_type: The type of optimizer to use. Either a subclass of
`Optimizer`, an instance of an `Optimizer` or a string. Strings must be
one of 'Adagrad', 'Momentum' or 'SGD'.
learning_rate: Learning rate. This argument has no effect if `optimizer`
is an instance of an `Optimizer`.
momentum: Momentum value. Only used if `optimizer_type` is 'Momentum'.
gradient_clipping_norm: Parameter used for gradient clipping. If `None`,
then no clipping is performed.
dropout_keep_probabilities: a list of dropout keep probabilities or `None`.
If given a list, it must have length `num_rnn_layers + 1`.
model_dir: The directory in which to save and restore the model graph,
parameters, etc.
config: A `RunConfig` instance.
feature_engineering_fn: Takes features and labels which are the output of
`input_fn` and returns features and labels which will be fed into
`model_fn`. Please check `model_fn` for a definition of features and
labels.
num_threads: The Python integer number of threads enqueuing input examples
into a queue. Defaults to 3.
queue_capacity: The max capacity of the queue in number of examples.
Needs to be at least `batch_size`. Defaults to 1000. When iterating
over the same input example multiple times reusing their keys the
`queue_capacity` must be smaller than the number of examples.
seed: Fixes the random seed used for generating input keys by the SQSS.
Returns:
An initialized `Estimator`.
"""
return StateSavingRnnEstimator(
constants.ProblemType.LINEAR_REGRESSION,
num_units,
num_unroll,
batch_size,
sequence_feature_columns,
context_feature_columns=context_feature_columns,
num_classes=None,
num_rnn_layers=num_rnn_layers,
optimizer_type=optimizer_type,
learning_rate=learning_rate,
predict_probabilities=False,
momentum=momentum,
gradient_clipping_norm=gradient_clipping_norm,
dropout_keep_probabilities=dropout_keep_probabilities,
model_dir=model_dir,
config=config,
feature_engineering_fn=feature_engineering_fn,
num_threads=num_threads,
queue_capacity=queue_capacity,
seed=seed)
@deprecated('2017-04-01', 'multi_value_rnn_classifier is deprecated. '
'Please construct a StateSavingRnnEstimator directly.')
def multi_value_rnn_classifier(num_classes,
num_units,
num_unroll,
batch_size,
sequence_feature_columns,
context_feature_columns=None,
num_rnn_layers=1,
optimizer_type='SGD',
learning_rate=0.1,
predict_probabilities=False,
momentum=None,
gradient_clipping_norm=5.0,
dropout_keep_probabilities=None,
model_dir=None,
config=None,
feature_engineering_fn=None,
num_threads=3,
queue_capacity=1000,
seed=None):
"""Creates a RNN `Estimator` that predicts sequences of labels.
Args:
num_classes: The number of classes for categorization.
num_units: The size of the RNN cells.
num_unroll: Python integer, how many time steps to unroll at a time.
The input sequences of length `k` are then split into `k / num_unroll`
many segments.
batch_size: Python integer, the size of the minibatch.
sequence_feature_columns: An iterable containing all the feature columns
describing sequence features. All items in the set should be instances
of classes derived from `FeatureColumn`.
context_feature_columns: An iterable containing all the feature columns
      describing context features, i.e., features that apply across all time
steps. All items in the set should be instances of classes derived from
`FeatureColumn`.
num_rnn_layers: Number of RNN layers.
optimizer_type: The type of optimizer to use. Either a subclass of
`Optimizer`, an instance of an `Optimizer` or a string. Strings must be
one of 'Adagrad', 'Momentum' or 'SGD'.
learning_rate: Learning rate. This argument has no effect if `optimizer`
is an instance of an `Optimizer`.
predict_probabilities: A boolean indicating whether to predict probabilities
for all classes.
momentum: Momentum value. Only used if `optimizer_type` is 'Momentum'.
gradient_clipping_norm: Parameter used for gradient clipping. If `None`,
then no clipping is performed.
dropout_keep_probabilities: a list of dropout keep probabilities or `None`.
If given a list, it must have length `num_rnn_layers + 1`.
model_dir: The directory in which to save and restore the model graph,
parameters, etc.
config: A `RunConfig` instance.
feature_engineering_fn: Takes features and labels which are the output of
`input_fn` and returns features and labels which will be fed into
`model_fn`. Please check `model_fn` for a definition of features and
labels.
num_threads: The Python integer number of threads enqueuing input examples
into a queue. Defaults to 3.
queue_capacity: The max capacity of the queue in number of examples.
Needs to be at least `batch_size`. Defaults to 1000. When iterating
over the same input example multiple times reusing their keys the
`queue_capacity` must be smaller than the number of examples.
seed: Fixes the random seed used for generating input keys by the SQSS.
Returns:
An initialized `Estimator`.
"""
return StateSavingRnnEstimator(
constants.ProblemType.CLASSIFICATION,
num_units,
num_unroll,
batch_size,
sequence_feature_columns,
context_feature_columns=context_feature_columns,
num_classes=num_classes,
num_rnn_layers=num_rnn_layers,
optimizer_type=optimizer_type,
learning_rate=learning_rate,
predict_probabilities=predict_probabilities,
momentum=momentum,
gradient_clipping_norm=gradient_clipping_norm,
dropout_keep_probabilities=dropout_keep_probabilities,
model_dir=model_dir,
config=config,
feature_engineering_fn=feature_engineering_fn,
num_threads=num_threads,
queue_capacity=queue_capacity,
seed=seed)
| apache-2.0 |
TripleDogDare/RadioWCSpy | env/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/contrib/pyopenssl.py | 153 | 9905 | '''SSL with SNI_-support for Python 2. Follow these instructions if you would
like to verify SSL certificates in Python 2. Note, the default libraries do
*not* do certificate checking; you need to do additional work to validate
certificates yourself.
This needs the following packages installed:
* pyOpenSSL (tested with 0.13)
* ndg-httpsclient (tested with 0.3.2)
* pyasn1 (tested with 0.1.6)
You can install them with the following command:
pip install pyopenssl ndg-httpsclient pyasn1
To activate certificate checking, call
:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
before you begin making HTTP requests. This can be done in a ``sitecustomize``
module, or at any other time before your application begins using ``urllib3``,
like this::
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
Now you can use :mod:`urllib3` as you normally would, and it will support SNI
when the required modules are installed.
Activating this module also has the positive side effect of disabling SSL/TLS
compression in Python 2 (see `CRIME attack`_).
If you want to configure the default list of supported cipher suites, you can
set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
Module Variables
----------------
:var DEFAULT_SSL_CIPHER_LIST: The list of supported SSL/TLS cipher suites.
Default: ``ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:
ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS``
.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
'''
try:
from ndg.httpsclient.ssl_peer_verification import SUBJ_ALT_NAME_SUPPORT
from ndg.httpsclient.subj_alt_name import SubjectAltName as BaseSubjectAltName
except SyntaxError as e:
raise ImportError(e)
import OpenSSL.SSL
from pyasn1.codec.der import decoder as der_decoder
from pyasn1.type import univ, constraint
from socket import _fileobject, timeout
import ssl
import select
from .. import connection
from .. import util
__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
# SNI only *really* works if we can read the subjectAltName of certificates.
HAS_SNI = SUBJ_ALT_NAME_SUPPORT
# Map from urllib3 to PyOpenSSL compatible parameter-values.
_openssl_versions = {
ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
}
try:
_openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD})
except AttributeError:
pass
_openssl_verify = {
ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
ssl.CERT_REQUIRED: OpenSSL.SSL.VERIFY_PEER
+ OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
}
# A secure default.
# Sources for more information on TLS ciphers:
#
# - https://wiki.mozilla.org/Security/Server_Side_TLS
# - https://www.ssllabs.com/projects/best-practices/index.html
# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
#
# The general intent is:
# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
# - prefer ECDHE over DHE for better performance,
# - prefer any AES-GCM over any AES-CBC for better performance and security,
# - use 3DES as fallback which is secure but slow,
# - disable NULL authentication, MD5 MACs and DSS for security reasons.
DEFAULT_SSL_CIPHER_LIST = "ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:" + \
"ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:" + \
"!aNULL:!MD5:!DSS"
orig_util_HAS_SNI = util.HAS_SNI
orig_connection_ssl_wrap_socket = connection.ssl_wrap_socket
def inject_into_urllib3():
'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.'
connection.ssl_wrap_socket = ssl_wrap_socket
util.HAS_SNI = HAS_SNI
def extract_from_urllib3():
'Undo monkey-patching by :func:`inject_into_urllib3`.'
connection.ssl_wrap_socket = orig_connection_ssl_wrap_socket
util.HAS_SNI = orig_util_HAS_SNI
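# Illustrative sketch, not part of urllib3: the two helpers above can bracket a
# block of HTTPS work so the PyOpenSSL backend is only active while needed.
def _example_temporary_injection():
    inject_into_urllib3()
    try:
        pass  # issue HTTPS requests with SNI support here
    finally:
        extract_from_urllib3()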
### Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
class SubjectAltName(BaseSubjectAltName):
'''ASN.1 implementation for subjectAltNames support'''
# There is no limit to how many SAN certificates a certificate may have,
# however this needs to have some limit so we'll set an arbitrarily high
# limit.
sizeSpec = univ.SequenceOf.sizeSpec + \
constraint.ValueSizeConstraint(1, 1024)
### Note: This is a slightly bug-fixed version of the same function from ndg-httpsclient.
def get_subj_alt_name(peer_cert):
# Search through extensions
dns_name = []
if not SUBJ_ALT_NAME_SUPPORT:
return dns_name
general_names = SubjectAltName()
for i in range(peer_cert.get_extension_count()):
ext = peer_cert.get_extension(i)
ext_name = ext.get_short_name()
if ext_name != 'subjectAltName':
continue
# PyOpenSSL returns extension data in ASN.1 encoded form
ext_dat = ext.get_data()
decoded_dat = der_decoder.decode(ext_dat,
asn1Spec=general_names)
for name in decoded_dat:
if not isinstance(name, SubjectAltName):
continue
for entry in range(len(name)):
component = name.getComponentByPosition(entry)
if component.getName() != 'dNSName':
continue
dns_name.append(str(component.getComponent()))
return dns_name
class WrappedSocket(object):
'''API-compatibility wrapper for Python OpenSSL's Connection-class.
Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
collector of pypy.
'''
def __init__(self, connection, socket, suppress_ragged_eofs=True):
self.connection = connection
self.socket = socket
self.suppress_ragged_eofs = suppress_ragged_eofs
self._makefile_refs = 0
def fileno(self):
return self.socket.fileno()
def makefile(self, mode, bufsize=-1):
self._makefile_refs += 1
return _fileobject(self, mode, bufsize, close=True)
def recv(self, *args, **kwargs):
try:
data = self.connection.recv(*args, **kwargs)
except OpenSSL.SSL.SysCallError as e:
if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
return b''
else:
raise
except OpenSSL.SSL.WantReadError:
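            # The OpenSSL connection raises WantReadError when no TLS record is
            # available yet; wait (honouring the socket timeout) until the raw
            # socket becomes readable, then retry the read.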
rd, wd, ed = select.select(
[self.socket], [], [], self.socket.gettimeout())
if not rd:
raise timeout('The read operation timed out')
else:
return self.recv(*args, **kwargs)
else:
return data
def settimeout(self, timeout):
return self.socket.settimeout(timeout)
def _send_until_done(self, data):
while True:
try:
return self.connection.send(data)
except OpenSSL.SSL.WantWriteError:
_, wlist, _ = select.select([], [self.socket], [],
self.socket.gettimeout())
if not wlist:
raise timeout()
continue
def sendall(self, data):
while len(data):
sent = self._send_until_done(data)
data = data[sent:]
def close(self):
if self._makefile_refs < 1:
return self.connection.shutdown()
else:
self._makefile_refs -= 1
def getpeercert(self, binary_form=False):
x509 = self.connection.get_peer_certificate()
if not x509:
return x509
if binary_form:
return OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_ASN1,
x509)
return {
'subject': (
(('commonName', x509.get_subject().CN),),
),
'subjectAltName': [
('DNS', value)
for value in get_subj_alt_name(x509)
]
}
def _reuse(self):
self._makefile_refs += 1
def _drop(self):
if self._makefile_refs < 1:
self.close()
else:
self._makefile_refs -= 1
def _verify_callback(cnx, x509, err_no, err_depth, return_code):
return err_no == 0
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None):
ctx = OpenSSL.SSL.Context(_openssl_versions[ssl_version])
if certfile:
keyfile = keyfile or certfile # Match behaviour of the normal python ssl library
ctx.use_certificate_file(certfile)
if keyfile:
ctx.use_privatekey_file(keyfile)
if cert_reqs != ssl.CERT_NONE:
ctx.set_verify(_openssl_verify[cert_reqs], _verify_callback)
if ca_certs:
try:
ctx.load_verify_locations(ca_certs, None)
except OpenSSL.SSL.Error as e:
raise ssl.SSLError('bad ca_certs: %r' % ca_certs, e)
else:
ctx.set_default_verify_paths()
    # Disable TLS compression to mitigate CRIME attack (issue #309)
OP_NO_COMPRESSION = 0x20000
ctx.set_options(OP_NO_COMPRESSION)
# Set list of supported ciphersuites.
ctx.set_cipher_list(DEFAULT_SSL_CIPHER_LIST)
cnx = OpenSSL.SSL.Connection(ctx, sock)
cnx.set_tlsext_host_name(server_hostname)
cnx.set_connect_state()
while True:
try:
cnx.do_handshake()
except OpenSSL.SSL.WantReadError:
select.select([sock], [], [])
continue
except OpenSSL.SSL.Error as e:
raise ssl.SSLError('bad handshake', e)
break
return WrappedSocket(cnx, sock)
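# Illustrative sketch (not part of urllib3): wrapping a plain TCP socket with
# the helper above. The host name and verification settings are assumptions
# made for the example only.
#
#     import socket
#     sock = socket.create_connection(('example.org', 443))
#     wrapped = ssl_wrap_socket(sock,
#                               cert_reqs=ssl.CERT_NONE,
#                               server_hostname='example.org',
#                               ssl_version=ssl.PROTOCOL_SSLv23)
#     wrapped.sendall(b'GET / HTTP/1.0\r\nHost: example.org\r\n\r\n')
#     print(wrapped.recv(1024))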
| mit |
elkingtonmcb/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 103 | 22297 | """
Todo: cross-check the F-value with stats model
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (chi2, f_classif, f_oneway, f_regression,
SelectPercentile, SelectKBest,
SelectFpr, SelectFdr, SelectFwe,
GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
    # Smoke test f_oneway on integers: that it does not raise casting errors
# with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
    # test that it gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
Y[0] = 0. # have Y mean being null
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(30)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
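# Illustrative usage of the FDR heuristic outside the test above; the data and
# the alpha value are arbitrary assumptions:
#
#     X, y = make_regression(n_samples=150, n_features=20, n_informative=5,
#                            noise=10, random_state=0)
#     X_kept = SelectFdr(f_regression, alpha=0.05).fit_transform(X, y)
#     # Benjamini-Hochberg keeps the expected FP / (TP + FP) ratio below alpha.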
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([[0, 1, 2]])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
# Generate random uncorrelated data: a strict univariate test should
    # reject all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
| bsd-3-clause |
jnishi/chainer | chainer/links/connection/lstm.py | 2 | 12108 | import six
import chainer
from chainer.backends import cuda
from chainer.functions.activation import lstm
from chainer.functions.array import concat
from chainer.functions.array import split_axis
from chainer import initializers
from chainer import link
from chainer.links.connection import linear
from chainer import utils
from chainer import variable
class LSTMBase(link.Chain):
def __init__(self, in_size, out_size=None, lateral_init=None,
upward_init=None, bias_init=None, forget_bias_init=None):
if out_size is None:
out_size, in_size = in_size, None
super(LSTMBase, self).__init__()
if bias_init is None:
bias_init = 0
if forget_bias_init is None:
forget_bias_init = 1
self.state_size = out_size
self.lateral_init = lateral_init
self.upward_init = upward_init
self.bias_init = bias_init
self.forget_bias_init = forget_bias_init
with self.init_scope():
self.upward = linear.Linear(in_size, 4 * out_size, initialW=0)
self.lateral = linear.Linear(out_size, 4 * out_size, initialW=0,
nobias=True)
if in_size is not None:
self._initialize_params()
def _initialize_params(self):
lateral_init = initializers._get_initializer(self.lateral_init)
upward_init = initializers._get_initializer(self.upward_init)
bias_init = initializers._get_initializer(self.bias_init)
forget_bias_init = initializers._get_initializer(self.forget_bias_init)
for i in six.moves.range(0, 4 * self.state_size, self.state_size):
lateral_init(self.lateral.W.array[i:i + self.state_size, :])
upward_init(self.upward.W.array[i:i + self.state_size, :])
a, i, f, o = lstm._extract_gates(
self.upward.b.array.reshape(1, 4 * self.state_size, 1))
bias_init(a)
bias_init(i)
forget_bias_init(f)
bias_init(o)
class StatelessLSTM(LSTMBase):
"""Stateless LSTM layer.
This is a fully-connected LSTM layer as a chain. Unlike the
:func:`~chainer.functions.lstm` function, this chain holds upward and
lateral connections as child links. This link doesn't keep cell and
hidden states.
Args:
in_size (int or None): Dimension of input vectors. If ``None``,
parameter initialization will be deferred until the first forward
data pass at which time the size will be determined.
out_size (int): Dimensionality of output vectors.
Attributes:
upward (chainer.links.Linear): Linear layer of upward connections.
lateral (chainer.links.Linear): Linear layer of lateral connections.
.. admonition:: Example
There are several ways to make a StatelessLSTM link.
Let a two-dimensional input array :math:`x`, a cell state array
        :math:`c`, and the output array of the previous step :math:`h` be:
>>> x = np.zeros((1, 10), dtype=np.float32)
>>> c = np.zeros((1, 20), dtype=np.float32)
>>> h = np.zeros((1, 20), dtype=np.float32)
1. Give both ``in_size`` and ``out_size`` arguments:
>>> l = L.StatelessLSTM(10, 20)
>>> c_new, h_new = l(c, h, x)
>>> c_new.shape
(1, 20)
>>> h_new.shape
(1, 20)
2. Omit ``in_size`` argument or fill it with ``None``:
The below two cases are the same.
>>> l = L.StatelessLSTM(20)
>>> c_new, h_new = l(c, h, x)
>>> c_new.shape
(1, 20)
>>> h_new.shape
(1, 20)
>>> l = L.StatelessLSTM(None, 20)
>>> c_new, h_new = l(c, h, x)
>>> c_new.shape
(1, 20)
>>> h_new.shape
(1, 20)
"""
def forward(self, c, h, x):
"""Returns new cell state and updated output of LSTM.
Args:
c (~chainer.Variable): Cell states of LSTM units.
h (~chainer.Variable): Output at the previous time step.
x (~chainer.Variable): A new batch from the input sequence.
Returns:
tuple of ~chainer.Variable: Returns ``(c_new, h_new)``, where
``c_new`` represents new cell state, and ``h_new`` is updated
output of LSTM units.
"""
if self.upward.W.array is None:
in_size = x.size // x.shape[0]
with chainer.using_device(self.device):
self.upward._initialize_params(in_size)
self._initialize_params()
lstm_in = self.upward(x)
if h is not None:
lstm_in += self.lateral(h)
if c is None:
xp = self.xp
with chainer.using_device(self.device):
c = variable.Variable(
xp.zeros((x.shape[0], self.state_size), dtype=x.dtype))
return lstm.lstm(c, lstm_in)
class LSTM(LSTMBase):
"""Fully-connected LSTM layer.
This is a fully-connected LSTM layer as a chain. Unlike the
:func:`~chainer.functions.lstm` function, which is defined as a stateless
activation function, this chain holds upward and lateral connections as
child links.
It also maintains *states*, including the cell state and the output
at the previous time step. Therefore, it can be used as a *stateful LSTM*.
This link supports variable length inputs. The mini-batch size of the
current input must be equal to or smaller than that of the previous one.
The mini-batch size of ``c`` and ``h`` is determined as that of the first
input ``x``.
    When the mini-batch size of the ``i``-th input is smaller than that of the
    previous input, this link only updates ``c[0:len(x)]`` and ``h[0:len(x)]``
    and doesn't change the rest of ``c`` and ``h``.
So, please sort input sequences in descending order of lengths before
applying the function.
Args:
in_size (int): Dimension of input vectors. If it is ``None`` or
omitted, parameter initialization will be deferred until the first
forward data pass at which time the size will be determined.
out_size (int): Dimensionality of output vectors.
lateral_init: A callable that takes ``numpy.ndarray`` or
``cupy.ndarray`` and edits its value.
It is used for initialization of the lateral connections.
May be ``None`` to use default initialization.
upward_init: A callable that takes ``numpy.ndarray`` or
``cupy.ndarray`` and edits its value.
It is used for initialization of the upward connections.
May be ``None`` to use default initialization.
bias_init: A callable that takes ``numpy.ndarray`` or
            ``cupy.ndarray`` and edits its value.
            It is used for initialization of the biases of the cell input,
            input gate and output gate of the upward connection.
May be a scalar, in that case, the bias is
initialized by this value.
If it is ``None``, the cell-input bias is initialized to zero.
forget_bias_init: A callable that takes ``numpy.ndarray`` or
            ``cupy.ndarray`` and edits its value.
It is used for initialization of the biases of the forget gate of
the upward connection.
May be a scalar, in that case, the bias is
initialized by this value.
If it is ``None``, the forget bias is initialized to one.
Attributes:
upward (~chainer.links.Linear): Linear layer of upward connections.
lateral (~chainer.links.Linear): Linear layer of lateral connections.
c (~chainer.Variable): Cell states of LSTM units.
h (~chainer.Variable): Output at the previous time step.
.. admonition:: Example
There are several ways to make a LSTM link.
Let a two-dimensional input array :math:`x` be:
>>> x = np.zeros((1, 10), dtype=np.float32)
1. Give both ``in_size`` and ``out_size`` arguments:
>>> l = L.LSTM(10, 20)
>>> h_new = l(x)
>>> h_new.shape
(1, 20)
2. Omit ``in_size`` argument or fill it with ``None``:
The below two cases are the same.
>>> l = L.LSTM(20)
>>> h_new = l(x)
>>> h_new.shape
(1, 20)
>>> l = L.LSTM(None, 20)
>>> h_new = l(x)
>>> h_new.shape
(1, 20)
"""
def __init__(self, in_size, out_size=None, lateral_init=None,
upward_init=None, bias_init=None, forget_bias_init=None):
if out_size is None:
in_size, out_size = None, in_size
super(LSTM, self).__init__(
in_size, out_size, lateral_init, upward_init, bias_init,
forget_bias_init)
self.reset_state()
def _to_device(self, device, skip_between_cupy_devices=False):
# Overrides Link._to_device
# TODO(niboshi): Avoid forcing concrete links to override _to_device
device = chainer.get_device(device)
super(LSTM, self)._to_device(
device, skip_between_cupy_devices=skip_between_cupy_devices)
if self.c is not None:
if not (skip_between_cupy_devices
and device.xp is cuda.cupy
and isinstance(self.c, cuda.ndarray)):
self.c.to_device(device)
if self.h is not None:
if not (skip_between_cupy_devices
and device.xp is cuda.cupy
and isinstance(self.h, cuda.ndarray)):
self.h.to_device(device)
return self
def set_state(self, c, h):
"""Sets the internal state.
It sets the :attr:`c` and :attr:`h` attributes.
Args:
c (~chainer.Variable): A new cell states of LSTM units.
h (~chainer.Variable): A new output at the previous time step.
"""
assert isinstance(c, variable.Variable)
assert isinstance(h, variable.Variable)
c.to_device(self.device)
h.to_device(self.device)
self.c = c
self.h = h
def reset_state(self):
"""Resets the internal state.
It sets ``None`` to the :attr:`c` and :attr:`h` attributes.
"""
self.c = self.h = None
def forward(self, x):
"""Updates the internal state and returns the LSTM outputs.
Args:
x (~chainer.Variable): A new batch from the input sequence.
Returns:
~chainer.Variable: Outputs of updated LSTM units.
"""
if self.upward.W.array is None:
with chainer.using_device(self.device):
in_size = utils.size_of_shape(x.shape[1:])
self.upward._initialize_params(in_size)
self._initialize_params()
batch = x.shape[0]
lstm_in = self.upward(x)
h_rest = None
if self.h is not None:
h_size = self.h.shape[0]
if batch == 0:
h_rest = self.h
elif h_size < batch:
                msg = ('The batch size of x must be equal to or less than '
'the size of the previous state h.')
raise TypeError(msg)
elif h_size > batch:
h_update, h_rest = split_axis.split_axis(
self.h, [batch], axis=0)
lstm_in += self.lateral(h_update)
else:
lstm_in += self.lateral(self.h)
if self.c is None:
with chainer.using_device(self.device):
self.c = variable.Variable(
self.xp.zeros((batch, self.state_size), dtype=x.dtype))
self.c, y = lstm.lstm(self.c, lstm_in)
if h_rest is None:
self.h = y
elif len(y.array) == 0:
self.h = h_rest
else:
self.h = concat.concat([y, h_rest], axis=0)
return y
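# Illustrative sketch (not part of chainer): driving a stateful LSTM with
# variable-length mini-batches. Shapes are arbitrary assumptions and ``numpy``
# is assumed to be imported; batch sizes must be non-increasing, i.e. the
# sequences are sorted by length in descending order.
#
#     rnn = LSTM(None, 20)
#     x1 = numpy.zeros((3, 10), dtype=numpy.float32)  # 3 sequences still alive
#     x2 = numpy.zeros((2, 10), dtype=numpy.float32)  # shortest sequence ended
#     h1 = rnn(x1)        # updates c[0:3] and h[0:3]
#     h2 = rnn(x2)        # updates only c[0:2] and h[0:2]
#     rnn.reset_state()   # clear c and h before an unrelated set of sequences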
| mit |
israeltobias/DownMedia | youtube-dl/youtube_dl/extractor/atresplayer.py | 5 | 7688 | from __future__ import unicode_literals
import time
import hmac
import hashlib
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
float_or_none,
int_or_none,
sanitized_Request,
urlencode_postdata,
xpath_text,
)
class AtresPlayerIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?atresplayer\.com/television/[^/]+/[^/]+/[^/]+/(?P<id>.+?)_\d+\.html'
_NETRC_MACHINE = 'atresplayer'
_TESTS = [
{
'url': 'http://www.atresplayer.com/television/programas/el-club-de-la-comedia/temporada-4/capitulo-10-especial-solidario-nochebuena_2014122100174.html',
'md5': 'efd56753cda1bb64df52a3074f62e38a',
'info_dict': {
'id': 'capitulo-10-especial-solidario-nochebuena',
'ext': 'mp4',
'title': 'Especial Solidario de Nochebuena',
'description': 'md5:e2d52ff12214fa937107d21064075bf1',
'duration': 5527.6,
'thumbnail': r're:^https?://.*\.jpg$',
},
'skip': 'This video is only available for registered users'
},
{
'url': 'http://www.atresplayer.com/television/especial/videoencuentros/temporada-1/capitulo-112-david-bustamante_2014121600375.html',
'md5': '0d0e918533bbd4b263f2de4d197d4aac',
'info_dict': {
'id': 'capitulo-112-david-bustamante',
'ext': 'flv',
'title': 'David Bustamante',
'description': 'md5:f33f1c0a05be57f6708d4dd83a3b81c6',
'duration': 1439.0,
'thumbnail': r're:^https?://.*\.jpg$',
},
},
{
'url': 'http://www.atresplayer.com/television/series/el-secreto-de-puente-viejo/el-chico-de-los-tres-lunares/capitulo-977-29-12-14_2014122400174.html',
'only_matching': True,
},
]
_USER_AGENT = 'Dalvik/1.6.0 (Linux; U; Android 4.3; GT-I9300 Build/JSS15J'
_MAGIC = 'QWtMLXs414Yo+c#_+Q#K@NN)'
_TIMESTAMP_SHIFT = 30000
_TIME_API_URL = 'http://servicios.atresplayer.com/api/admin/time.json'
_URL_VIDEO_TEMPLATE = 'https://servicios.atresplayer.com/api/urlVideo/{1}/{0}/{1}|{2}|{3}.json'
_PLAYER_URL_TEMPLATE = 'https://servicios.atresplayer.com/episode/getplayer.json?episodePk=%s'
_EPISODE_URL_TEMPLATE = 'http://www.atresplayer.com/episodexml/%s'
_LOGIN_URL = 'https://servicios.atresplayer.com/j_spring_security_check'
_ERRORS = {
'UNPUBLISHED': 'We\'re sorry, but this video is not yet available.',
'DELETED': 'This video has expired and is no longer available for online streaming.',
'GEOUNPUBLISHED': 'We\'re sorry, but this video is not available in your region due to right restrictions.',
# 'PREMIUM': 'PREMIUM',
}
def _real_initialize(self):
self._login()
def _login(self):
(username, password) = self._get_login_info()
if username is None:
return
login_form = {
'j_username': username,
'j_password': password,
}
request = sanitized_Request(
self._LOGIN_URL, urlencode_postdata(login_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
response = self._download_webpage(
request, None, 'Logging in as %s' % username)
error = self._html_search_regex(
r'(?s)<ul class="list_error">(.+?)</ul>', response, 'error', default=None)
if error:
raise ExtractorError(
'Unable to login: %s' % error, expected=True)
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
episode_id = self._search_regex(
r'episode="([^"]+)"', webpage, 'episode id')
request = sanitized_Request(
self._PLAYER_URL_TEMPLATE % episode_id,
headers={'User-Agent': self._USER_AGENT})
player = self._download_json(request, episode_id, 'Downloading player JSON')
episode_type = player.get('typeOfEpisode')
error_message = self._ERRORS.get(episode_type)
if error_message:
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, error_message), expected=True)
formats = []
video_url = player.get('urlVideo')
if video_url:
format_info = {
'url': video_url,
'format_id': 'http',
}
mobj = re.search(r'(?P<bitrate>\d+)K_(?P<width>\d+)x(?P<height>\d+)', video_url)
if mobj:
format_info.update({
'width': int_or_none(mobj.group('width')),
'height': int_or_none(mobj.group('height')),
'tbr': int_or_none(mobj.group('bitrate')),
})
formats.append(format_info)
timestamp = int_or_none(self._download_webpage(
self._TIME_API_URL,
video_id, 'Downloading timestamp', fatal=False), 1000, time.time())
timestamp_shifted = compat_str(timestamp + self._TIMESTAMP_SHIFT)
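        # The urlVideo endpoint expects a client-side token: an HMAC-MD5 over
        # the episode id concatenated with the shifted server timestamp, keyed
        # with the hard-coded _MAGIC secret.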
token = hmac.new(
self._MAGIC.encode('ascii'),
(episode_id + timestamp_shifted).encode('utf-8'), hashlib.md5
).hexdigest()
request = sanitized_Request(
self._URL_VIDEO_TEMPLATE.format('windows', episode_id, timestamp_shifted, token),
headers={'User-Agent': self._USER_AGENT})
fmt_json = self._download_json(
request, video_id, 'Downloading windows video JSON')
result = fmt_json.get('resultDes')
if result.lower() != 'ok':
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, result), expected=True)
for format_id, video_url in fmt_json['resultObject'].items():
if format_id == 'token' or not video_url.startswith('http'):
continue
if 'geodeswowsmpra3player' in video_url:
f4m_path = video_url.split('smil:', 1)[-1].split('free_', 1)[0]
f4m_url = 'http://drg.antena3.com/{0}hds/es/sd.f4m'.format(f4m_path)
                # these videos are protected by DRM; the f4m downloader doesn't support them
continue
else:
f4m_url = video_url[:-9] + '/manifest.f4m'
formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
self._sort_formats(formats)
path_data = player.get('pathData')
episode = self._download_xml(
self._EPISODE_URL_TEMPLATE % path_data, video_id,
'Downloading episode XML')
duration = float_or_none(xpath_text(
episode, './media/asset/info/technical/contentDuration', 'duration'))
art = episode.find('./media/asset/info/art')
title = xpath_text(art, './name', 'title')
description = xpath_text(art, './description', 'description')
thumbnail = xpath_text(episode, './media/asset/files/background', 'thumbnail')
subtitles = {}
subtitle_url = xpath_text(episode, './media/asset/files/subtitle', 'subtitle')
if subtitle_url:
subtitles['es'] = [{
'ext': 'srt',
'url': subtitle_url,
}]
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
'subtitles': subtitles,
}
| gpl-3.0 |
ocadotechnology/django-forge | forge/views/v3.py | 2 | 4771 | import urllib
import urlparse
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from .utils import json_response
from ..models import Author, Module, Release
## Helper methods
def error_response(errors, **kwargs):
"""
    Returns an error response for the v3 Forge API.
"""
error_dict = {'errors': errors}
if 'message' in kwargs:
error_dict['message'] = kwargs['message']
return json_response(
error_dict, indent=2,
status=kwargs.get('status', 400)
)
def query_dict(request):
"""
    Returns a query dictionary initialized with common parameters for v3 views.
"""
try:
limit = int(request.GET.get('limit', 20))
except ValueError:
limit = 20
try:
offset = int(request.GET.get('offset', 0))
except ValueError:
offset = 0
return {
'limit': limit,
'offset': offset,
}
def pagination_data(qs, query, url_name):
"""
Returns a two-tuple comprising a Page and dictionary of pagination data
corresponding to the given queryset, query parameters, and URL name.
"""
limit = query['limit']
offset = query['offset']
p = Paginator(qs, limit)
page_num = (offset / p.per_page) + 1
page = p.page(page_num)
cur_url = urlparse.urlsplit(reverse(url_name))
first_query = query.copy()
first_query['offset'] = 0
first_url = urlparse.urlunsplit(
(cur_url.scheme, cur_url.netloc, cur_url.path,
urllib.urlencode(first_query), cur_url.fragment)
)
if page.has_previous():
prev_query = query.copy()
prev_query['offset'] = (page_num - 2) * p.per_page
prev_url = urlparse.urlunsplit(
(cur_url.scheme, cur_url.netloc, cur_url.path,
urllib.urlencode(prev_query), cur_url.fragment)
)
else:
prev_url = None
if page.has_next():
next_query = query.copy()
next_query['offset'] = page_num * p.per_page
next_url = urlparse.urlunsplit(
(cur_url.scheme, cur_url.netloc, cur_url.path,
urllib.urlencode(next_query), cur_url.fragment)
)
else:
next_url = None
pagination_dict = {
'limit': limit,
'offset': offset,
'first': first_url,
'previous': prev_url,
'next': next_url,
'total': p.count,
}
return page, pagination_dict
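# Illustrative usage (mirrors the view functions below; the queryset and URL
# name are just examples):
#
#     query = query_dict(request)
#     page, pagination = pagination_data(Module.objects.all(), query, 'modules_v3')
#     # pagination['previous'] / pagination['next'] are ready-made URLs or None,
#     # and page.object_list holds the records for the requested offset/limit.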
## API views
def modules(request):
"""
Provides the `/v3/modules` API endpoint.
"""
query = query_dict(request)
q = request.GET.get('query', None)
if q:
# Client has provided a search query..
query['query'] = q
parsed = Module.objects.parse_full_name(q)
if parsed:
# If query looks like a module name, try and get it.
author, name = parsed
qs = Module.objects.filter(author__name=author, name=name)
else:
# Otherwise we search other fields.
qs = (
Module.objects.filter(name__icontains=q) |
Module.objects.filter(author__name__icontains=q) |
Module.objects.filter(releases__version__icontains=q) |
Module.objects.filter(tags__icontains=q) |
Module.objects.filter(desc__icontains=q)
)
else:
qs = Module.objects.all()
# Ensure only distinct records are returned.
qs = qs.order_by('author__name').distinct()
# Get pagination page and data.
page, pagination_dict = pagination_data(qs, query, 'modules_v3')
modules_data = {
'pagination': pagination_dict,
'results': [module.v3 for module in page.object_list],
}
return json_response(modules_data, indent=2)
def releases(request):
"""
Provides the `/v3/releases` API endpoint.
"""
query = query_dict(request)
qs = Release.objects.all()
module_name = request.GET.get('module', None)
if module_name:
query['module'] = module_name
if Module.objects.parse_full_name(module_name):
try:
qs = qs.filter(
module=Module.objects.get_for_full_name(module_name)
)
except Module.DoesNotExist:
qs = qs.none()
else:
return error_response(
["'%s' is not a valid full modulename" % module_name]
)
# Get pagination page and data.
page, pagination_dict = pagination_data(qs, query, 'releases_v3')
# Constructing releases_data dictionary for serialization.
releases_data = {
'pagination': pagination_dict,
'results': [rel.v3 for rel in page.object_list],
}
return json_response(releases_data, indent=2)
| apache-2.0 |
wouwei/PiLapse | picam/picamEnv/Lib/site-packages/pip/req/req_install.py | 50 | 45589 | from __future__ import absolute_import
import logging
import os
import re
import shutil
import sys
import tempfile
import traceback
import warnings
import zipfile
from distutils import sysconfig
from distutils.util import change_root
from email.parser import FeedParser
from pip._vendor import pkg_resources, six
from pip._vendor.distlib.markers import interpret as markers_interpret
from pip._vendor.packaging import specifiers
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.six.moves import configparser
import pip.wheel
from pip.compat import native_str, get_stdlib, WINDOWS
from pip.download import is_url, url_to_path, path_to_url, is_archive_file
from pip.exceptions import (
InstallationError, UninstallationError, UnsupportedWheel,
)
from pip.locations import (
bin_py, running_under_virtualenv, PIP_DELETE_MARKER_FILENAME, bin_user,
)
from pip.utils import (
display_path, rmtree, ask_path_exists, backup_dir, is_installable_dir,
dist_in_usersite, dist_in_site_packages, egg_link_path,
call_subprocess, read_text_file, FakeFile, _make_build_dir, ensure_dir,
get_installed_version, normalize_path, dist_is_local,
)
from pip.utils.hashes import Hashes
from pip.utils.deprecation import RemovedInPip9Warning, RemovedInPip10Warning
from pip.utils.logging import indent_log
from pip.utils.setuptools_build import SETUPTOOLS_SHIM
from pip.utils.ui import open_spinner
from pip.req.req_uninstall import UninstallPathSet
from pip.vcs import vcs
from pip.wheel import move_wheel_files, Wheel
from pip._vendor.packaging.version import Version
logger = logging.getLogger(__name__)
operators = specifiers.Specifier._operators.keys()
def _strip_extras(path):
m = re.match(r'^(.+)(\[[^\]]+\])$', path)
extras = None
if m:
path_no_extras = m.group(1)
extras = m.group(2)
else:
path_no_extras = path
return path_no_extras, extras
class InstallRequirement(object):
def __init__(self, req, comes_from, source_dir=None, editable=False,
link=None, as_egg=False, update=True,
pycompile=True, markers=None, isolated=False, options=None,
wheel_cache=None, constraint=False):
self.extras = ()
if isinstance(req, six.string_types):
try:
req = pkg_resources.Requirement.parse(req)
except pkg_resources.RequirementParseError:
if os.path.sep in req:
add_msg = "It looks like a path. Does it exist ?"
elif '=' in req and not any(op in req for op in operators):
add_msg = "= is not a valid operator. Did you mean == ?"
else:
add_msg = traceback.format_exc()
raise InstallationError(
"Invalid requirement: '%s'\n%s" % (req, add_msg))
self.extras = req.extras
self.req = req
self.comes_from = comes_from
self.constraint = constraint
self.source_dir = source_dir
self.editable = editable
self._wheel_cache = wheel_cache
self.link = self.original_link = link
self.as_egg = as_egg
self.markers = markers
self._egg_info_path = None
# This holds the pkg_resources.Distribution object if this requirement
# is already available:
self.satisfied_by = None
# This hold the pkg_resources.Distribution object if this requirement
# conflicts with another installed distribution:
self.conflicts_with = None
# Temporary build location
self._temp_build_dir = None
# Used to store the global directory where the _temp_build_dir should
# have been created. Cf _correct_build_location method.
self._ideal_build_dir = None
# True if the editable should be updated:
self.update = update
# Set to True after successful installation
self.install_succeeded = None
# UninstallPathSet of uninstalled distribution (for possible rollback)
self.uninstalled = None
# Set True if a legitimate do-nothing-on-uninstall has happened - e.g.
# system site packages, stdlib packages.
self.nothing_to_uninstall = False
self.use_user_site = False
self.target_dir = None
self.options = options if options else {}
self.pycompile = pycompile
# Set to True after successful preparation of this requirement
self.prepared = False
self.isolated = isolated
@classmethod
def from_editable(cls, editable_req, comes_from=None, default_vcs=None,
isolated=False, options=None, wheel_cache=None,
constraint=False):
from pip.index import Link
name, url, extras_override = parse_editable(
editable_req, default_vcs)
if url.startswith('file:'):
source_dir = url_to_path(url)
else:
source_dir = None
res = cls(name, comes_from, source_dir=source_dir,
editable=True,
link=Link(url),
constraint=constraint,
isolated=isolated,
options=options if options else {},
wheel_cache=wheel_cache)
if extras_override is not None:
res.extras = extras_override
return res
@classmethod
def from_line(
cls, name, comes_from=None, isolated=False, options=None,
wheel_cache=None, constraint=False):
"""Creates an InstallRequirement from a name, which might be a
requirement, directory containing 'setup.py', filename, or URL.
"""
from pip.index import Link
if is_url(name):
marker_sep = '; '
else:
marker_sep = ';'
if marker_sep in name:
name, markers = name.split(marker_sep, 1)
markers = markers.strip()
if not markers:
markers = None
else:
markers = None
name = name.strip()
req = None
path = os.path.normpath(os.path.abspath(name))
link = None
extras = None
if is_url(name):
link = Link(name)
else:
p, extras = _strip_extras(path)
if (os.path.isdir(p) and
(os.path.sep in name or name.startswith('.'))):
if not is_installable_dir(p):
raise InstallationError(
"Directory %r is not installable. File 'setup.py' "
"not found." % name
)
link = Link(path_to_url(p))
elif is_archive_file(p):
if not os.path.isfile(p):
logger.warning(
'Requirement %r looks like a filename, but the '
'file does not exist',
name
)
link = Link(path_to_url(p))
# it's a local file, dir, or url
if link:
# Handle relative file URLs
if link.scheme == 'file' and re.search(r'\.\./', link.url):
link = Link(
path_to_url(os.path.normpath(os.path.abspath(link.path))))
# wheel file
if link.is_wheel:
wheel = Wheel(link.filename) # can raise InvalidWheelFilename
if not wheel.supported():
raise UnsupportedWheel(
"%s is not a supported wheel on this platform." %
wheel.filename
)
req = "%s==%s" % (wheel.name, wheel.version)
else:
# set the req to the egg fragment. when it's not there, this
# will become an 'unnamed' requirement
req = link.egg_fragment
# a requirement specifier
else:
req = name
options = options if options else {}
res = cls(req, comes_from, link=link, markers=markers,
isolated=isolated, options=options,
wheel_cache=wheel_cache, constraint=constraint)
if extras:
res.extras = pkg_resources.Requirement.parse('__placeholder__' +
extras).extras
return res
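    # Illustrative inputs accepted by from_line; the paths and URL below are
    # hypothetical:
    #
    #     InstallRequirement.from_line('requests==2.10.0')  # requirement specifier
    #     InstallRequirement.from_line('./downloads/pkg.tar.gz')  # local archive
    #     InstallRequirement.from_line('https://example.com/pkg-1.0.zip')  # URL
    #     InstallRequirement.from_line('.')  # a directory containing setup.py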
def __str__(self):
if self.req:
s = str(self.req)
if self.link:
s += ' from %s' % self.link.url
else:
s = self.link.url if self.link else None
if self.satisfied_by is not None:
s += ' in %s' % display_path(self.satisfied_by.location)
if self.comes_from:
if isinstance(self.comes_from, six.string_types):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += ' (from %s)' % comes_from
return s
def __repr__(self):
return '<%s object: %s editable=%r>' % (
self.__class__.__name__, str(self), self.editable)
def populate_link(self, finder, upgrade, require_hashes):
"""Ensure that if a link can be found for this, that it is found.
        Note that self.link may still be None - if upgrade is False and the
requirement is already installed.
If require_hashes is True, don't use the wheel cache, because cached
wheels, always built locally, have different hashes than the files
downloaded from the index server and thus throw false hash mismatches.
        Furthermore, cached wheels at present have nondeterministic contents due
to file modification times.
"""
if self.link is None:
self.link = finder.find_requirement(self, upgrade)
if self._wheel_cache is not None and not require_hashes:
old_link = self.link
self.link = self._wheel_cache.cached_wheel(self.link, self.name)
if old_link != self.link:
logger.debug('Using cached wheel link: %s', self.link)
@property
def specifier(self):
return self.req.specifier
@property
def is_pinned(self):
"""Return whether I am pinned to an exact version.
For example, some-package==1.2 is pinned; some-package>1.2 is not.
"""
specifiers = self.specifier
return (len(specifiers) == 1 and
next(iter(specifiers)).operator in ('==', '==='))
def from_path(self):
if self.req is None:
return None
s = str(self.req)
if self.comes_from:
if isinstance(self.comes_from, six.string_types):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += '->' + comes_from
return s
def build_location(self, build_dir):
if self._temp_build_dir is not None:
return self._temp_build_dir
if self.req is None:
# for requirement via a path to a directory: the name of the
# package is not available yet so we create a temp directory
# Once run_egg_info will have run, we'll be able
# to fix it via _correct_build_location
self._temp_build_dir = tempfile.mkdtemp('-build', 'pip-')
self._ideal_build_dir = build_dir
return self._temp_build_dir
if self.editable:
name = self.name.lower()
else:
name = self.name
# FIXME: Is there a better place to create the build_dir? (hg and bzr
# need this)
if not os.path.exists(build_dir):
logger.debug('Creating directory %s', build_dir)
_make_build_dir(build_dir)
return os.path.join(build_dir, name)
def _correct_build_location(self):
"""Move self._temp_build_dir to self._ideal_build_dir/self.req.name
For some requirements (e.g. a path to a directory), the name of the
package is not available until we run egg_info, so the build_location
will return a temporary directory and store the _ideal_build_dir.
This is only called by self.egg_info_path to fix the temporary build
directory.
"""
if self.source_dir is not None:
return
assert self.req is not None
assert self._temp_build_dir
assert self._ideal_build_dir
old_location = self._temp_build_dir
self._temp_build_dir = None
new_location = self.build_location(self._ideal_build_dir)
if os.path.exists(new_location):
raise InstallationError(
'A package already exists in %s; please remove it to continue'
% display_path(new_location))
logger.debug(
'Moving package %s from %s to new location %s',
self, display_path(old_location), display_path(new_location),
)
shutil.move(old_location, new_location)
self._temp_build_dir = new_location
self._ideal_build_dir = None
self.source_dir = new_location
self._egg_info_path = None
@property
def name(self):
if self.req is None:
return None
return native_str(self.req.project_name)
@property
def setup_py_dir(self):
return os.path.join(
self.source_dir,
self.link and self.link.subdirectory_fragment or '')
@property
def setup_py(self):
assert self.source_dir, "No source dir for %s" % self
try:
import setuptools # noqa
except ImportError:
if get_installed_version('setuptools') is None:
add_msg = "Please install setuptools."
else:
add_msg = traceback.format_exc()
# Setuptools is not available
raise InstallationError(
"Could not import setuptools which is required to "
"install from a source distribution.\n%s" % add_msg
)
setup_py = os.path.join(self.setup_py_dir, 'setup.py')
# Python2 __file__ should not be unicode
if six.PY2 and isinstance(setup_py, six.text_type):
setup_py = setup_py.encode(sys.getfilesystemencoding())
return setup_py
def run_egg_info(self):
assert self.source_dir
if self.name:
logger.debug(
'Running setup.py (path:%s) egg_info for package %s',
self.setup_py, self.name,
)
else:
logger.debug(
'Running setup.py (path:%s) egg_info for package from %s',
self.setup_py, self.link,
)
with indent_log():
script = SETUPTOOLS_SHIM % self.setup_py
base_cmd = [sys.executable, '-c', script]
if self.isolated:
base_cmd += ["--no-user-cfg"]
egg_info_cmd = base_cmd + ['egg_info']
# We can't put the .egg-info files at the root, because then the
# source code will be mistaken for an installed egg, causing
# problems
if self.editable:
egg_base_option = []
else:
egg_info_dir = os.path.join(self.setup_py_dir, 'pip-egg-info')
ensure_dir(egg_info_dir)
egg_base_option = ['--egg-base', 'pip-egg-info']
call_subprocess(
egg_info_cmd + egg_base_option,
cwd=self.setup_py_dir,
show_stdout=False,
command_level=logging.DEBUG,
command_desc='python setup.py egg_info')
if not self.req:
if isinstance(
pkg_resources.parse_version(self.pkg_info()["Version"]),
Version):
op = "=="
else:
op = "==="
self.req = pkg_resources.Requirement.parse(
"".join([
self.pkg_info()["Name"],
op,
self.pkg_info()["Version"],
]))
self._correct_build_location()
else:
metadata_name = canonicalize_name(self.pkg_info()["Name"])
if canonicalize_name(self.req.project_name) != metadata_name:
logger.warning(
'Running setup.py (path:%s) egg_info for package %s '
'produced metadata for project name %s. Fix your '
'#egg=%s fragments.',
self.setup_py, self.name, metadata_name, self.name
)
self.req = pkg_resources.Requirement.parse(metadata_name)
def egg_info_data(self, filename):
if self.satisfied_by is not None:
if not self.satisfied_by.has_metadata(filename):
return None
return self.satisfied_by.get_metadata(filename)
assert self.source_dir
filename = self.egg_info_path(filename)
if not os.path.exists(filename):
return None
data = read_text_file(filename)
return data
def egg_info_path(self, filename):
if self._egg_info_path is None:
if self.editable:
base = self.source_dir
else:
base = os.path.join(self.setup_py_dir, 'pip-egg-info')
filenames = os.listdir(base)
if self.editable:
filenames = []
for root, dirs, files in os.walk(base):
for dir in vcs.dirnames:
if dir in dirs:
dirs.remove(dir)
# Iterate over a copy of ``dirs``, since mutating
# a list while iterating over it can cause trouble.
# (See https://github.com/pypa/pip/pull/462.)
for dir in list(dirs):
# Don't search in anything that looks like a virtualenv
# environment
if (
os.path.exists(
os.path.join(root, dir, 'bin', 'python')
) or
os.path.exists(
os.path.join(
root, dir, 'Scripts', 'Python.exe'
)
)):
dirs.remove(dir)
# Also don't search through tests
elif dir == 'test' or dir == 'tests':
dirs.remove(dir)
filenames.extend([os.path.join(root, dir)
for dir in dirs])
filenames = [f for f in filenames if f.endswith('.egg-info')]
if not filenames:
raise InstallationError(
'No files/directories in %s (from %s)' % (base, filename)
)
assert filenames, \
"No files/directories in %s (from %s)" % (base, filename)
# if we have more than one match, we pick the toplevel one. This
# can easily be the case if there is a dist folder which contains
# an extracted tarball for testing purposes.
if len(filenames) > 1:
filenames.sort(
key=lambda x: x.count(os.path.sep) +
(os.path.altsep and x.count(os.path.altsep) or 0)
)
self._egg_info_path = os.path.join(base, filenames[0])
return os.path.join(self._egg_info_path, filename)
def pkg_info(self):
p = FeedParser()
data = self.egg_info_data('PKG-INFO')
if not data:
logger.warning(
'No PKG-INFO file found in %s',
display_path(self.egg_info_path('PKG-INFO')),
)
p.feed(data or '')
return p.close()
_requirements_section_re = re.compile(r'\[(.*?)\]')
@property
def installed_version(self):
return get_installed_version(self.name)
def assert_source_matches_version(self):
assert self.source_dir
version = self.pkg_info()['version']
if version not in self.req:
logger.warning(
'Requested %s, but installing version %s',
self,
self.installed_version,
)
else:
logger.debug(
'Source in %s has version %s, which satisfies requirement %s',
display_path(self.source_dir),
version,
self,
)
def update_editable(self, obtain=True):
if not self.link:
logger.debug(
"Cannot update repository at %s; repository location is "
"unknown",
self.source_dir,
)
return
assert self.editable
assert self.source_dir
if self.link.scheme == 'file':
# Static paths don't get updated
return
assert '+' in self.link.url, "bad url: %r" % self.link.url
if not self.update:
return
vc_type, url = self.link.url.split('+', 1)
backend = vcs.get_backend(vc_type)
if backend:
vcs_backend = backend(self.link.url)
if obtain:
vcs_backend.obtain(self.source_dir)
else:
vcs_backend.export(self.source_dir)
else:
assert 0, (
'Unexpected version control type (in %s): %s'
% (self.link, vc_type))
def uninstall(self, auto_confirm=False):
"""
Uninstall the distribution currently satisfying this requirement.
Prompts before removing or modifying files unless
``auto_confirm`` is True.
Refuses to delete or modify files outside of ``sys.prefix`` -
thus uninstallation within a virtual environment can only
modify that virtual environment, even if the virtualenv is
linked to global site-packages.
"""
if not self.check_if_exists():
raise UninstallationError(
"Cannot uninstall requirement %s, not installed" % (self.name,)
)
dist = self.satisfied_by or self.conflicts_with
dist_path = normalize_path(dist.location)
if not dist_is_local(dist):
logger.info(
"Not uninstalling %s at %s, outside environment %s",
dist.key,
dist_path,
sys.prefix,
)
self.nothing_to_uninstall = True
return
if dist_path in get_stdlib():
logger.info(
"Not uninstalling %s at %s, as it is in the standard library.",
dist.key,
dist_path,
)
self.nothing_to_uninstall = True
return
paths_to_remove = UninstallPathSet(dist)
develop_egg_link = egg_link_path(dist)
develop_egg_link_egg_info = '{0}.egg-info'.format(
pkg_resources.to_filename(dist.project_name))
egg_info_exists = dist.egg_info and os.path.exists(dist.egg_info)
# Special case for distutils installed package
distutils_egg_info = getattr(dist._provider, 'path', None)
        # The order of these uninstall cases matters: when there are two
        # installs of the same package, pip needs to uninstall the currently
        # detected version
if (egg_info_exists and dist.egg_info.endswith('.egg-info') and
not dist.egg_info.endswith(develop_egg_link_egg_info)):
# if dist.egg_info.endswith(develop_egg_link_egg_info), we
# are in fact in the develop_egg_link case
paths_to_remove.add(dist.egg_info)
if dist.has_metadata('installed-files.txt'):
for installed_file in dist.get_metadata(
'installed-files.txt').splitlines():
path = os.path.normpath(
os.path.join(dist.egg_info, installed_file)
)
paths_to_remove.add(path)
# FIXME: need a test for this elif block
# occurs with --single-version-externally-managed/--record outside
# of pip
elif dist.has_metadata('top_level.txt'):
if dist.has_metadata('namespace_packages.txt'):
namespaces = dist.get_metadata('namespace_packages.txt')
else:
namespaces = []
for top_level_pkg in [
p for p
in dist.get_metadata('top_level.txt').splitlines()
if p and p not in namespaces]:
path = os.path.join(dist.location, top_level_pkg)
paths_to_remove.add(path)
paths_to_remove.add(path + '.py')
paths_to_remove.add(path + '.pyc')
paths_to_remove.add(path + '.pyo')
elif distutils_egg_info:
warnings.warn(
"Uninstalling a distutils installed project ({0}) has been "
"deprecated and will be removed in a future version. This is "
"due to the fact that uninstalling a distutils project will "
"only partially uninstall the project.".format(self.name),
RemovedInPip10Warning,
)
paths_to_remove.add(distutils_egg_info)
elif dist.location.endswith('.egg'):
# package installed by easy_install
# We cannot match on dist.egg_name because it can slightly vary
# i.e. setuptools-0.6c11-py2.6.egg vs setuptools-0.6rc11-py2.6.egg
paths_to_remove.add(dist.location)
easy_install_egg = os.path.split(dist.location)[1]
easy_install_pth = os.path.join(os.path.dirname(dist.location),
'easy-install.pth')
paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg)
elif develop_egg_link:
# develop egg
with open(develop_egg_link, 'r') as fh:
link_pointer = os.path.normcase(fh.readline().strip())
assert (link_pointer == dist.location), (
'Egg-link %s does not match installed location of %s '
'(at %s)' % (link_pointer, self.name, dist.location)
)
paths_to_remove.add(develop_egg_link)
easy_install_pth = os.path.join(os.path.dirname(develop_egg_link),
'easy-install.pth')
paths_to_remove.add_pth(easy_install_pth, dist.location)
elif egg_info_exists and dist.egg_info.endswith('.dist-info'):
for path in pip.wheel.uninstallation_paths(dist):
paths_to_remove.add(path)
else:
logger.debug(
'Not sure how to uninstall: %s - Check: %s',
dist, dist.location)
# find distutils scripts= scripts
if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'):
for script in dist.metadata_listdir('scripts'):
if dist_in_usersite(dist):
bin_dir = bin_user
else:
bin_dir = bin_py
paths_to_remove.add(os.path.join(bin_dir, script))
if WINDOWS:
paths_to_remove.add(os.path.join(bin_dir, script) + '.bat')
# find console_scripts
if dist.has_metadata('entry_points.txt'):
if six.PY2:
options = {}
else:
options = {"delimiters": ('=', )}
config = configparser.SafeConfigParser(**options)
config.readfp(
FakeFile(dist.get_metadata_lines('entry_points.txt'))
)
if config.has_section('console_scripts'):
for name, value in config.items('console_scripts'):
if dist_in_usersite(dist):
bin_dir = bin_user
else:
bin_dir = bin_py
paths_to_remove.add(os.path.join(bin_dir, name))
if WINDOWS:
paths_to_remove.add(
os.path.join(bin_dir, name) + '.exe'
)
paths_to_remove.add(
os.path.join(bin_dir, name) + '.exe.manifest'
)
paths_to_remove.add(
os.path.join(bin_dir, name) + '-script.py'
)
paths_to_remove.remove(auto_confirm)
self.uninstalled = paths_to_remove
def rollback_uninstall(self):
if self.uninstalled:
self.uninstalled.rollback()
else:
logger.error(
"Can't rollback %s, nothing uninstalled.", self.name,
)
def commit_uninstall(self):
if self.uninstalled:
self.uninstalled.commit()
elif not self.nothing_to_uninstall:
logger.error(
"Can't commit %s, nothing uninstalled.", self.name,
)
def archive(self, build_dir):
assert self.source_dir
create_archive = True
archive_name = '%s-%s.zip' % (self.name, self.pkg_info()["version"])
archive_path = os.path.join(build_dir, archive_name)
if os.path.exists(archive_path):
response = ask_path_exists(
'The file %s exists. (i)gnore, (w)ipe, (b)ackup ' %
display_path(archive_path), ('i', 'w', 'b'))
if response == 'i':
create_archive = False
elif response == 'w':
logger.warning('Deleting %s', display_path(archive_path))
os.remove(archive_path)
elif response == 'b':
dest_file = backup_dir(archive_path)
logger.warning(
'Backing up %s to %s',
display_path(archive_path),
display_path(dest_file),
)
shutil.move(archive_path, dest_file)
if create_archive:
zip = zipfile.ZipFile(
archive_path, 'w', zipfile.ZIP_DEFLATED,
allowZip64=True
)
dir = os.path.normcase(os.path.abspath(self.setup_py_dir))
for dirpath, dirnames, filenames in os.walk(dir):
if 'pip-egg-info' in dirnames:
dirnames.remove('pip-egg-info')
for dirname in dirnames:
dirname = os.path.join(dirpath, dirname)
name = self._clean_zip_name(dirname, dir)
zipdir = zipfile.ZipInfo(self.name + '/' + name + '/')
zipdir.external_attr = 0x1ED << 16 # 0o755
zip.writestr(zipdir, '')
for filename in filenames:
if filename == PIP_DELETE_MARKER_FILENAME:
continue
filename = os.path.join(dirpath, filename)
name = self._clean_zip_name(filename, dir)
zip.write(filename, self.name + '/' + name)
zip.close()
logger.info('Saved %s', display_path(archive_path))
def _clean_zip_name(self, name, prefix):
assert name.startswith(prefix + os.path.sep), (
"name %r doesn't start with prefix %r" % (name, prefix)
)
name = name[len(prefix) + 1:]
name = name.replace(os.path.sep, '/')
return name
def match_markers(self):
if self.markers is not None:
return markers_interpret(self.markers)
else:
return True
def install(self, install_options, global_options=[], root=None,
prefix=None):
if self.editable:
self.install_editable(
install_options, global_options, prefix=prefix)
return
if self.is_wheel:
version = pip.wheel.wheel_version(self.source_dir)
pip.wheel.check_compatibility(version, self.name)
self.move_wheel_files(self.source_dir, root=root, prefix=prefix)
self.install_succeeded = True
return
# Extend the list of global and install options passed on to
# the setup.py call with the ones from the requirements file.
# Options specified in requirements file override those
# specified on the command line, since the last option given
# to setup.py is the one that is used.
global_options += self.options.get('global_options', [])
install_options += self.options.get('install_options', [])
if self.isolated:
global_options = list(global_options) + ["--no-user-cfg"]
temp_location = tempfile.mkdtemp('-record', 'pip-')
record_filename = os.path.join(temp_location, 'install-record.txt')
try:
install_args = [sys.executable, "-u"]
install_args.append('-c')
install_args.append(SETUPTOOLS_SHIM % self.setup_py)
install_args += list(global_options) + \
['install', '--record', record_filename]
if not self.as_egg:
install_args += ['--single-version-externally-managed']
if root is not None:
install_args += ['--root', root]
if prefix is not None:
install_args += ['--prefix', prefix]
if self.pycompile:
install_args += ["--compile"]
else:
install_args += ["--no-compile"]
if running_under_virtualenv():
py_ver_str = 'python' + sysconfig.get_python_version()
install_args += ['--install-headers',
os.path.join(sys.prefix, 'include', 'site',
py_ver_str, self.name)]
msg = 'Running setup.py install for %s' % (self.name,)
with open_spinner(msg) as spinner:
with indent_log():
call_subprocess(
install_args + install_options,
cwd=self.setup_py_dir,
show_stdout=False,
spinner=spinner,
)
if not os.path.exists(record_filename):
logger.debug('Record file %s not found', record_filename)
return
self.install_succeeded = True
if self.as_egg:
# there's no --always-unzip option we can pass to install
                # command so we are unable to save the installed-files.txt
return
def prepend_root(path):
if root is None or not os.path.isabs(path):
return path
else:
return change_root(root, path)
with open(record_filename) as f:
for line in f:
directory = os.path.dirname(line)
if directory.endswith('.egg-info'):
egg_info_dir = prepend_root(directory)
break
else:
logger.warning(
'Could not find .egg-info directory in install record'
' for %s',
self,
)
# FIXME: put the record somewhere
# FIXME: should this be an error?
return
new_lines = []
with open(record_filename) as f:
for line in f:
filename = line.strip()
if os.path.isdir(filename):
filename += os.path.sep
new_lines.append(
os.path.relpath(
prepend_root(filename), egg_info_dir)
)
inst_files_path = os.path.join(egg_info_dir, 'installed-files.txt')
with open(inst_files_path, 'w') as f:
f.write('\n'.join(new_lines) + '\n')
finally:
if os.path.exists(record_filename):
os.remove(record_filename)
rmtree(temp_location)
def ensure_has_source_dir(self, parent_dir):
"""Ensure that a source_dir is set.
This will create a temporary build dir if the name of the requirement
isn't known yet.
:param parent_dir: The ideal pip parent_dir for the source_dir.
Generally src_dir for editables and build_dir for sdists.
:return: self.source_dir
"""
if self.source_dir is None:
self.source_dir = self.build_location(parent_dir)
return self.source_dir
def remove_temporary_source(self):
"""Remove the source files from this requirement, if they are marked
for deletion"""
if self.source_dir and os.path.exists(
os.path.join(self.source_dir, PIP_DELETE_MARKER_FILENAME)):
logger.debug('Removing source in %s', self.source_dir)
rmtree(self.source_dir)
self.source_dir = None
if self._temp_build_dir and os.path.exists(self._temp_build_dir):
rmtree(self._temp_build_dir)
self._temp_build_dir = None
def install_editable(self, install_options,
global_options=(), prefix=None):
logger.info('Running setup.py develop for %s', self.name)
if self.isolated:
global_options = list(global_options) + ["--no-user-cfg"]
if prefix:
prefix_param = ['--prefix={0}'.format(prefix)]
install_options = list(install_options) + prefix_param
with indent_log():
# FIXME: should we do --install-headers here too?
call_subprocess(
[
sys.executable,
'-c',
SETUPTOOLS_SHIM % self.setup_py
] +
list(global_options) +
['develop', '--no-deps'] +
list(install_options),
cwd=self.setup_py_dir,
show_stdout=False)
self.install_succeeded = True
def check_if_exists(self):
"""Find an installed distribution that satisfies or conflicts
with this requirement, and set self.satisfied_by or
self.conflicts_with appropriately.
"""
if self.req is None:
return False
try:
self.satisfied_by = pkg_resources.get_distribution(self.req)
except pkg_resources.DistributionNotFound:
return False
except pkg_resources.VersionConflict:
existing_dist = pkg_resources.get_distribution(
self.req.project_name
)
if self.use_user_site:
if dist_in_usersite(existing_dist):
self.conflicts_with = existing_dist
elif (running_under_virtualenv() and
dist_in_site_packages(existing_dist)):
raise InstallationError(
"Will not install to the user site because it will "
"lack sys.path precedence to %s in %s" %
(existing_dist.project_name, existing_dist.location)
)
else:
self.conflicts_with = existing_dist
return True
@property
def is_wheel(self):
return self.link and self.link.is_wheel
def move_wheel_files(self, wheeldir, root=None, prefix=None):
move_wheel_files(
self.name, self.req, wheeldir,
user=self.use_user_site,
home=self.target_dir,
root=root,
prefix=prefix,
pycompile=self.pycompile,
isolated=self.isolated,
)
def get_dist(self):
"""Return a pkg_resources.Distribution built from self.egg_info_path"""
egg_info = self.egg_info_path('').rstrip('/')
base_dir = os.path.dirname(egg_info)
metadata = pkg_resources.PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
return pkg_resources.Distribution(
os.path.dirname(egg_info),
project_name=dist_name,
metadata=metadata)
@property
def has_hash_options(self):
"""Return whether any known-good hashes are specified as options.
These activate --require-hashes mode; hashes specified as part of a
URL do not.
"""
return bool(self.options.get('hashes', {}))
def hashes(self, trust_internet=True):
"""Return a hash-comparer that considers my option- and URL-based
hashes to be known-good.
Hashes in URLs--ones embedded in the requirements file, not ones
downloaded from an index server--are almost peers with ones from
flags. They satisfy --require-hashes (whether it was implicitly or
explicitly activated) but do not activate it. md5 and sha224 are not
allowed in flags, which should nudge people toward good algos. We
always OR all hashes together, even ones from URLs.
:param trust_internet: Whether to trust URL-based (#md5=...) hashes
downloaded from the internet, as by populate_link()
"""
good_hashes = self.options.get('hashes', {}).copy()
link = self.link if trust_internet else self.original_link
if link and link.hash:
good_hashes.setdefault(link.hash_name, []).append(link.hash)
return Hashes(good_hashes)
def _strip_postfix(req):
"""
Strip req postfix ( -dev, 0.2, etc )
"""
# FIXME: use package_to_requirement?
match = re.search(r'^(.*?)(?:-dev|-\d.*)$', req)
if match:
# Strip off -dev, -0.2, etc.
req = match.group(1)
return req
def _build_req_from_url(url):
parts = [p for p in url.split('#', 1)[0].split('/') if p]
req = None
if len(parts) > 2 and parts[-2] in ('tags', 'branches', 'tag', 'branch'):
req = parts[-3]
elif len(parts) > 1 and parts[-1] == 'trunk':
req = parts[-2]
if req:
warnings.warn(
'Sniffing the requirement name from the url is deprecated and '
'will be removed in the future. Please specify an #egg segment '
'instead.', RemovedInPip9Warning,
stacklevel=2)
return req
def parse_editable(editable_req, default_vcs=None):
"""Parses an editable requirement into:
- a requirement name
- an URL
- extras
- editable options
Accepted requirements:
svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir
.[some_extra]
"""
from pip.index import Link
url = editable_req
extras = None
# If a file path is specified with extras, strip off the extras.
m = re.match(r'^(.+)(\[[^\]]+\])$', url)
if m:
url_no_extras = m.group(1)
extras = m.group(2)
else:
url_no_extras = url
if os.path.isdir(url_no_extras):
if not os.path.exists(os.path.join(url_no_extras, 'setup.py')):
raise InstallationError(
"Directory %r is not installable. File 'setup.py' not found." %
url_no_extras
)
# Treating it as code that has already been checked out
url_no_extras = path_to_url(url_no_extras)
if url_no_extras.lower().startswith('file:'):
package_name = Link(url_no_extras).egg_fragment
if extras:
return (
package_name,
url_no_extras,
pkg_resources.Requirement.parse(
'__placeholder__' + extras
).extras,
)
else:
return package_name, url_no_extras, None
for version_control in vcs:
if url.lower().startswith('%s:' % version_control):
url = '%s+%s' % (version_control, url)
break
if '+' not in url:
if default_vcs:
url = default_vcs + '+' + url
else:
raise InstallationError(
'%s should either be a path to a local project or a VCS url '
'beginning with svn+, git+, hg+, or bzr+' %
editable_req
)
vc_type = url.split('+', 1)[0].lower()
if not vcs.get_backend(vc_type):
error_message = 'For --editable=%s only ' % editable_req + \
', '.join([backend.name + '+URL' for backend in vcs.backends]) + \
' is currently supported'
raise InstallationError(error_message)
package_name = Link(url).egg_fragment
if not package_name:
package_name = _build_req_from_url(editable_req)
if not package_name:
raise InstallationError(
'--editable=%s is not the right format; it must have '
'#egg=Package' % editable_req
)
return _strip_postfix(package_name), url, None
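# Hedged illustration (not part of pip's API): the shape parse_editable()
# returns for a typical VCS editable. The URL below is a placeholder.
def _example_parse_editable():
    # For 'git+https://github.com/pypa/pip.git#egg=pip' the result is roughly
    # ('pip', 'git+https://github.com/pypa/pip.git#egg=pip', None); a local
    # path such as '.[testing]' instead yields a file:// URL plus the extras.
    return parse_editable('git+https://github.com/pypa/pip.git#egg=pip')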
| apache-2.0 |
talbrecht/pism_pik07 | site-packages/PISM/options.py | 2 | 8548 | # Copyright (C) 2011, 2014, 2015 David Maxwell
#
# This file is part of PISM.
#
# PISM is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# PISM is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License
# along with PISM; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Helper functions to make working with the PISM/PETSc option system more pythonic."""
import PISM
def _to_tuple(option, use_default):
"""Convert a PISM Option object into a tuple of (value, flag). Return
(None, False) if use_default is False and the option was not set.
"""
if option.is_set() or use_default:
return (option.value(), option.is_set())
return (None, False)
def optionsIntWasSet(option, text, default=None):
"""Determines if an integer-valued command-line option was set.
:param option: Name of command-line option.
:param text: Description of option.
:param default: Default value if option was not set.
:returns: Tuple ``(value, wasSet)`` where ``value`` is the value that was set (or the ``default`` value if it was not)
and ``wasSet`` is a boolean that is ``True`` if the command line option was set explicitly.
"""
if default is None:
return _to_tuple(PISM.cpp.OptionInteger(option, text, 0), False)
else:
return _to_tuple(PISM.cpp.OptionInteger(option, text, default), True)
def optionsInt(*args, **kwargs):
"""Same as :func:`optionsIntWasSet` but only returns the integer value."""
return optionsIntWasSet(*args, **kwargs)[0]
def optionsRealWasSet(option, text, default=None):
"""Determines if a real-valued command line option was set.
:param option: Name of command line option.
:param text: Description of option.
:param default: Default value if option was not set.
:returns: Tuple ``(value, wasSet)`` where ``value`` is the value that was set (or the ``default`` value if it was not)
and ``wasSet`` is a boolean that is ``True`` if the command line option was set explicitly.
"""
if default is None:
return _to_tuple(PISM.cpp.OptionReal(option, text, 0.0), False)
else:
return _to_tuple(PISM.cpp.OptionReal(option, text, default), True)
def optionsReal(*args, **kwargs):
"""Same as :func:`optionsRealWasSet` but only returns the real value."""
return optionsRealWasSet(*args, **kwargs)[0]
def optionsStringWasSet(option, text, default=None):
"""Determines if a string-valued command line option was set.
:param option: Name of command line option.
:param text: Description of option.
:param default: Default value if option was not set.
:returns: Tuple ``(value, wasSet)`` where ``value`` is the value that was set (or the ``default`` value if it was not)
and ``wasSet`` is a boolean that is ``True`` if the command line option was set explicitly.
"""
if default is None:
return _to_tuple(PISM.cpp.OptionString(option, text, ""), False)
else:
return _to_tuple(PISM.cpp.OptionString(option, text, default), True)
def optionsString(*args, **kwargs):
"""Same as :func:`optionsStringWasSet` but only returns the string value."""
return optionsStringWasSet(*args, **kwargs)[0]
def optionsIntArrayWasSet(option, text, default=None):
"""Determines if an integer-array-valued command line option was set.
:param option: Name of command line option.
:param text: Description of option.
:param default: Default value if option was not set.
:returns: Tuple ``(value, wasSet)`` where ``value`` is the value that was set (or the ``default`` value if it was not)
and ``wasSet`` is a boolean that is ``True`` if the command line option was set explicitly.
"""
if default is None:
return _to_tuple(PISM.cpp.OptionIntegerList(option, text), False)
else:
option = PISM.cpp.OptionIntegerList(option, text)
if option.is_set():
return _to_tuple(option, True)
else:
return (default, False)
def optionsIntArray(*args, **kwargs):
"""Same as :func:`optionsIntArrayWasSet` but only returns the integer array."""
return optionsIntArrayWasSet(*args, **kwargs)[0]
def optionsRealArrayWasSet(option, text, default=None):
"""Determines if a real-array-valued command line option was set.
:param option: Name of command line option.
:param text: Description of option.
:param default: Default value if option was not set.
:returns: Tuple ``(value, wasSet)`` where ``value`` is the value that was set (or the ``default`` value if it was not)
and ``wasSet`` is a boolean that is ``True`` if the command line option was set explicitly.
"""
if default is None:
return _to_tuple(PISM.cpp.OptionRealList(option, text), False)
else:
option = PISM.cpp.OptionRealList(option, text)
if option.is_set():
return _to_tuple(option, True)
else:
return (default, False)
def optionsRealArray(*args, **kwargs):
"""Same as :func:`optionsRealArrayWasSet` but only returns the real array."""
return optionsRealArrayWasSet(*args, **kwargs)[0]
def optionsStringArrayWasSet(option, text, default=None):
"""Determines if a string-array-valued command line option was set.
:param option: Name of command line option.
:param text: Description of option.
:param default: Default value if option was not set.
:returns: Tuple ``(value, wasSet)`` where ``value`` is the value that was set (or the ``default`` value if it was not)
and ``wasSet`` is a boolean that is ``True`` if the command line option was set explicitly.
"""
if default is None:
return _to_tuple(PISM.cpp.OptionStringList(option, text, ""), False)
else:
option = PISM.cpp.OptionStringList(option, text, default)
if option.is_set():
return _to_tuple(option, True)
else:
return (default, False)
def optionsStringArray(*args, **kwargs):
"""Same as :func:`optionsStringArrayWasSet` but only returns the string array."""
return optionsStringArrayWasSet(*args, **kwargs)[0]
def optionsListWasSet(option, text, choices, default):
"""Determines if a string command line option was set, where the string can be one of a few legal options.
:param option: Name of command line option.
:param text: Description of option.
:param choices: Comma-separated list of legal values (a string).
:param default: Default value.
:returns: Tuple ``(value, wasSet)`` where ``value`` is the value that was set (or the ``default`` value if it was not)
and ``wasSet`` is a boolean that is ``True`` if the command line option was set explicitly.
"""
if default is None:
return _to_tuple(PISM.cpp.OptionKeyword(option, text, choices, ""), False)
else:
return _to_tuple(PISM.cpp.OptionKeyword(option, text, choices, default), True)
def optionsList(*args, **kwargs):
"""Same as :func:`optionsListWasSet` but only returns the option value."""
return optionsListWasSet(*args, **kwargs)[0]
def optionsFlag(option, text, default=False):
"""Determines if a flag command line option of the form ``-foo`` or ``-no_foo`` was set.
    The option value is determined by which of the two flags appears on the command line.
:param option: Name of command line option.
:param text: Description of option.
:param default: Default value.
:returns: ``True`` if ``-foo`` was set and ``False`` if ``-no_foo`` was set. If
neither is set, the `default` is used, and if both are set a :exc:`RuntimeError` is raised.
"""
if option[0] == '-':
option = option[1:]
true_set = PISM.OptionBool("-" + option, text)
false_set = PISM.OptionBool("-no_" + option, text)
if true_set and false_set:
raise RuntimeError("Command line options inconsistent: both -%s and -no_%s are set" % (option, option))
if true_set:
return True
if false_set:
return False
return default
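# A minimal usage sketch of the helpers above. The option names ("-my_steps",
# "-my_dt", "-my_scheme", "my_verbose") are illustrative placeholders, not
# options defined by PISM itself.
def _example_read_options():
    n = optionsInt("-my_steps", "number of time steps", default=10)
    dt, dt_was_set = optionsRealWasSet("-my_dt", "time step length", default=0.5)
    scheme = optionsList("-my_scheme", "time stepping scheme", "explicit,implicit", "explicit")
    verbose = optionsFlag("my_verbose", "report extra diagnostics", default=False)
    return n, dt, dt_was_set, scheme, verbose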
| gpl-3.0 |
armab/st2contrib | packs/opsgenie/actions/list_users.py | 4 | 1144 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lib.actions import OpsGenieBaseAction
class ListUsersAction(OpsGenieBaseAction):
def run(self):
"""
List users in OpsGenie.
Returns:
- dict: Data from OpsGenie.
"""
payload = {"apiKey": self.api_key}
data = self._req("GET",
"v1/json/user",
payload=payload)
return data
| apache-2.0 |
Linaro/squad | squad/core/migrations/0028_suite_and_test_name_length.py | 2 | 1629 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-06-06 15:14
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0027_project_notification_strategy'),
]
operations = [
migrations.AlterField(
model_name='environment',
name='slug',
field=models.CharField(max_length=100, validators=[django.core.validators.RegexValidator(regex='^[a-zA-Z0-9][a-zA-Z0-9_.-]*')]),
),
migrations.AlterField(
model_name='group',
name='slug',
field=models.CharField(max_length=100, unique=True, validators=[django.core.validators.RegexValidator(regex='^[a-zA-Z0-9][a-zA-Z0-9_.-]*')]),
),
migrations.AlterField(
model_name='project',
name='slug',
field=models.CharField(max_length=100, validators=[django.core.validators.RegexValidator(regex='^[a-zA-Z0-9][a-zA-Z0-9_.-]*')]),
),
migrations.AlterField(
model_name='suite',
name='name',
field=models.CharField(max_length=256, null=True),
),
migrations.AlterField(
model_name='suite',
name='slug',
field=models.CharField(max_length=256, validators=[django.core.validators.RegexValidator(regex='^[a-zA-Z0-9][a-zA-Z0-9_.-]*')]),
),
migrations.AlterField(
model_name='test',
name='name',
field=models.CharField(max_length=256),
),
]
| agpl-3.0 |
zorroz/microblog | flask/lib/python2.7/site-packages/sqlalchemy/testing/fixtures.py | 32 | 10721 | # testing/fixtures.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import config
from . import assertions, schema
from .util import adict
from .. import util
from .engines import drop_all_tables
from .entities import BasicEntity, ComparableEntity
import sys
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base, DeclarativeMeta
# whether or not we use unittest changes things dramatically,
# as far as how py.test collection works.
class TestBase(object):
# A sequence of database names to always run, regardless of the
# constraints below.
__whitelist__ = ()
# A sequence of requirement names matching testing.requires decorators
__requires__ = ()
# A sequence of dialect names to exclude from the test class.
__unsupported_on__ = ()
# If present, test class is only runnable for the *single* specified
# dialect. If you need multiple, use __unsupported_on__ and invert.
__only_on__ = None
# A sequence of no-arg callables. If any are True, the entire testcase is
# skipped.
__skip_if__ = None
def assert_(self, val, msg=None):
assert val, msg
# apparently a handful of tests are doing this....OK
def setup(self):
if hasattr(self, "setUp"):
self.setUp()
def teardown(self):
if hasattr(self, "tearDown"):
self.tearDown()
class TablesTest(TestBase):
# 'once', None
run_setup_bind = 'once'
# 'once', 'each', None
run_define_tables = 'once'
# 'once', 'each', None
run_create_tables = 'once'
# 'once', 'each', None
run_inserts = 'each'
# 'each', None
run_deletes = 'each'
# 'once', None
run_dispose_bind = None
bind = None
metadata = None
tables = None
other = None
@classmethod
def setup_class(cls):
cls._init_class()
cls._setup_once_tables()
cls._setup_once_inserts()
@classmethod
def _init_class(cls):
if cls.run_define_tables == 'each':
if cls.run_create_tables == 'once':
cls.run_create_tables = 'each'
assert cls.run_inserts in ('each', None)
cls.other = adict()
cls.tables = adict()
cls.bind = cls.setup_bind()
cls.metadata = sa.MetaData()
cls.metadata.bind = cls.bind
@classmethod
def _setup_once_inserts(cls):
if cls.run_inserts == 'once':
cls._load_fixtures()
cls.insert_data()
@classmethod
def _setup_once_tables(cls):
if cls.run_define_tables == 'once':
cls.define_tables(cls.metadata)
if cls.run_create_tables == 'once':
cls.metadata.create_all(cls.bind)
cls.tables.update(cls.metadata.tables)
def _setup_each_tables(self):
if self.run_define_tables == 'each':
self.tables.clear()
if self.run_create_tables == 'each':
drop_all_tables(self.metadata, self.bind)
self.metadata.clear()
self.define_tables(self.metadata)
if self.run_create_tables == 'each':
self.metadata.create_all(self.bind)
self.tables.update(self.metadata.tables)
elif self.run_create_tables == 'each':
drop_all_tables(self.metadata, self.bind)
self.metadata.create_all(self.bind)
def _setup_each_inserts(self):
if self.run_inserts == 'each':
self._load_fixtures()
self.insert_data()
def _teardown_each_tables(self):
# no need to run deletes if tables are recreated on setup
if self.run_define_tables != 'each' and self.run_deletes == 'each':
with self.bind.connect() as conn:
for table in reversed(self.metadata.sorted_tables):
try:
conn.execute(table.delete())
except sa.exc.DBAPIError as ex:
util.print_(
("Error emptying table %s: %r" % (table, ex)),
file=sys.stderr)
def setup(self):
self._setup_each_tables()
self._setup_each_inserts()
def teardown(self):
self._teardown_each_tables()
@classmethod
def _teardown_once_metadata_bind(cls):
if cls.run_create_tables:
drop_all_tables(cls.metadata, cls.bind)
if cls.run_dispose_bind == 'once':
cls.dispose_bind(cls.bind)
cls.metadata.bind = None
if cls.run_setup_bind is not None:
cls.bind = None
@classmethod
def teardown_class(cls):
cls._teardown_once_metadata_bind()
@classmethod
def setup_bind(cls):
return config.db
@classmethod
def dispose_bind(cls, bind):
if hasattr(bind, 'dispose'):
bind.dispose()
elif hasattr(bind, 'close'):
bind.close()
@classmethod
def define_tables(cls, metadata):
pass
@classmethod
def fixtures(cls):
return {}
@classmethod
def insert_data(cls):
pass
def sql_count_(self, count, fn):
self.assert_sql_count(self.bind, fn, count)
def sql_eq_(self, callable_, statements):
self.assert_sql(self.bind, callable_, statements)
@classmethod
def _load_fixtures(cls):
"""Insert rows as represented by the fixtures() method."""
headers, rows = {}, {}
for table, data in cls.fixtures().items():
if len(data) < 2:
continue
if isinstance(table, util.string_types):
table = cls.tables[table]
headers[table] = data[0]
rows[table] = data[1:]
for table in cls.metadata.sorted_tables:
if table not in headers:
continue
cls.bind.execute(
table.insert(),
[dict(zip(headers[table], column_values))
for column_values in rows[table]])
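# A minimal sketch of how a concrete test case typically builds on TablesTest;
# the table name, columns and fixture rows are illustrative placeholders, not
# fixtures shipped with SQLAlchemy.
class _ExampleTablesTest(TablesTest):
    @classmethod
    def define_tables(cls, metadata):
        schema.Table('users', metadata,
                     schema.Column('id', sa.Integer, primary_key=True),
                     schema.Column('name', sa.String(50)))
    @classmethod
    def fixtures(cls):
        # first row is the column headers, remaining rows are the data
        return {'users': (('id', 'name'), (1, 'ed'), (2, 'wendy'))}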
from sqlalchemy import event
class RemovesEvents(object):
@util.memoized_property
def _event_fns(self):
return set()
def event_listen(self, target, name, fn):
self._event_fns.add((target, name, fn))
event.listen(target, name, fn)
def teardown(self):
for key in self._event_fns:
event.remove(*key)
super_ = super(RemovesEvents, self)
if hasattr(super_, "teardown"):
super_.teardown()
class _ORMTest(object):
@classmethod
def teardown_class(cls):
sa.orm.session.Session.close_all()
sa.orm.clear_mappers()
class ORMTest(_ORMTest, TestBase):
pass
class MappedTest(_ORMTest, TablesTest, assertions.AssertsExecutionResults):
# 'once', 'each', None
run_setup_classes = 'once'
# 'once', 'each', None
run_setup_mappers = 'each'
classes = None
@classmethod
def setup_class(cls):
cls._init_class()
if cls.classes is None:
cls.classes = adict()
cls._setup_once_tables()
cls._setup_once_classes()
cls._setup_once_mappers()
cls._setup_once_inserts()
@classmethod
def teardown_class(cls):
cls._teardown_once_class()
cls._teardown_once_metadata_bind()
def setup(self):
self._setup_each_tables()
self._setup_each_classes()
self._setup_each_mappers()
self._setup_each_inserts()
def teardown(self):
sa.orm.session.Session.close_all()
self._teardown_each_mappers()
self._teardown_each_classes()
self._teardown_each_tables()
@classmethod
def _teardown_once_class(cls):
cls.classes.clear()
_ORMTest.teardown_class()
@classmethod
def _setup_once_classes(cls):
if cls.run_setup_classes == 'once':
cls._with_register_classes(cls.setup_classes)
@classmethod
def _setup_once_mappers(cls):
if cls.run_setup_mappers == 'once':
cls._with_register_classes(cls.setup_mappers)
def _setup_each_mappers(self):
if self.run_setup_mappers == 'each':
self._with_register_classes(self.setup_mappers)
def _setup_each_classes(self):
if self.run_setup_classes == 'each':
self._with_register_classes(self.setup_classes)
@classmethod
def _with_register_classes(cls, fn):
"""Run a setup method, framing the operation with a Base class
that will catch new subclasses to be established within
the "classes" registry.
"""
cls_registry = cls.classes
class FindFixture(type):
def __init__(cls, classname, bases, dict_):
cls_registry[classname] = cls
return type.__init__(cls, classname, bases, dict_)
class _Base(util.with_metaclass(FindFixture, object)):
pass
class Basic(BasicEntity, _Base):
pass
class Comparable(ComparableEntity, _Base):
pass
cls.Basic = Basic
cls.Comparable = Comparable
fn()
def _teardown_each_mappers(self):
# some tests create mappers in the test bodies
# and will define setup_mappers as None -
# clear mappers in any case
if self.run_setup_mappers != 'once':
sa.orm.clear_mappers()
def _teardown_each_classes(self):
if self.run_setup_classes != 'once':
self.classes.clear()
@classmethod
def setup_classes(cls):
pass
@classmethod
def setup_mappers(cls):
pass
class DeclarativeMappedTest(MappedTest):
run_setup_classes = 'once'
run_setup_mappers = 'once'
@classmethod
def _setup_once_tables(cls):
pass
@classmethod
def _with_register_classes(cls, fn):
cls_registry = cls.classes
class FindFixtureDeclarative(DeclarativeMeta):
def __init__(cls, classname, bases, dict_):
cls_registry[classname] = cls
return DeclarativeMeta.__init__(
cls, classname, bases, dict_)
class DeclarativeBasic(object):
__table_cls__ = schema.Table
_DeclBase = declarative_base(metadata=cls.metadata,
metaclass=FindFixtureDeclarative,
cls=DeclarativeBasic)
cls.DeclarativeBasic = _DeclBase
fn()
if cls.metadata.tables and cls.run_create_tables:
cls.metadata.create_all(config.db)
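# Companion sketch for the declarative variant; the mapped class and columns
# are again placeholders.
class _ExampleDeclarativeTest(DeclarativeMappedTest):
    @classmethod
    def setup_classes(cls):
        class Thing(cls.DeclarativeBasic):
            __tablename__ = 'thing'
            id = sa.Column(sa.Integer, primary_key=True)
            name = sa.Column(sa.String(50))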
| bsd-3-clause |
cneumann/vrjuggler | modules/gadgeteer/tools/matrix_solver/matrix_solver.py | 7 | 3642 | """
Matrix Solver
Parses a calibration table and solves the equations for the alpha constants
used in Hardy's Multi-Quadric method of calibration.
"""
import os, sys, string
from math import sqrt
from xml.dom import *
from xml.dom.minidom import *
import Numeric, LinearAlgebra
# Define useful functions
def length(v):
"""
Determines the magnitude of a three dimensional vector, v.
"""
return sqrt( v[0] * v[0] + v[1] * v[1] + v[2] * v[2] )
def vec_subtract(a, b):
"""
Returns a tuple c, s.t. c = a - b
"""
return (a[0] - b[0], a[1] - b[1], a[2] - b[2])
def vec_multiply(a, b):
"""
Returns the scalar result of a dot b.
"""
return a[0] * b[0] + a[1] * b[1] + a[2] * b[2]
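# Illustrative helper (not called by this script): shows how the solved alpha
# constants would be applied with Hardy's Multi-Quadric method, i.e.
#   q(p) = sum_j alpha[j] * sqrt((p - p[j]) . (p - p[j]) + R^2)
# where the p[j] are the measured sample points used to build the W matrix.
# The actual calibration is applied by the tracker driver, not here.
def apply_calibration(p, sample_points, alphas, r_sq):
    corrected = [0.0, 0.0, 0.0]
    for pj, alpha in zip(sample_points, alphas):
        d = vec_subtract(p, pj)
        w = sqrt(vec_multiply(d, d) + r_sq)
        for k in range(3):
            corrected[k] = corrected[k] + alpha[k] * w
    return tuple(corrected)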
argc = len(sys.argv)
if argc < 2 or argc > 3:
print "Usage: matrix_solver.py input_file [output_file]"
sys.exit(1)
# XXX: Take out the debug file when ready.
dbg_file = file('debug_output.txt', 'w')
# Open the table file
in_file = file(sys.argv[1], 'r')
doc = parse(in_file)
root_element = doc.documentElement
# Get the offsets from the table
offset_elements = root_element.getElementsByTagName('Offset')
offset_table = {}
# This has to be done since keys and values in Python dictionaries are stored
# in random order.
keys_in_order = []
dbg_file.write('Parsed Offsets\n')
# Build an offset table.
for e in offset_elements:
curr_offset = string.split(e.firstChild.data)
qx = e.attributes['X'].nodeValue
qy = e.attributes['Y'].nodeValue
qz = e.attributes['Z'].nodeValue
q = ( float(qx), float(qy), float(qz) )
px = curr_offset[0]
py = curr_offset[1]
pz = curr_offset[2]
p = ( float(px), float(py), float(pz) )
dbg_file.write('(' + qx + ',' + qy + ',' + qz + '):(' + px + ',' + py + ',' + pz + ')\n')
dbg_file.write(str(q) + ' : ' + str(p) + '\n')
offset_table[q] = p
keys_in_order.append(q)
dbg_file.write('\nOffset Table\n')
dbg_file.write(str(offset_table))
# w[j](p) = sqrt( (p-p[j]) * (p-p[j]) + R^2 )
# s.t. 10 <= pow(R, 2) <= 1000
w_matrix_list = []
r_squared = 0.4
print 'Calculating W Matrix...'
for i in range(0, len(offset_table)):
w_matrix_row = []
p = offset_table[keys_in_order[i]]
for j in range(0, len(offset_table)):
pj = offset_table[keys_in_order[j]]
p_difference = vec_subtract(p, pj)
w = sqrt(vec_multiply(p_difference, p_difference) + r_squared)
w_matrix_row.append(w)
w_matrix_list.append(w_matrix_row)
dbg_file.write('\nW Matrix List\n')
dbg_file.write( str(w_matrix_list) )
w_matrix = Numeric.array(w_matrix_list)
dbg_file.write('\nW Matrix\n')
dbg_file.write( str(w_matrix) )
q_list = []
#for q in offset_table.values():
# q_list.append(list(q))
for k in keys_in_order:
q_list.append( list(k) )
dbg_file.write('\nQ List\n')
dbg_file.write( str(q_list) )
q_vector = Numeric.array(q_list)
print 'Solving for alpha vector...'
alpha_vector = LinearAlgebra.solve_linear_equations(w_matrix, q_vector)
dbg_file.write('\nAlpha Vector\n')
dbg_file.write( str(alpha_vector) )
print 'Alpha Vector found.'
out_file = ''
if argc == 2:
out_file = sys.argv[1]
else:
out_file = sys.argv[2]
in_file.close()
out_file = file(out_file, 'w')
alpha_vector_list = alpha_vector.tolist()
dbg_file.write('\nCheck Solution\n')
solution_check = Numeric.matrixmultiply(w_matrix, alpha_vector)
dbg_file.write( str(solution_check) )
# Add Alpha constants to XML Tree
for i in alpha_vector_list:
element = Element('Alpha')
element.setAttribute('X', str(i[0]))
element.setAttribute('Y', str(i[1]))
element.setAttribute('Z', str(i[2]))
root_element.appendChild(element)
out_file.write(doc.toprettyxml())
out_file.close()
| lgpl-2.1 |
pilou-/ansible | lib/ansible/modules/network/aci/aci_contract_subject.py | 22 | 10735 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_contract_subject
short_description: Manage initial Contract Subjects (vz:Subj)
description:
- Manage initial Contract Subjects on Cisco ACI fabrics.
version_added: '2.4'
options:
tenant:
description:
- The name of the tenant.
type: str
aliases: [ tenant_name ]
subject:
description:
- The contract subject name.
type: str
aliases: [ contract_subject, name, subject_name ]
contract:
description:
- The name of the Contract.
type: str
aliases: [ contract_name ]
reverse_filter:
description:
- Determines if the APIC should reverse the src and dst ports to allow the
      return traffic back, since ACI is a stateless filter.
- The APIC defaults to C(yes) when unset during creation.
type: bool
priority:
description:
- The QoS class.
- The APIC defaults to C(unspecified) when unset during creation.
type: str
choices: [ level1, level2, level3, unspecified ]
dscp:
description:
- The target DSCP.
- The APIC defaults to C(unspecified) when unset during creation.
type: str
choices: [ AF11, AF12, AF13, AF21, AF22, AF23, AF31, AF32, AF33, AF41, AF42, AF43,
CS0, CS1, CS2, CS3, CS4, CS5, CS6, CS7, EF, VA, unspecified ]
aliases: [ target ]
description:
description:
- Description for the contract subject.
type: str
aliases: [ descr ]
consumer_match:
description:
- The match criteria across consumers.
- The APIC defaults to C(at_least_one) when unset during creation.
type: str
choices: [ all, at_least_one, at_most_one, none ]
provider_match:
description:
- The match criteria across providers.
- The APIC defaults to C(at_least_one) when unset during creation.
type: str
choices: [ all, at_least_one, at_most_one, none ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
notes:
- The C(tenant) and C(contract) used must exist before using this module in your playbook.
The M(aci_tenant) and M(aci_contract) modules can be used for this.
seealso:
- module: aci_contract
- module: aci_tenant
- name: APIC Management Information Model reference
description: More information about the internal APIC class B(vz:Subj).
link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Swetha Chunduri (@schunduri)
'''
EXAMPLES = r'''
- name: Add a new contract subject
aci_contract_subject:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
contract: web_to_db
subject: default
description: test
reverse_filter: yes
priority: level1
dscp: unspecified
state: present
register: query_result
- name: Remove a contract subject
aci_contract_subject:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
contract: web_to_db
subject: default
state: absent
delegate_to: localhost
- name: Query a contract subject
aci_contract_subject:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
contract: web_to_db
subject: default
state: query
delegate_to: localhost
register: query_result
- name: Query all contract subjects
aci_contract_subject:
host: apic
username: admin
password: SomeSecretPassword
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: str
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: str
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: str
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: str
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: str
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
MATCH_MAPPING = dict(
all='All',
at_least_one='AtleastOne',
at_most_one='AtmostOne',
none='None',
)
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
contract=dict(type='str', aliases=['contract_name']), # Not required for querying all objects
subject=dict(type='str', aliases=['contract_subject', 'name', 'subject_name']), # Not required for querying all objects
tenant=dict(type='str', aliases=['tenant_name']), # Not required for querying all objects
priority=dict(type='str', choices=['unspecified', 'level1', 'level2', 'level3']),
reverse_filter=dict(type='bool'),
dscp=dict(type='str', aliases=['target'],
choices=['AF11', 'AF12', 'AF13', 'AF21', 'AF22', 'AF23', 'AF31', 'AF32', 'AF33', 'AF41', 'AF42', 'AF43',
'CS0', 'CS1', 'CS2', 'CS3', 'CS4', 'CS5', 'CS6', 'CS7', 'EF', 'VA', 'unspecified']),
description=dict(type='str', aliases=['descr']),
consumer_match=dict(type='str', choices=['all', 'at_least_one', 'at_most_one', 'none']),
provider_match=dict(type='str', choices=['all', 'at_least_one', 'at_most_one', 'none']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
directive=dict(type='str', removed_in_version='2.4'), # Deprecated starting from v2.4
filter=dict(type='str', aliases=['filter_name'], removed_in_version='2.4'), # Deprecated starting from v2.4
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['contract', 'subject', 'tenant']],
['state', 'present', ['contract', 'subject', 'tenant']],
],
)
aci = ACIModule(module)
subject = module.params['subject']
priority = module.params['priority']
reverse_filter = aci.boolean(module.params['reverse_filter'])
contract = module.params['contract']
dscp = module.params['dscp']
description = module.params['description']
filter_name = module.params['filter']
directive = module.params['directive']
consumer_match = module.params['consumer_match']
if consumer_match is not None:
consumer_match = MATCH_MAPPING[consumer_match]
provider_match = module.params['provider_match']
if provider_match is not None:
provider_match = MATCH_MAPPING[provider_match]
state = module.params['state']
tenant = module.params['tenant']
if directive is not None or filter_name is not None:
module.fail_json(msg="Managing Contract Subjects to Filter bindings has been moved to module 'aci_subject_bind_filter'")
aci.construct_url(
root_class=dict(
aci_class='fvTenant',
aci_rn='tn-{0}'.format(tenant),
module_object=tenant,
target_filter={'name': tenant},
),
subclass_1=dict(
aci_class='vzBrCP',
aci_rn='brc-{0}'.format(contract),
module_object=contract,
target_filter={'name': contract},
),
subclass_2=dict(
aci_class='vzSubj',
aci_rn='subj-{0}'.format(subject),
module_object=subject,
target_filter={'name': subject},
),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='vzSubj',
class_config=dict(
name=subject,
prio=priority,
revFltPorts=reverse_filter,
targetDscp=dscp,
consMatchT=consumer_match,
provMatchT=provider_match,
descr=description,
),
)
aci.get_diff(aci_class='vzSubj')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
| gpl-3.0 |
georgid/sms-tools | lectures/5-Sinusoidal-model/plots-code/sine-analysis-synthesis.py | 2 | 1538 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import sys, os, functools, time
from scipy.fftpack import fft, ifft, fftshift
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
M = 601
w = np.blackman(M)
N = 1024
hN = N/2
Ns = 512
hNs = Ns/2
pin = 5000
t = -70
x1 = x[pin:pin+w.size]
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, hN, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
freqs = iploc*fs/N
Y = UF.genSpecSines(freqs, ipmag, ipphase, Ns, fs)
mY = 20*np.log10(abs(Y[:hNs]))
pY = np.unwrap(np.angle(Y[:hNs]))
y= fftshift(ifft(Y))*sum(blackmanharris(Ns))
plt.figure(1, figsize=(9, 6))
plt.subplot(4,1,1)
plt.plot(np.arange(-M/2,M/2), x1, 'b', lw=1.5)
plt.axis([-M/2,M/2, min(x1), max(x1)])
plt.title("x (oboe-A4.wav), M = 601")
plt.subplot(4,1,2)
plt.plot(np.arange(hN), mX, 'r', lw=1.5)
plt.plot(iploc, ipmag, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.axis([0, hN,-90,max(mX)+2])
plt.title("mX + spectral peaks; Blackman, N = 1024")
plt.subplot(4,1,3)
plt.plot(np.arange(hNs), mY, 'r', lw=1.5)
plt.axis([0, hNs,-90,max(mY)+2])
plt.title("mY; Blackman-Harris; Ns = 512")
plt.subplot(4,1,4)
plt.plot(np.arange(Ns), y, 'b', lw=1.5)
plt.axis([0, Ns,min(y),max(y)])
plt.title("y; Ns = 512")
plt.tight_layout()
plt.savefig('sine-analysis-synthesis.png')
plt.show()
| agpl-3.0 |
commtrack/temp-aquatest | apps/user_registration/forms.py | 1 | 4996 | """
Forms and validation code for user user_registration.
"""
from django.contrib.auth.models import User
from django import forms
from django.utils.translation import ugettext_lazy as _
# I put this on all required fields, because it's easier to pick up
# on them with CSS or JavaScript if they have a class of "required"
# in the HTML. Your mileage may vary. If/when Django ticket #3515
# lands in trunk, this will no longer be necessary.
attrs_dict = { 'class': 'required' }
class RegistrationForm(forms.Form):
"""
Form for registering a new user account.
Validates that the requested username is not already in use, and
requires the password to be entered twice to catch typos.
Subclasses should feel free to add any additional validation they
need, but should avoid defining a ``save()`` method -- the actual
saving of collected user data is delegated to the active
registration backend.
"""
username = forms.RegexField(regex=r'^\w+$',
max_length=30,
widget=forms.TextInput(attrs=attrs_dict),
label=_("Username"),
error_messages={ 'invalid': _("This value must contain only letters, numbers and underscores.") })
email = forms.EmailField(widget=forms.TextInput(attrs=dict(attrs_dict,
maxlength=75)),
label=_("Email address"))
password1 = forms.CharField(widget=forms.PasswordInput(attrs=attrs_dict, render_value=False),
label=_("Password"))
password2 = forms.CharField(widget=forms.PasswordInput(attrs=attrs_dict, render_value=False),
label=_("Password (again)"))
def clean_username(self):
"""
Validate that the username is alphanumeric and is not already
in use.
"""
try:
user = User.objects.get(username__iexact=self.cleaned_data['username'])
except User.DoesNotExist:
return self.cleaned_data['username']
raise forms.ValidationError(_("A user with that username already exists."))
def clean(self):
"""
        Verify that the values entered into the two password fields
match. Note that an error here will end up in
``non_field_errors()`` because it doesn't apply to a single
field.
"""
if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:
if self.cleaned_data['password1'] != self.cleaned_data['password2']:
raise forms.ValidationError(_("The two password fields didn't match."))
return self.cleaned_data
class RegistrationFormTermsOfService(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which adds a required checkbox
for agreeing to a site's Terms of Service.
"""
tos = forms.BooleanField(widget=forms.CheckboxInput(attrs=attrs_dict),
label=_(u'I have read and agree to the Terms of Service'),
error_messages={ 'required': _("You must agree to the terms to register") })
class RegistrationFormUniqueEmail(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which enforces uniqueness of
email addresses.
"""
def clean_email(self):
"""
Validate that the supplied email address is unique for the
site.
"""
if User.objects.filter(email__iexact=self.cleaned_data['email']):
raise forms.ValidationError(_("This email address is already in use. Please supply a different email address."))
return self.cleaned_data['email']
class RegistrationFormNoFreeEmail(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which disallows registration with
email addresses from popular free webmail services; moderately
useful for preventing automated spam registrations.
To change the list of banned domains, subclass this form and
override the attribute ``bad_domains``.
"""
bad_domains = ['aim.com', 'aol.com', 'email.com', 'gmail.com',
'googlemail.com', 'hotmail.com', 'hushmail.com',
'msn.com', 'mail.ru', 'mailinator.com', 'live.com',
'yahoo.com']
def clean_email(self):
"""
Check the supplied email address against a list of known free
webmail domains.
"""
email_domain = self.cleaned_data['email'].split('@')[1]
if email_domain in self.bad_domains:
raise forms.ValidationError(_("Registration using free email addresses is prohibited. Please supply a different email address."))
return self.cleaned_data['email']
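# A minimal sketch of how a view or registration backend typically drives one
# of these forms; the field values are placeholders and a configured Django
# project (settings and database) is assumed.
def _example_validate_registration(post_data):
    form = RegistrationFormUniqueEmail(data=post_data)
    if form.is_valid():
        return form.cleaned_data['username'], form.cleaned_data['email']
    return None, form.errors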
| bsd-3-clause |
sg00dwin/origin | vendor/github.com/google/certificate-transparency/python/ct/crypto/asn1/x509_extension.py | 34 | 7546 | """ASN.1 specification for X509 extensions."""
from ct.crypto.asn1 import named_value
from ct.crypto.asn1 import oid
from ct.crypto.asn1 import tag
from ct.crypto.asn1 import types
from ct.crypto.asn1 import x509_common
from ct.crypto.asn1 import x509_name
# Standard extensions from RFC 5280.
class BasicConstraints(types.Sequence):
print_delimiter = ", "
components = (
(types.Component("cA", types.Boolean, default=False)),
(types.Component("pathLenConstraint", types.Integer, optional=True))
)
class SubjectAlternativeNames(types.SequenceOf):
print_delimiter = ", "
component = x509_name.GeneralName
class KeyUsage(types.NamedBitList):
DIGITAL_SIGNATURE = named_value.NamedValue("digitalSignature", 0)
NON_REPUDIATION = named_value.NamedValue("nonRepudiation", 1)
KEY_ENCIPHERMENT = named_value.NamedValue("keyEncipherment", 2)
DATA_ENCIPHERMENT = named_value.NamedValue("dataEncipherment", 3)
KEY_AGREEMENT = named_value.NamedValue("keyAgreement", 4)
KEY_CERT_SIGN = named_value.NamedValue("keyCertSign", 5)
CRL_SIGN = named_value.NamedValue("cRLSign", 6)
ENCIPHER_ONLY = named_value.NamedValue("encipherOnly", 7)
DECIPHER_ONLY = named_value.NamedValue("decipherOnly", 8)
named_bit_list = (DIGITAL_SIGNATURE, NON_REPUDIATION, KEY_ENCIPHERMENT,
DATA_ENCIPHERMENT, KEY_AGREEMENT, KEY_CERT_SIGN,
CRL_SIGN, ENCIPHER_ONLY, DECIPHER_ONLY)
class KeyPurposeID(oid.ObjectIdentifier):
pass
class ExtendedKeyUsage(types.SequenceOf):
print_delimiter = ", "
print_labels = False
component = KeyPurposeID
class KeyIdentifier(types.OctetString):
pass
class SubjectKeyIdentifier(KeyIdentifier):
pass
KEY_IDENTIFIER = "keyIdentifier"
AUTHORITY_CERT_ISSUER = "authorityCertIssuer"
AUTHORITY_CERT_SERIAL_NUMBER = "authorityCertSerialNumber"
class AuthorityKeyIdentifier(types.Sequence):
components = (
types.Component(KEY_IDENTIFIER, KeyIdentifier.implicit(0), optional=True),
types.Component(AUTHORITY_CERT_ISSUER, x509_name.GeneralNames.implicit(1),
optional=True),
types.Component(AUTHORITY_CERT_SERIAL_NUMBER,
x509_common.CertificateSerialNumber.implicit(2),
optional=True)
)
class DisplayText(types.Choice):
components = {
"ia5String": types.IA5String,
"visibleString": types.VisibleString,
"bmpString": types.BMPString,
"utf8String": types.UTF8String
}
class NoticeNumbers(types.SequenceOf):
component = types.Integer
class NoticeReference(types.Sequence):
components = (
types.Component("organization", DisplayText),
types.Component("noticeNumbers", NoticeNumbers)
)
NOTICE_REF = "noticeRef"
EXPLICIT_TEXT = "explicitText"
class UserNotice(types.Sequence):
components = (
types.Component(NOTICE_REF, NoticeReference, optional=True),
types.Component(EXPLICIT_TEXT, DisplayText, optional=True)
)
class CPSuri(types.IA5String):
pass
_POLICY_QUALIFIER_DICT = {
oid.ID_QT_CPS: CPSuri,
oid.ID_QT_UNOTICE: UserNotice
}
POLICY_QUALIFIER_ID = "policyQualifierId"
QUALIFIER = "qualifier"
class PolicyQualifierInfo(types.Sequence):
print_labels = False
print_delimiter = ": "
components = (
types.Component(POLICY_QUALIFIER_ID, oid.ObjectIdentifier),
types.Component(QUALIFIER, types.Any, defined_by="policyQualifierId",
lookup=_POLICY_QUALIFIER_DICT)
)
class PolicyQualifiers(types.SequenceOf):
print_labels = False
component = PolicyQualifierInfo
POLICY_IDENTIFIER = "policyIdentifier"
POLICY_QUALIFIERS = "policyQualifiers"
class PolicyInformation(types.Sequence):
components = (
types.Component(POLICY_IDENTIFIER, oid.ObjectIdentifier),
types.Component(POLICY_QUALIFIERS, PolicyQualifiers, optional=True)
)
class CertificatePolicies(types.SequenceOf):
component = PolicyInformation
FULL_NAME = "fullName"
RELATIVE_NAME = "nameRelativetoCRLIssuer"
class DistributionPointName(types.Choice):
components = {
FULL_NAME: x509_name.GeneralNames.implicit(0),
RELATIVE_NAME: x509_name.RelativeDistinguishedName.implicit(1)
}
class ReasonFlags(types.NamedBitList):
UNUSED = named_value.NamedValue("unused", 0)
KEY_COMPROMISE = named_value.NamedValue("keyCompromise", 1)
CA_COMPROMISE = named_value.NamedValue("cACompromise", 2),
AFFILIATION_CHANGED = named_value.NamedValue("affiliationChanged", 3)
SUPERSEDED = named_value.NamedValue("superseded", 4)
CESSATION_OF_OPERATION = named_value.NamedValue("cessationOfOperation", 5)
CERTIFICATE_HOLD = named_value.NamedValue("certificateHold", 6)
PRIVILEGE_WITHDRAWN = named_value.NamedValue("privilegeWithdrawn", 7)
AA_COMPROMISE = named_value.NamedValue("aACompromise", 8)
named_bit_list = (UNUSED, KEY_COMPROMISE, CA_COMPROMISE,
AFFILIATION_CHANGED, SUPERSEDED, CESSATION_OF_OPERATION,
CERTIFICATE_HOLD, PRIVILEGE_WITHDRAWN, AA_COMPROMISE)
DISTRIBUTION_POINT = "distributionPoint"
REASONS = "reasons"
CRL_ISSUER = "cRLIssuer"
class DistributionPoint(types.Sequence):
components = (
types.Component(DISTRIBUTION_POINT, DistributionPointName.explicit(0),
optional=True),
types.Component(REASONS, ReasonFlags.implicit(1), optional=True),
types.Component(CRL_ISSUER, x509_name.GeneralNames.implicit(2),
optional=True)
)
class CRLDistributionPoints(types.SequenceOf):
component = DistributionPoint
ACCESS_METHOD = "accessMethod"
ACCESS_LOCATION = "accessLocation"
class AccessDescription(types.Sequence):
print_labels = False
print_delimiter = ": "
components = (
types.Component(ACCESS_METHOD, oid.ObjectIdentifier),
types.Component(ACCESS_LOCATION, x509_name.GeneralName)
)
# Called AuthorityInfoAccessSyntax in RFC 5280.
class AuthorityInfoAccess(types.SequenceOf):
component = AccessDescription
class SignedCertificateTimestampList(types.OctetString):
pass
# Hack! This is not a valid ASN.1 definition but it works: an extension value
# is defined as a DER-encoded value wrapped in an OctetString.
# This is functionally equivalent to an Any type that is tagged with the
# OctetString tag.
@types.Universal(4, tag.PRIMITIVE)
class ExtensionValue(types.Any):
pass
_EXTENSION_DICT = {
oid.ID_CE_BASIC_CONSTRAINTS: BasicConstraints,
oid.ID_CE_SUBJECT_ALT_NAME: SubjectAlternativeNames,
oid.ID_CE_KEY_USAGE: KeyUsage,
oid.ID_CE_EXT_KEY_USAGE: ExtendedKeyUsage,
oid.ID_CE_SUBJECT_KEY_IDENTIFIER: SubjectKeyIdentifier,
oid.ID_CE_AUTHORITY_KEY_IDENTIFIER: AuthorityKeyIdentifier,
oid.ID_CE_CERTIFICATE_POLICIES: CertificatePolicies,
oid.ID_CE_CRL_DISTRIBUTION_POINTS: CRLDistributionPoints,
oid.ID_PE_AUTHORITY_INFO_ACCESS: AuthorityInfoAccess,
oid.CT_POISON: types.Null,
oid.CT_EMBEDDED_SCT_LIST: SignedCertificateTimestampList
}
class Extension(types.Sequence):
print_delimiter = ", "
components = (
types.Component("extnID", oid.ObjectIdentifier),
types.Component("critical", types.Boolean, default=False),
types.Component("extnValue", ExtensionValue, defined_by="extnID",
lookup=_EXTENSION_DICT)
)
class Extensions(types.SequenceOf):
component = Extension
| apache-2.0 |
dkroy/luigi | luigi/six.py | 65 | 29796 | """Utilities for writing code that runs on Python 2 and 3
In luigi, we hard-copy this file into the project itself, to ensure that all
luigi users use the same version of six.
"""
# Copyright (c) 2010-2015 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <[email protected]>"
__version__ = "1.9.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
try:
# This is a bit ugly, but it avoids running this again by
# removing this descriptor.
delattr(obj.__class__, self.name)
except AttributeError:
pass
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
    A meta path importer for ``six.moves`` and its submodules.
    This class implements a PEP 302 finder and loader. It should be compatible
    with Python 2.5 and all existing versions of Python 3.
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return true, if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
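# Minimal usage sketch (illustrative, from client code once the import hook at
# the bottom of this module is installed): renamed stdlib names resolve to the
# right module on either Python version.
#
# from luigi.six.moves import range
# squares = [i * i for i in range(5)]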
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return iter(d.iterkeys(**kw))
def itervalues(d, **kw):
return iter(d.itervalues(**kw))
def iteritems(d, **kw):
return iter(d.iteritems(**kw))
def iterlists(d, **kw):
return iter(d.iterlists(**kw))
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
if from_value is None:
raise value
raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
raise value from from_value
""")
else:
def raise_from(value, from_value):
raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
def wrapper(f):
f = functools.wraps(wrapped, assigned, updated)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
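# Illustrative usage sketch (the names ``Meta`` and ``MyClass`` are
# hypothetical, not part of this module):
#
# class Meta(type):
#     pass
#
# class MyClass(with_metaclass(Meta, object)):
#     pass
#
# ``MyClass`` is then constructed by ``Meta`` on both Python 2 and Python 3.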
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
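# Illustrative usage sketch (hypothetical names): the decorator form avoids the
# temporary intermediate class created by with_metaclass().
#
# @add_metaclass(Meta)
# class MyOtherClass(object):
#     pass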
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if PY2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
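# Illustrative usage sketch (hypothetical class): define only a __str__ that
# returns text; under Python 2 the decorator adds __unicode__ and re-points
# __str__ at a UTF-8 encoded version.
#
# @python_2_unicode_compatible
# class Greeting(object):
#     def __str__(self):
#         return u'hello'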
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
| apache-2.0 |
kalxas/geonode | geonode/base/api/pagination.py | 6 | 1935 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2020 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.conf import settings
from rest_framework.response import Response
from rest_framework.pagination import PageNumberPagination
DEFAULT_PAGE = getattr(settings, 'REST_API_DEFAULT_PAGE', 1)
DEFAULT_PAGE_SIZE = getattr(settings, 'REST_API_DEFAULT_PAGE_SIZE', 10)
DEFAULT_PAGE_QUERY_PARAM = getattr(settings, 'REST_API_DEFAULT_PAGE_QUERY_PARAM', 'page_size')
class GeoNodeApiPagination(PageNumberPagination):
page = DEFAULT_PAGE
page_size = DEFAULT_PAGE_SIZE
page_size_query_param = DEFAULT_PAGE_QUERY_PARAM
def get_paginated_response(self, data):
_paginated_response = {
'links': {
'next': self.get_next_link(),
'previous': self.get_previous_link()
},
'total': self.page.paginator.count,
'page': int(self.request.GET.get('page', DEFAULT_PAGE)), # can not set default = self.page
DEFAULT_PAGE_QUERY_PARAM: int(self.request.GET.get(DEFAULT_PAGE_QUERY_PARAM, self.page_size))
}
_paginated_response.update(data)
return Response(_paginated_response)
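# Illustrative sketch (not part of the original module): with the default
# settings above, a paginated response body looks roughly like this (field
# values are example data):
#
# {
#     "links": {"next": "...?page=2", "previous": None},
#     "total": 42,
#     "page": 1,
#     "page_size": 10,
#     # ...plus whatever keys were in the ``data`` dict passed in,
#     # e.g. the serialized results.
# }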
| gpl-3.0 |
joshblum/django-with-audit | django/contrib/localflavor/ar/forms.py | 87 | 4024 | # -*- coding: utf-8 -*-
"""
AR-specific Form helpers.
"""
from __future__ import absolute_import
from django.contrib.localflavor.ar.ar_provinces import PROVINCE_CHOICES
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import RegexField, CharField, Select
from django.utils.translation import ugettext_lazy as _
class ARProvinceSelect(Select):
"""
A Select widget that uses a list of Argentinean provinces/autonomous cities
as its choices.
"""
def __init__(self, attrs=None):
super(ARProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
class ARPostalCodeField(RegexField):
"""
A field that accepts a 'classic' NNNN Postal Code or a CPA.
See http://www.correoargentino.com.ar/consulta_cpa/home.php
"""
default_error_messages = {
'invalid': _("Enter a postal code in the format NNNN or ANNNNAAA."),
}
def __init__(self, max_length=8, min_length=4, *args, **kwargs):
super(ARPostalCodeField, self).__init__(r'^\d{4}$|^[A-HJ-NP-Za-hj-np-z]\d{4}\D{3}$',
max_length, min_length, *args, **kwargs)
def clean(self, value):
value = super(ARPostalCodeField, self).clean(value)
if value in EMPTY_VALUES:
return u''
if len(value) not in (4, 8):
raise ValidationError(self.error_messages['invalid'])
if len(value) == 8:
return u'%s%s%s' % (value[0].upper(), value[1:5], value[5:].upper())
return value
class ARDNIField(CharField):
"""
A field that validates 'Documento Nacional de Identidad' (DNI) numbers.
"""
default_error_messages = {
'invalid': _("This field requires only numbers."),
'max_digits': _("This field requires 7 or 8 digits."),
}
def __init__(self, max_length=10, min_length=7, *args, **kwargs):
super(ARDNIField, self).__init__(max_length, min_length, *args,
**kwargs)
def clean(self, value):
"""
Value can be a string either in the [X]X.XXX.XXX or [X]XXXXXXX formats.
"""
value = super(ARDNIField, self).clean(value)
if value in EMPTY_VALUES:
return u''
if not value.isdigit():
value = value.replace('.', '')
if not value.isdigit():
raise ValidationError(self.error_messages['invalid'])
if len(value) not in (7, 8):
raise ValidationError(self.error_messages['max_digits'])
return value
class ARCUITField(RegexField):
"""
This field validates a CUIT (Código Único de Identificación Tributaria). A
CUIT is of the form XX-XXXXXXXX-V. The last digit is a check digit.
"""
default_error_messages = {
'invalid': _('Enter a valid CUIT in XX-XXXXXXXX-X or XXXXXXXXXXXX format.'),
'checksum': _("Invalid CUIT."),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(ARCUITField, self).__init__(r'^\d{2}-?\d{8}-?\d$',
max_length, min_length, *args, **kwargs)
def clean(self, value):
"""
Value can be either a string in the format XX-XXXXXXXX-X or an
11-digit number.
"""
value = super(ARCUITField, self).clean(value)
if value in EMPTY_VALUES:
return u''
value, cd = self._canon(value)
if self._calc_cd(value) != cd:
raise ValidationError(self.error_messages['checksum'])
return self._format(value, cd)
def _canon(self, cuit):
cuit = cuit.replace('-', '')
return cuit[:-1], cuit[-1]
def _calc_cd(self, cuit):
mults = (5, 4, 3, 2, 7, 6, 5, 4, 3, 2)
tmp = sum([m * int(cuit[idx]) for idx, m in enumerate(mults)])
return str(11 - tmp % 11)
def _format(self, cuit, check_digit=None):
        if check_digit is None:
check_digit = cuit[-1]
cuit = cuit[:-1]
return u'%s-%s-%s' % (cuit[:2], cuit[2:], check_digit)
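# Worked example of the check-digit computation above (the CUIT value is an
# arbitrary illustration, not real data): for '20-26756539-?' the first ten
# digits 2 0 2 6 7 5 6 5 3 9 multiplied by (5, 4, 3, 2, 7, 6, 5, 4, 3, 2) sum
# to 184; 184 % 11 == 8 and 11 - 8 == 3, so '20-26756539-3' passes validation.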
| bsd-3-clause |
xapp-le/kernel | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
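#
# Typical invocation (illustrative; event list and options may differ per
# setup): record scheduler events with something like
#   perf record -a -e sched:sched_switch -e sched:sched_wakeup \
#               -e sched:sched_migrate_task -- sleep 10
# then render the migration overview with
#   perf script -s sched-migration.py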
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
| gpl-2.0 |
yamila-moreno/django | django/templatetags/i18n.py | 16 | 18205 | from __future__ import unicode_literals
import sys
from django.conf import settings
from django.template import Library, Node, TemplateSyntaxError, Variable
from django.template.base import TOKEN_TEXT, TOKEN_VAR, render_value_in_context
from django.template.defaulttags import token_kwargs
from django.utils import six, translation
register = Library()
class GetAvailableLanguagesNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = [(k, translation.ugettext(v)) for k, v in settings.LANGUAGES]
return ''
class GetLanguageInfoNode(Node):
def __init__(self, lang_code, variable):
self.lang_code = lang_code
self.variable = variable
def render(self, context):
lang_code = self.lang_code.resolve(context)
context[self.variable] = translation.get_language_info(lang_code)
return ''
class GetLanguageInfoListNode(Node):
def __init__(self, languages, variable):
self.languages = languages
self.variable = variable
def get_language_info(self, language):
# ``language`` is either a language code string or a sequence
# with the language code as its first item
if len(language[0]) > 1:
return translation.get_language_info(language[0])
else:
return translation.get_language_info(str(language))
def render(self, context):
langs = self.languages.resolve(context)
context[self.variable] = [self.get_language_info(lang) for lang in langs]
return ''
class GetCurrentLanguageNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = translation.get_language()
return ''
class GetCurrentLanguageBidiNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = translation.get_language_bidi()
return ''
class TranslateNode(Node):
def __init__(self, filter_expression, noop, asvar=None,
message_context=None):
self.noop = noop
self.asvar = asvar
self.message_context = message_context
self.filter_expression = filter_expression
if isinstance(self.filter_expression.var, six.string_types):
self.filter_expression.var = Variable("'%s'" %
self.filter_expression.var)
def render(self, context):
self.filter_expression.var.translate = not self.noop
if self.message_context:
self.filter_expression.var.message_context = (
self.message_context.resolve(context))
output = self.filter_expression.resolve(context)
value = render_value_in_context(output, context)
if self.asvar:
context[self.asvar] = value
return ''
else:
return value
class BlockTranslateNode(Node):
def __init__(self, extra_context, singular, plural=None, countervar=None,
counter=None, message_context=None, trimmed=False):
self.extra_context = extra_context
self.singular = singular
self.plural = plural
self.countervar = countervar
self.counter = counter
self.message_context = message_context
self.trimmed = trimmed
def render_token_list(self, tokens):
result = []
vars = []
for token in tokens:
if token.token_type == TOKEN_TEXT:
result.append(token.contents.replace('%', '%%'))
elif token.token_type == TOKEN_VAR:
result.append('%%(%s)s' % token.contents)
vars.append(token.contents)
msg = ''.join(result)
if self.trimmed:
msg = translation.trim_whitespace(msg)
return msg, vars
def render(self, context, nested=False):
if self.message_context:
message_context = self.message_context.resolve(context)
else:
message_context = None
tmp_context = {}
for var, val in self.extra_context.items():
tmp_context[var] = val.resolve(context)
# Update() works like a push(), so corresponding context.pop() is at
# the end of function
context.update(tmp_context)
singular, vars = self.render_token_list(self.singular)
if self.plural and self.countervar and self.counter:
count = self.counter.resolve(context)
context[self.countervar] = count
plural, plural_vars = self.render_token_list(self.plural)
if message_context:
result = translation.npgettext(message_context, singular,
plural, count)
else:
result = translation.ungettext(singular, plural, count)
vars.extend(plural_vars)
else:
if message_context:
result = translation.pgettext(message_context, singular)
else:
result = translation.ugettext(singular)
default_value = context.template.engine.string_if_invalid
def render_value(key):
if key in context:
val = context[key]
else:
val = default_value % key if '%s' in default_value else default_value
return render_value_in_context(val, context)
data = {v: render_value(v) for v in vars}
context.pop()
try:
result = result % data
except (KeyError, ValueError):
if nested:
# Either string is malformed, or it's a bug
raise TemplateSyntaxError("'blocktrans' is unable to format "
"string returned by gettext: %r using %r" % (result, data))
with translation.override(None):
result = self.render(context, nested=True)
return result
class LanguageNode(Node):
def __init__(self, nodelist, language):
self.nodelist = nodelist
self.language = language
def render(self, context):
with translation.override(self.language.resolve(context)):
output = self.nodelist.render(context)
return output
@register.tag("get_available_languages")
def do_get_available_languages(parser, token):
"""
This will store a list of available languages
in the context.
Usage::
{% get_available_languages as languages %}
{% for language in languages %}
...
{% endfor %}
This will just pull the LANGUAGES setting from
your setting file (or the default settings) and
put it into the named variable.
"""
    # token.split_contents() isn't useful here because this tag doesn't accept variables as arguments
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_available_languages' requires 'as variable' (got %r)" % args)
return GetAvailableLanguagesNode(args[2])
@register.tag("get_language_info")
def do_get_language_info(parser, token):
"""
This will store the language information dictionary for the given language
code in a context variable.
Usage::
{% get_language_info for LANGUAGE_CODE as l %}
{{ l.code }}
{{ l.name }}
{{ l.name_translated }}
{{ l.name_local }}
{{ l.bidi|yesno:"bi-directional,uni-directional" }}
"""
args = token.split_contents()
if len(args) != 5 or args[1] != 'for' or args[3] != 'as':
raise TemplateSyntaxError("'%s' requires 'for string as variable' (got %r)" % (args[0], args[1:]))
return GetLanguageInfoNode(parser.compile_filter(args[2]), args[4])
@register.tag("get_language_info_list")
def do_get_language_info_list(parser, token):
"""
This will store a list of language information dictionaries for the given
language codes in a context variable. The language codes can be specified
either as a list of strings or a settings.LANGUAGES style list (or any
sequence of sequences whose first items are language codes).
Usage::
{% get_language_info_list for LANGUAGES as langs %}
{% for l in langs %}
{{ l.code }}
{{ l.name }}
{{ l.name_translated }}
{{ l.name_local }}
{{ l.bidi|yesno:"bi-directional,uni-directional" }}
{% endfor %}
"""
args = token.split_contents()
if len(args) != 5 or args[1] != 'for' or args[3] != 'as':
raise TemplateSyntaxError("'%s' requires 'for sequence as variable' (got %r)" % (args[0], args[1:]))
return GetLanguageInfoListNode(parser.compile_filter(args[2]), args[4])
@register.filter
def language_name(lang_code):
return translation.get_language_info(lang_code)['name']
@register.filter
def language_name_translated(lang_code):
english_name = translation.get_language_info(lang_code)['name']
return translation.ugettext(english_name)
@register.filter
def language_name_local(lang_code):
return translation.get_language_info(lang_code)['name_local']
@register.filter
def language_bidi(lang_code):
return translation.get_language_info(lang_code)['bidi']
@register.tag("get_current_language")
def do_get_current_language(parser, token):
"""
This will store the current language in the context.
Usage::
{% get_current_language as language %}
This will fetch the currently active language and
    put its value into the ``language`` context
variable.
"""
    # token.split_contents() isn't useful here because this tag doesn't accept variables as arguments
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_language' requires 'as variable' (got %r)" % args)
return GetCurrentLanguageNode(args[2])
@register.tag("get_current_language_bidi")
def do_get_current_language_bidi(parser, token):
"""
This will store the current language layout in the context.
Usage::
{% get_current_language_bidi as bidi %}
This will fetch the currently active language's layout and
    put its value into the ``bidi`` context variable.
True indicates right-to-left layout, otherwise left-to-right
"""
    # token.split_contents() isn't useful here because this tag doesn't accept variables as arguments
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_language_bidi' requires 'as variable' (got %r)" % args)
return GetCurrentLanguageBidiNode(args[2])
@register.tag("trans")
def do_translate(parser, token):
"""
This will mark a string for translation and will
translate the string for the current language.
Usage::
{% trans "this is a test" %}
This will mark the string for translation so it will
be pulled out by mark-messages.py into the .po files
and will run the string through the translation engine.
There is a second form::
{% trans "this is a test" noop %}
This will only mark for translation, but will return
the string unchanged. Use it when you need to store
values into forms that should be translated later on.
You can use variables instead of constant strings
to translate stuff you marked somewhere else::
{% trans variable %}
This will just try to translate the contents of
the variable ``variable``. Make sure that the string
in there is something that is in the .po file.
It is possible to store the translated string into a variable::
{% trans "this is a test" as var %}
{{ var }}
Contextual translations are also supported::
{% trans "this is a test" context "greeting" %}
This is equivalent to calling pgettext instead of (u)gettext.
"""
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError("'%s' takes at least one argument" % bits[0])
message_string = parser.compile_filter(bits[1])
remaining = bits[2:]
noop = False
asvar = None
message_context = None
seen = set()
invalid_context = {'as', 'noop'}
while remaining:
option = remaining.pop(0)
if option in seen:
raise TemplateSyntaxError(
"The '%s' option was specified more than once." % option,
)
elif option == 'noop':
noop = True
elif option == 'context':
try:
value = remaining.pop(0)
except IndexError:
msg = "No argument provided to the '%s' tag for the context option." % bits[0]
six.reraise(TemplateSyntaxError, TemplateSyntaxError(msg), sys.exc_info()[2])
if value in invalid_context:
raise TemplateSyntaxError(
"Invalid argument '%s' provided to the '%s' tag for the context option" % (value, bits[0]),
)
message_context = parser.compile_filter(value)
elif option == 'as':
try:
value = remaining.pop(0)
except IndexError:
msg = "No argument provided to the '%s' tag for the as option." % bits[0]
six.reraise(TemplateSyntaxError, TemplateSyntaxError(msg), sys.exc_info()[2])
asvar = value
else:
raise TemplateSyntaxError(
"Unknown argument for '%s' tag: '%s'. The only options "
"available are 'noop', 'context' \"xxx\", and 'as VAR'." % (
bits[0], option,
)
)
seen.add(option)
return TranslateNode(message_string, noop, asvar, message_context)
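# For example, {% trans "this is a test" context "greeting" as var %} parses to a
# TranslateNode with noop=False, asvar='var' and message_context compiled from "greeting".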
@register.tag("blocktrans")
def do_block_translate(parser, token):
"""
This will translate a block of text with parameters.
Usage::
{% blocktrans with bar=foo|filter boo=baz|filter %}
This is {{ bar }} and {{ boo }}.
{% endblocktrans %}
Additionally, this supports pluralization::
{% blocktrans count count=var|length %}
There is {{ count }} object.
{% plural %}
There are {{ count }} objects.
{% endblocktrans %}
This is much like ngettext, only in template syntax.
The "var as value" legacy format is still supported::
{% blocktrans with foo|filter as bar and baz|filter as boo %}
{% blocktrans count var|length as count %}
Contextual translations are supported::
{% blocktrans with bar=foo|filter context "greeting" %}
This is {{ bar }}.
{% endblocktrans %}
This is equivalent to calling pgettext/npgettext instead of
(u)gettext/(u)ngettext.
"""
bits = token.split_contents()
options = {}
remaining_bits = bits[1:]
while remaining_bits:
option = remaining_bits.pop(0)
if option in options:
raise TemplateSyntaxError('The %r option was specified more '
'than once.' % option)
if option == 'with':
value = token_kwargs(remaining_bits, parser, support_legacy=True)
if not value:
raise TemplateSyntaxError('"with" in %r tag needs at least '
'one keyword argument.' % bits[0])
elif option == 'count':
value = token_kwargs(remaining_bits, parser, support_legacy=True)
if len(value) != 1:
raise TemplateSyntaxError('"count" in %r tag expected exactly '
'one keyword argument.' % bits[0])
elif option == "context":
try:
value = remaining_bits.pop(0)
value = parser.compile_filter(value)
except Exception:
msg = (
'"context" in %r tag expected '
'exactly one argument.') % bits[0]
six.reraise(TemplateSyntaxError, TemplateSyntaxError(msg), sys.exc_info()[2])
elif option == "trimmed":
value = True
else:
raise TemplateSyntaxError('Unknown argument for %r tag: %r.' %
(bits[0], option))
options[option] = value
if 'count' in options:
countervar, counter = list(options['count'].items())[0]
else:
countervar, counter = None, None
if 'context' in options:
message_context = options['context']
else:
message_context = None
extra_context = options.get('with', {})
trimmed = options.get("trimmed", False)
singular = []
plural = []
while parser.tokens:
token = parser.next_token()
if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
singular.append(token)
else:
break
if countervar and counter:
if token.contents.strip() != 'plural':
raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags inside it")
while parser.tokens:
token = parser.next_token()
if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
plural.append(token)
else:
break
if token.contents.strip() != 'endblocktrans':
raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags (seen %r) inside it" % token.contents)
return BlockTranslateNode(extra_context, singular, plural, countervar,
counter, message_context, trimmed=trimmed)
@register.tag
def language(parser, token):
"""
This will enable the given language just for this block.
Usage::
{% language "de" %}
This is {{ bar }} and {{ boo }}.
{% endlanguage %}
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' takes one argument (language)" % bits[0])
language = parser.compile_filter(bits[1])
nodelist = parser.parse(('endlanguage',))
parser.delete_first_token()
return LanguageNode(nodelist, language)
| bsd-3-clause |
SublimeText-Markdown/MarkdownEditing | references.py | 1 | 26853 | """
Commands related to links, references and footnotes.
Exported commands:
ReferenceJumpCommand
ReferenceJumpContextCommand
ReferenceNewReferenceCommand
ReferenceNewInlineLinkCommand
ReferenceNewInlineImage
ReferenceNewImage
ReferenceNewFootnote
ReferenceDeleteReference
ReferenceOrganize
GatherMissingLinkMarkersCommand
ConvertInlineLinkToReferenceCommand
ConvertInlineLinksToReferencesCommand
"""
import sublime
import re
import operator
try:
from MarkdownEditing.mdeutils import MDETextCommand
except ImportError:
from mdeutils import MDETextCommand
refname_scope_name = "constant.other.reference.link.markdown"
definition_scope_name = "meta.link.reference.def.markdown"
footnote_scope_name = "meta.link.reference.footnote.markdown"
marker_scope_name = "meta.link.reference.markdown"
marker_literal_scope_name = "meta.link.reference.literal.markdown"
marker_image_scope_name = "meta.image.reference.markdown"
ref_link_scope_name = "markup.underline.link.markdown"
marker_begin_scope_name = "punctuation.definition.string.begin.markdown"
marker_text_end_scope_name = "punctuation.definition.string.end.markdown"
marker_text_scope_name = "string.other.link.title.markdown"
refname_start_scope_name = "punctuation.definition.constant.begin.markdown"
marker_end_scope_name = "punctuation.definition.constant.end.markdown"
def hasScope(scope_name, to_find):
"""Test to_find's existence in scope_name."""
return to_find in scope_name.split(" ")
class Obj(object):
"""A utility obj for anoymous object."""
def __init__(self, **kwargs):
"""Take keyword arguments."""
self.__dict__.update(kwargs)
def getMarkers(view, name=''):
"""Find all markers."""
# returns {name -> Region}
markers = []
name = re.escape(name)
if name == '':
markers.extend(view.find_all(r"(?<=\]\[)([^\]]+)(?=\])", 0)) # ][???]
markers.extend(view.find_all(r"(?<=\[)([^\]]*)(?=\]\[\])", 0)) # [???][]
markers.extend(view.find_all(r"(?<=\[)(\^[^\]]+)(?=\])(?!\s*\]:)", 0)) # [^???]
markers.extend(view.find_all(r"(?<!\]\[)(?<=\[)([^\]]+)(?=\])(?!\]\[)(?!\]\()(?!\]:)", 0)) # [???]
else:
# ][name]
markers.extend(view.find_all(r"(?<=\]\[)(?i)(%s)(?=\])" % name, 0))
markers.extend(view.find_all(r"(?<=\[)(?i)(%s)(?=\]\[\])" % name, 0)) # [name][]
markers.extend(view.find_all(r"(?<!\]\[)(?<=\[)(?i)(%s)(?=\])(?!\]\[)(?!\]\()(?!\]:)" % name, 0)) # [name]
if name[0] == '^':
# [(^)name]
markers.extend(view.find_all(r"(?<=\[)(%s)(?=\])(?!\s*\]:)" % name, 0))
regions = []
for x in markers:
scope_name = view.scope_name(x.begin())
if (hasScope(scope_name, refname_scope_name) or hasScope(scope_name, marker_text_scope_name)) and \
not hasScope(view.scope_name(x.begin()), definition_scope_name):
regions.append(x)
ids = {}
for reg in regions:
name = view.substr(reg).strip()
key = name.lower()
if key in ids:
ids[key].regions.append(reg)
else:
ids[key] = Obj(regions=[reg], label=name)
return ids
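# For example, in a buffer containing "[text][name]", "[name][]", "[name]" and "[^1]",
# getMarkers(view) returns a dict keyed by the lower-cased marker text, roughly
# {'name': Obj(regions=[...], label='name'), '^1': Obj(regions=[...], label='^1')}.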
def getReferences(view, name=''):
"""Find all reference definitions."""
# returns {name -> Region}
refs = []
name = re.escape(name)
if name == '':
refs.extend(view.find_all(r"(?<=^\[)([^\]]+)(?=\]:)", 0))
else:
refs.extend(view.find_all(r"(?<=^\[)(%s)(?=\]:)" % name, 0))
regions = refs
ids = {}
for reg in regions:
name = view.substr(reg).strip()
key = name.lower()
if key in ids:
ids[key].regions.append(reg)
else:
ids[key] = Obj(regions=[reg], label=name)
return ids
def isMarkerDefined(view, name):
"""Return True if a marker is defined by that name."""
return len(getReferences(view, name)) > 0
def getCurrentScopeRegion(view, pt):
"""Extend the region under current scope."""
scope = view.scope_name(pt)
l = pt
while l > 0 and view.scope_name(l - 1) == scope:
l -= 1
r = pt
while r < view.size() and view.scope_name(r) == scope:
r += 1
return sublime.Region(l, r)
def findScopeFrom(view, pt, scope, backwards=False, char=None):
"""Find the nearest position of a scope from given position."""
if backwards:
while pt >= 0 and (not hasScope(view.scope_name(pt), scope) or
(char is not None and view.substr(pt) != char)):
pt -= 1
else:
while pt < view.size() and (not hasScope(view.scope_name(pt), scope) or
(char is not None and view.substr(pt) != char)):
pt += 1
return pt
def get_reference(view, pos):
"""Try to match a marker or reference on given position. Return a tuple (matched, is_definition, name)."""
scope = view.scope_name(pos).split(" ")
if definition_scope_name in scope or footnote_scope_name in scope:
if refname_scope_name in scope:
# Definition name
defname = view.substr(getCurrentScopeRegion(view, pos))
elif refname_start_scope_name in scope:
# Starting "["
defname = view.substr(getCurrentScopeRegion(view, pos + 1))
else:
# URL or footnote
marker_pt = findScopeFrom(view, pos, refname_scope_name, True)
defname = view.substr(getCurrentScopeRegion(view, marker_pt))
return (True, True, defname)
elif marker_scope_name in scope or marker_image_scope_name in scope or marker_literal_scope_name in scope:
if refname_scope_name in scope:
# defname name
defname = view.substr(getCurrentScopeRegion(view, pos))
else:
# Text
if marker_begin_scope_name in scope:
pos += 1
while pos >= 0 and view.substr(sublime.Region(pos, pos + 1)) in '[]':
pos -= 1
if not (marker_scope_name in scope or marker_image_scope_name in scope or marker_literal_scope_name in scope):
return (False, None, None)
marker_text_end = findScopeFrom(view, pos, marker_text_end_scope_name) + 1
if hasScope(view.scope_name(marker_text_end), refname_start_scope_name) and not hasScope(view.scope_name(marker_text_end + 1), marker_end_scope_name):
# of [Text][name] struct
marker_pt = marker_text_end + 1
marker_pt_end = findScopeFrom(view, marker_pt, marker_end_scope_name)
defname = view.substr(sublime.Region(marker_pt, marker_pt_end))
else:
# of [Text] struct or [Text][] struct
defname = view.substr(getCurrentScopeRegion(view, pos))
return (True, False, defname)
else:
return (False, None, None)
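# For example, with the cursor inside "name" in "[Text][name]" this returns
# (True, False, 'name'); on a definition line "[name]: http://..." it returns
# (True, True, 'name'); anywhere else it returns (False, None, None).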
class ReferenceJumpCommand(MDETextCommand):
"""Jump between definition and reference."""
def description(self):
"""Description for package control."""
return 'Jump between definition and reference'
def run(self, edit):
"""Run command callback."""
view = self.view
edit_regions = []
markers = getMarkers(view)
refs = getReferences(view)
missing_markers = []
missing_refs = []
for sel in view.sel():
matched, is_definition, defname = get_reference(view, sel.begin())
if matched:
defname_key = defname.lower()
if is_definition:
if defname_key in markers:
edit_regions.extend(markers[defname_key].regions)
else:
missing_markers.append(defname)
else:
if defname_key in refs:
edit_regions.extend(refs[defname_key].regions)
else:
missing_refs.append(defname)
if len(edit_regions) > 0:
sels = view.sel()
sels.clear()
sels.add_all(edit_regions)
view.show(edit_regions[0])
if len(missing_refs) + len(missing_markers) > 0:
# has something missing
if len(missing_markers) == 0:
sublime.status_message("The definition%s of %s cannot be found." % ("" if len(missing_refs) == 1 else "s", ", ".join(missing_refs)))
elif len(missing_refs) == 0:
sublime.status_message("The marker%s of %s cannot be found." % ("" if len(missing_markers) == 1 else "s", ", ".join(missing_markers)))
else:
sublime.status_message("The definition%s of %s and the marker%s of %s cannot be found." % ("" if len(missing_refs) == 1 else "s", ", ".join(missing_refs), "" if len(missing_markers) == 1 else "s", ", ".join(missing_markers)))
class ReferenceJumpContextCommand(ReferenceJumpCommand):
"""Jump between definition and reference. Used in context menu."""
def is_visible(self):
"""Return True if cursor is on a marker or reference."""
return ReferenceJumpCommand.is_visible(self) and any(get_reference(self.view, sel.begin())[0] for sel in self.view.sel())
def is_url(contents):
"""Return if contents contains an URL."""
re_match_urls = re.compile(r"""((?:[a-z][\w-]+:(?:/{1,3}|[a-z0-9%])|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’]))""", re.DOTALL)
m = re_match_urls.search(contents)
return True if m else False
def mangle_url(url):
"""Mangle URL for links."""
url = url.strip()
if re.match(r'^([a-z0-9-]+\.)+\w{2,4}', url, re.IGNORECASE):
url = 'http://' + url
return url
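# For example, mangle_url(" example.com/page ") returns "http://example.com/page",
# while a URL that already carries a scheme (e.g. "https://example.com") is only stripped.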
def append_reference_link(edit, view, name, url):
r"""Detect if file ends with \n."""
if view.substr(view.size() - 1) == '\n':
nl = ''
else:
nl = '\n'
# Append the new reference link to the end of the file
edit_position = view.size() + len(nl) + 1
view.insert(edit, view.size(), '{0}[{1}]: {2}\n'.format(nl, name, url))
return sublime.Region(edit_position, edit_position + len(name))
def suggest_default_link_name(name, image):
"""Suggest default link name in camel case."""
ret = ''
name_segs = name.split()
if len(name_segs) > 1:
for word in name_segs:
ret += word.capitalize()
if len(ret) > 30:
break
return ('image' if image else '') + ret
else:
return name
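# For example, suggest_default_link_name("my cool page", False) returns "MyCoolPage"
# (prefixed with "image" when image is True), while a single word such as "page"
# is returned unchanged.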
def check_for_link(view, link):
"""Check if the link already defined. Return the name if so."""
refs = getReferences(view)
link = link.strip()
for name in refs:
link_begin = findScopeFrom(view, refs[name].regions[0].begin(), ref_link_scope_name)
reg = getCurrentScopeRegion(view, link_begin)
found_link = view.substr(reg).strip()
if found_link == link:
return name
return None
class ReferenceNewReferenceCommand(MDETextCommand):
"""Create a new reference."""
def run(self, edit, image=False):
"""Run command callback."""
view = self.view
edit_regions = []
contents = sublime.get_clipboard().strip()
link = mangle_url(contents) if is_url(contents) else ""
suggested_name = ""
if len(link) > 0:
# If link already exists, reuse existing reference
suggested_link_name = suggested_name = check_for_link(view, link)
for sel in view.sel():
text = view.substr(sel)
if not suggested_name:
suggested_link_name = suggest_default_link_name(text, image)
suggested_name = suggested_link_name if suggested_link_name != text else ""
edit_position = sel.end() + 3
if image:
edit_position += 1
view.replace(edit, sel, "![" + text + "][" + suggested_name + "]")
else:
view.replace(edit, sel, "[" + text + "][" + suggested_name + "]")
edit_regions.append(sublime.Region(edit_position, edit_position + len(suggested_name)))
if len(edit_regions) > 0:
selection = view.sel()
selection.clear()
reference_region = append_reference_link(edit, view, suggested_link_name, link)
selection.add(reference_region)
selection.add_all(edit_regions)
class ReferenceNewInlineLinkCommand(MDETextCommand):
"""Create a new inline link."""
def run(self, edit, image=False):
"""Run command callback."""
view = self.view
contents = sublime.get_clipboard().strip()
link = mangle_url(contents) if is_url(contents) else ""
link = link.replace("$", "\\$")
if image:
view.run_command("insert_snippet", {"contents": ""})
else:
view.run_command("insert_snippet", {"contents": "[${1:$SELECTION}](${2:" + link + "})"})
class ReferenceNewInlineImage(MDETextCommand):
"""Create a new inline image."""
def run(self, edit):
"""Run command callback."""
self.view.run_command("reference_new_inline_link", {"image": True})
class ReferenceNewImage(MDETextCommand):
"""Create a new image."""
def run(self, edit):
"""Run command callback."""
self.view.run_command("reference_new_reference", {"image": True})
def get_next_footnote_marker(view):
"""Get the number of the next footnote."""
refs = getReferences(view)
footnotes = [int(ref[1:]) for ref in refs if view.substr(refs[ref].regions[0])[0] == "^"]
def target_loc(num):
return (num - 1) % len(footnotes)
for i in range(len(footnotes)):
footnote = footnotes[i]
tl = target_loc(footnote)
# footnotes = [1 2 {4} 5], i = 2, footnote = 4, tl = 3
while tl != i:
target_fn = footnotes[tl]
ttl = target_loc(target_fn)
# target_fn = 5, ttl = 0
if ttl != tl or target_fn > footnote:
footnotes[i], footnotes[tl] = footnotes[tl], footnotes[i]
tl, footnote = ttl, target_fn
# [1 2 {5} 4]
else:
break
for i in range(len(footnotes)):
if footnotes[i] != i + 1:
return i + 1
return len(footnotes) + 1
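# For example, with existing footnote definitions [^1], [^2] and [^4] this returns 3;
# with only [^1] and [^2] it falls through to len(footnotes) + 1, i.e. 3 as well.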
class ReferenceNewFootnote(MDETextCommand):
"""Create a new footnote."""
def run(self, edit):
"""Run command callback."""
view = self.view
markernum = get_next_footnote_marker(view)
markernum_str = '[^%s]' % markernum
for sel in view.sel():
startloc = sel.end()
if bool(view.size()):
targetloc = view.find('(\s|$)', startloc).begin()
else:
targetloc = 0
view.insert(edit, targetloc, markernum_str)
if len(view.sel()) > 0:
view.show(view.size())
view.insert(edit, view.size(), '\n' + markernum_str + ': ')
view.sel().clear()
view.sel().add(sublime.Region(view.size(), view.size()))
class ReferenceDeleteReference(MDETextCommand):
"""Delete a reference."""
def run(self, edit):
"""Run command callback."""
view = self.view
edit_regions = []
markers = getMarkers(view)
refs = getReferences(view)
for sel in view.sel():
matched, is_definition, defname = get_reference(view, sel.begin())
if matched:
defname_key = defname.lower()
if defname_key in markers:
for marker in markers[defname_key].regions:
if defname[0] == "^":
edit_regions.append(sublime.Region(marker.begin() - 1, marker.end() + 1))
else:
l = findScopeFrom(view, marker.begin(), marker_begin_scope_name, True)
if l > 0 and view.substr(sublime.Region(l - 1, l)) == "!":
edit_regions.append(sublime.Region(l - 1, l + 1))
else:
edit_regions.append(sublime.Region(l, l + 1))
if hasScope(view.scope_name(marker.end()), marker_text_end_scope_name):
if view.substr(sublime.Region(marker.end() + 1, marker.end() + 2)) == '[':
# [Text][]
r = findScopeFrom(view, marker.end(), marker_end_scope_name, False)
edit_regions.append(sublime.Region(marker.end(), r + 1))
else:
# [Text]
edit_regions.append(sublime.Region(marker.end(), marker.end() + 1))
else:
# [Text][name]
r = findScopeFrom(view, marker.begin(), marker_text_end_scope_name, True)
edit_regions.append(sublime.Region(r, marker.end() + 1))
if defname_key in refs:
for ref in refs[defname_key].regions:
edit_regions.append(view.full_line(ref.begin()))
if len(edit_regions) > 0:
sel = view.sel()
sel.clear()
sel.add_all(edit_regions)
def delete_all(index):
if index == 0:
view.run_command("left_delete")
view.window().show_quick_panel(["Delete the References", "Preview the Changes"], delete_all, sublime.MONOSPACE_FONT)
class ReferenceOrganize(MDETextCommand):
"""Sort and report all references."""
def run(self, edit):
"""Run command callback."""
view = self.view
# reorder
markers = getMarkers(view)
marker_order = sorted(markers.keys(), key=lambda marker: min(markers[marker].regions, key=lambda reg: reg.a).a)
marker_order = dict(zip(marker_order, range(0, len(marker_order))))
refs = getReferences(view)
flatrefs = []
flatfns = []
sel = view.sel()
sel.clear()
for name in refs:
for link_reg in refs[name].regions:
line_reg = view.full_line(link_reg)
if name[0] == "^":
flatfns.append((name, view.substr(line_reg).strip("\n")))
else:
flatrefs.append((name, view.substr(line_reg).strip("\n")))
sel.add(line_reg)
flatfns.sort(key=operator.itemgetter(0))
flatrefs.sort(key=lambda x: marker_order[x[0].lower()] if x[0].lower() in marker_order else 9999)
view.run_command("left_delete")
if view.size() >= 2 and view.substr(sublime.Region(view.size() - 2, view.size())) == "\n\n":
view.erase(edit, sublime.Region(view.size() - 1, view.size()))
for fn_tuple in flatfns:
view.insert(edit, view.size(), fn_tuple[1])
view.insert(edit, view.size(), "\n")
view.insert(edit, view.size(), "\n")
for ref_tuple in flatrefs:
view.insert(edit, view.size(), ref_tuple[1])
view.insert(edit, view.size(), "\n")
# delete duplicate / report conflict
sel.clear()
refs = getReferences(view)
conflicts = {}
unique_links = {}
output = ""
for name in refs:
if name[0] == '^':
continue
n_links = len(refs[name].regions)
if n_links > 1:
for ref in refs[name].regions:
link_begin = findScopeFrom(view, ref.end(), ref_link_scope_name)
link = view.substr(getCurrentScopeRegion(view, link_begin))
if name in unique_links:
if link == unique_links[name]:
output += "%s has duplicate value of %s\n" % (refs[name].label, link)
sel.add(view.full_line(ref.begin()))
elif name in conflicts:
conflicts[name].append(link)
else:
conflicts[name] = [link]
else:
unique_links[name] = link
# view.run_command("left_delete")
for name in conflicts:
output += "%s has conflict values: %s with %s\n" % (refs[name].label, unique_links[name], ", ".join(conflicts[name]))
# report missing
refs = getReferences(view)
lower_refs = [ref.lower() for ref in refs]
missings = []
for ref in refs:
if ref not in marker_order:
missings.append(refs[ref].label)
if len(missings) > 0:
output += "Error: Definition [%s] %s no reference\n" % (", ".join(missings), "have" if len(missings) > 1 else "has")
missings = []
for marker in markers:
if marker not in lower_refs:
missings.append(markers[marker].label)
if len(missings) > 0:
output += "Error: [%s] %s no definition\n" % (", ".join(missings), "have" if len(missings) > 1 else "has")
# sel.clear()
if len(output) == 0:
output = "All references are well defined :)\n"
output += "===================\n"
def get_times_string(n):
if n == 0:
return "0 time"
elif n == 1:
return "1 time"
else:
return "%i times" % n
output += "\n".join(('[%s] is referenced %s' % (markers[m].label, get_times_string(len(markers[m].regions)))) for m in markers)
window = view.window()
output_panel = window.create_output_panel("mde")
output_panel.run_command('erase_view')
output_panel.run_command('append', {'characters': output})
window.run_command("show_panel", {"panel": "output.mde"})
class GatherMissingLinkMarkersCommand(MDETextCommand):
"""Gather all missing references and creates them."""
def run(self, edit):
"""Run command callback."""
view = self.view
refs = getReferences(view)
markers = getMarkers(view)
missings = []
for marker in markers:
if marker not in refs:
missings.append(marker)
if len(missings):
# Remove all whitespace at the end of the file
whitespace_at_end = view.find(r'\s*\z', 0)
view.replace(edit, whitespace_at_end, "\n")
# If there is not already a reference list at the end, insert a new line at the end
if not view.find(r'\n\s*\[[^\]]*\]:.*\s*\z', 0):
view.insert(edit, view.size(), "\n")
for link in missings:
view.insert(edit, view.size(), '[%s]: \n' % link)
def convert2ref(view, edit, link_span, name, omit_name=False):
"""Convert single link to reference."""
view.sel().clear()
link = view.substr(sublime.Region(link_span.a + 1, link_span.b - 1))
if omit_name:
view.replace(edit, link_span, '[]')
link_span = sublime.Region(link_span.a + 1, link_span.a + 1)
offset = len(link)
else:
view.replace(edit, link_span, '[%s]' % name)
link_span = sublime.Region(link_span.a + 1, link_span.a + 1 + len(name))
offset = len(link) - len(name)
view.sel().add(link_span)
view.show_at_center(link_span)
_viewsize = view.size()
view.insert(edit, _viewsize, '[%s]: %s\n' % (name, link))
reference_span = sublime.Region(_viewsize + 1, _viewsize + 1 + len(name))
view.sel().add(reference_span)
return offset
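# For example, with link_span covering "(http://example.com)" in
# "[Text](http://example.com)", the parenthesised part is replaced by "[name]"
# (or "[]" when omit_name is True) and "[name]: http://example.com" is appended
# at the end of the view.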
class ConvertInlineLinkToReferenceCommand(MDETextCommand):
"""Convert an inline link to reference."""
def is_visible(self):
"""Return True if cursor is on a marker or reference."""
for sel in self.view.sel():
scope_name = self.view.scope_name(sel.b)
if hasScope(scope_name, 'meta.link.inline.markdown'):
return True
return False
def run(self, edit, name=None):
"""Run command callback."""
view = self.view
pattern = r"\[([^\]]+)\]\((?!#)([^\)]+)\)"
# Remove all whitespace at the end of the file
whitespace_at_end = view.find(r'\s*\z', 0)
view.replace(edit, whitespace_at_end, "\n")
# If there is not already a reference list at the end, insert a new line at the end
if not view.find(r'\n\s*\[[^\]]*\]:.*\s*\z', 0):
view.insert(edit, view.size(), "\n")
link_spans = []
for sel in view.sel():
scope_name = view.scope_name(sel.b)
if not hasScope(scope_name, 'meta.link.inline.markdown'):
continue
start = findScopeFrom(view, sel.b, marker_begin_scope_name, backwards=True)
end = findScopeFrom(view, sel.b, 'punctuation.definition.metadata.markdown', char=')') + 1
text = view.substr(sublime.Region(start, end))
m = re.match(pattern, text)
if m is None:
continue
text = m.group(1)
link = m.group(2)
link_span = sublime.Region(start + m.span(2)[0] - 1, start + m.span(2)[1] + 1)
if is_url(link):
link = mangle_url(link)
if len(link) > 0:
if name is None:
# If link already exists, reuse existing reference
suggested_name = check_for_link(view, link)
if suggested_name is None:
is_image = view.substr(start - 1) == '!' if start > 0 else False
suggested_name = suggest_default_link_name(text, is_image)
_name = name if name is not None else suggested_name
link_spans.append((link_span, _name, _name == text))
offset = 0
for link_span in link_spans:
_link_span = sublime.Region(link_span[0].a + offset, link_span[0].b + offset)
offset -= convert2ref(view, edit, _link_span, link_span[1], link_span[2])
class ConvertInlineLinksToReferencesCommand(MDETextCommand):
"""Convert inline links to references."""
def run(self, edit):
"""Run command callback."""
view = self.view
pattern = r"(?<=\]\()(?!#)([^\)]+)(?=\))"
_sel = []
for sel in view.sel():
_sel.append(sel)
view.sel().clear()
view.sel().add_all(view.find_all(pattern))
view.run_command('convert_inline_link_to_reference')
| mit |
gdelpierre/ansible-modules-core | utilities/helper/meta.py | 11 | 3274 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Ansible, a Red Hat company
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
module: meta
short_description: Execute Ansible 'actions'
version_added: "1.2"
description:
- Meta tasks are a special kind of task which can influence Ansible internal execution or state. Prior to Ansible 2.0,
the only meta option available was `flush_handlers`. As of 2.2, there are five meta tasks which can be used.
Meta tasks can be used anywhere within your playbook.
options:
free_form:
description:
- This module takes a free form command, as a string. There's not an actual option named "free form". See the examples!
- "C(flush_handlers) makes Ansible run any handler tasks which have thus far been notified. Ansible inserts these tasks internally at certain points to implicitly trigger handler runs (after pre/post tasks, the final role execution, and the main tasks section of your plays)."
- "C(refresh_inventory) (added in 2.0) forces the reload of the inventory, which in the case of dynamic inventory scripts means they will be re-executed. This is mainly useful when additional hosts are created and users wish to use them instead of using the `add_host` module."
- "C(noop) (added in 2.0) This literally does 'nothing'. It is mainly used internally and not recommended for general use."
- "C(clear_facts) (added in 2.1) causes the gathered facts for the hosts specified in the play's list of hosts to be cleared, including the fact cache."
- "C(clear_host_errors) (added in 2.1) clears the failed state (if any) from hosts specified in the play's list of hosts."
- "C(end_play) (added in 2.2) causes the play to end without failing the host."
choices: ['noop', 'flush_handlers', 'refresh_inventory', 'clear_facts', 'clear_host_errors', 'end_play']
required: true
default: null
notes:
- meta is not really a module nor action_plugin as such it cannot be overwritten.
author:
- "Ansible Core Team"
'''
EXAMPLES = '''
# force all notified handlers to run at this point, not waiting for normal sync points
- template: src=new.j2 dest=/etc/config.txt
notify: myhandler
- meta: flush_handlers
# reload inventory, useful with dynamic inventories when play makes changes to the existing hosts
- cloud_guest: name=newhost state=present # this is fake module
- meta: refresh_inventory
# clear gathered facts from all currently targeted hosts
- meta: clear_facts
# bring host back to play after failure
- copy: src=file dest=/etc/file
remote_user: imightnothavepermission
- meta: clear_host_errors
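# end the play for the targeted hosts without failing them (end_play, added in 2.2)
- meta: end_play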
'''
| gpl-3.0 |
tangentlabs/wagtail | wagtail/wagtailsearch/tests/test_backends.py | 18 | 7650 | import unittest
import time
from django.test import TestCase
from django.test.utils import override_settings
from django.conf import settings
from django.core import management
from django.utils.six import StringIO
from wagtail.tests.utils import WagtailTestUtils
from wagtail.tests.search import models
from wagtail.wagtailsearch.backends import get_search_backend, get_search_backends, InvalidSearchBackendError
from wagtail.wagtailsearch.backends.db import DBSearch
class BackendTests(WagtailTestUtils):
# To test a specific backend, subclass BackendTests and define self.backend_path.
def setUp(self):
# Search WAGTAILSEARCH_BACKENDS for an entry that uses the given backend path
for backend_name, backend_conf in settings.WAGTAILSEARCH_BACKENDS.items():
if backend_conf['BACKEND'] == self.backend_path:
self.backend = get_search_backend(backend_name)
self.backend_name = backend_name
break
else:
# no conf entry found - skip tests for this backend
raise unittest.SkipTest("No WAGTAILSEARCH_BACKENDS entry for the backend %s" % self.backend_path)
self.load_test_data()
def load_test_data(self):
# Reset the index
self.backend.reset_index()
self.backend.add_type(models.SearchTest)
self.backend.add_type(models.SearchTestChild)
# Create a test database
testa = models.SearchTest()
testa.title = "Hello World"
testa.save()
self.backend.add(testa)
self.testa = testa
testb = models.SearchTest()
testb.title = "Hello"
testb.live = True
testb.save()
self.backend.add(testb)
self.testb = testb
testc = models.SearchTestChild()
testc.title = "Hello"
testc.live = True
testc.save()
self.backend.add(testc)
self.testc = testc
testd = models.SearchTestChild()
testd.title = "World"
testd.save()
self.backend.add(testd)
self.testd = testd
# Refresh the index
self.backend.refresh_index()
def test_blank_search(self):
results = self.backend.search("", models.SearchTest)
self.assertEqual(set(results), set())
def test_search(self):
results = self.backend.search("Hello", models.SearchTest)
self.assertEqual(set(results), {self.testa, self.testb, self.testc.searchtest_ptr})
results = self.backend.search("World", models.SearchTest)
self.assertEqual(set(results), {self.testa, self.testd.searchtest_ptr})
def test_callable_indexed_field(self):
results = self.backend.search("Callable", models.SearchTest)
self.assertEqual(set(results), {self.testa, self.testb, self.testc.searchtest_ptr, self.testd.searchtest_ptr})
def test_filters(self):
results = self.backend.search(None, models.SearchTest, filters=dict(live=True))
self.assertEqual(set(results), {self.testb, self.testc.searchtest_ptr})
def test_filters_with_in_lookup(self):
live_page_titles = models.SearchTest.objects.filter(live=True).values_list('title', flat=True)
results = self.backend.search(None, models.SearchTest, filters=dict(title__in=live_page_titles))
self.assertEqual(set(results), {self.testb, self.testc.searchtest_ptr})
def test_single_result(self):
result = self.backend.search(None, models.SearchTest)[0]
self.assertIsInstance(result, models.SearchTest)
def test_sliced_results(self):
sliced_results = self.backend.search(None, models.SearchTest)[1:3]
self.assertEqual(len(sliced_results), 2)
for result in sliced_results:
self.assertIsInstance(result, models.SearchTest)
def test_child_model(self):
results = self.backend.search(None, models.SearchTestChild)
self.assertEqual(set(results), {self.testc, self.testd})
def test_delete(self):
# Delete one of the objects
self.backend.delete(self.testa)
self.testa.delete()
self.backend.refresh_index()
results = self.backend.search(None, models.SearchTest)
self.assertEqual(set(results), {self.testb, self.testc.searchtest_ptr, self.testd.searchtest_ptr})
def test_update_index_command(self):
# Reset the index, this should clear out the index
self.backend.reset_index()
# Give Elasticsearch some time to catch up...
time.sleep(1)
results = self.backend.search(None, models.SearchTest)
self.assertEqual(set(results), set())
# Run update_index command
with self.ignore_deprecation_warnings(): # ignore any DeprecationWarnings thrown by models with old-style indexed_fields definitions
management.call_command('update_index', backend_name=self.backend_name, interactive=False, stdout=StringIO())
results = self.backend.search(None, models.SearchTest)
self.assertEqual(set(results), {self.testa, self.testb, self.testc.searchtest_ptr, self.testd.searchtest_ptr})
@override_settings(
WAGTAILSEARCH_BACKENDS={
'default': {'BACKEND': 'wagtail.wagtailsearch.backends.db'}
}
)
class TestBackendLoader(TestCase):
def test_import_by_name(self):
db = get_search_backend(backend='default')
self.assertIsInstance(db, DBSearch)
def test_import_by_path(self):
db = get_search_backend(backend='wagtail.wagtailsearch.backends.db')
self.assertIsInstance(db, DBSearch)
def test_import_by_full_path(self):
db = get_search_backend(backend='wagtail.wagtailsearch.backends.db.DBSearch')
self.assertIsInstance(db, DBSearch)
def test_nonexistent_backend_import(self):
self.assertRaises(InvalidSearchBackendError, get_search_backend, backend='wagtail.wagtailsearch.backends.doesntexist')
def test_invalid_backend_import(self):
self.assertRaises(InvalidSearchBackendError, get_search_backend, backend="I'm not a backend!")
def test_get_search_backends(self):
backends = list(get_search_backends())
self.assertEqual(len(backends), 1)
self.assertIsInstance(backends[0], DBSearch)
@override_settings(
WAGTAILSEARCH_BACKENDS={
'default': {
'BACKEND': 'wagtail.wagtailsearch.backends.db'
},
'another-backend': {
'BACKEND': 'wagtail.wagtailsearch.backends.db'
},
}
)
def test_get_search_backends_multiple(self):
backends = list(get_search_backends())
self.assertEqual(len(backends), 2)
def test_get_search_backends_with_auto_update(self):
backends = list(get_search_backends(with_auto_update=True))
# Auto update is the default
self.assertEqual(len(backends), 1)
@override_settings(
WAGTAILSEARCH_BACKENDS={
'default': {
'BACKEND': 'wagtail.wagtailsearch.backends.db',
'AUTO_UPDATE': False,
},
}
)
def test_get_search_backends_with_auto_update_disabled(self):
backends = list(get_search_backends(with_auto_update=True))
self.assertEqual(len(backends), 0)
@override_settings(
WAGTAILSEARCH_BACKENDS={
'default': {
'BACKEND': 'wagtail.wagtailsearch.backends.db',
'AUTO_UPDATE': False,
},
}
)
def test_get_search_backends_without_auto_update_disabled(self):
backends = list(get_search_backends())
self.assertEqual(len(backends), 1)
| bsd-3-clause |
kxliugang/edx-platform | common/djangoapps/util/model_utils.py | 31 | 6782 | """
Utilities for django models.
"""
import unicodedata
import re
from eventtracking import tracker
from django.conf import settings
from django.utils.encoding import force_unicode
from django.utils.safestring import mark_safe
from django_countries.fields import Country
# The setting name used for events when "settings" (account settings, preferences, profile information) change.
USER_SETTINGS_CHANGED_EVENT_NAME = u'edx.user.settings.changed'
def get_changed_fields_dict(instance, model_class):
"""
Helper method for tracking field changes on a model.
Given a model instance and class, return a dict whose keys are that
instance's fields which differ from the last saved ones and whose values
are the old values of those fields. Related fields are not considered.
Args:
instance (Model instance): the model instance with changes that are
being tracked
model_class (Model class): the class of the model instance we are
tracking
Returns:
dict: a mapping of field names to current database values of those
fields, or an empty dict if the model is new
"""
try:
old_model = model_class.objects.get(pk=instance.pk)
except model_class.DoesNotExist:
# Object is new, so fields haven't technically changed. We'll return
# an empty dict as a default value.
return {}
else:
field_names = [
field[0].name for field in model_class._meta.get_fields_with_model()
]
changed_fields = {
field_name: getattr(old_model, field_name) for field_name in field_names
if getattr(old_model, field_name) != getattr(instance, field_name)
}
return changed_fields
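# A typical (illustrative) use is a pre_save signal handler doing
#   instance._changed_fields = get_changed_fields_dict(instance, sender)
# so that emit_field_changed_events() below can compare old and new values.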
def emit_field_changed_events(instance, user, db_table, excluded_fields=None, hidden_fields=None):
"""Emits a settings changed event for each field that has changed.
Note that this function expects that a `_changed_fields` dict has been set
as an attribute on `instance` (see `get_changed_fields_dict`.
Args:
instance (Model instance): the model instance that is being saved
user (User): the user that this instance is associated with
db_table (str): the name of the table that we're modifying
excluded_fields (list): a list of field names for which events should
not be emitted
hidden_fields (list): a list of field names specifying fields whose
values should not be included in the event (None will be used
instead)
Returns:
None
"""
def clean_field(field_name, value):
"""
Prepare a field to be emitted in a JSON serializable format. If
`field_name` is a hidden field, return None.
"""
if field_name in hidden_fields:
return None
# Country is not JSON serializable. Return the country code.
if isinstance(value, Country):
if value.code:
return value.code
else:
return None
return value
excluded_fields = excluded_fields or []
hidden_fields = hidden_fields or []
changed_fields = getattr(instance, '_changed_fields', {})
for field_name in changed_fields:
if field_name not in excluded_fields:
old_value = clean_field(field_name, changed_fields[field_name])
new_value = clean_field(field_name, getattr(instance, field_name))
emit_setting_changed_event(user, db_table, field_name, old_value, new_value)
# Remove the now inaccurate _changed_fields attribute.
if hasattr(instance, '_changed_fields'):
del instance._changed_fields
def truncate_fields(old_value, new_value):
"""
Truncates old_value and new_value for analytics event emission if necessary.
Args:
old_value(obj): the value before the change
new_value(obj): the new value being saved
Returns:
a dictionary with the following fields:
'old': the truncated old value
'new': the truncated new value
'truncated': the list of fields that have been truncated
"""
# Compute the maximum value length so that two copies can fit into the maximum event size
# in addition to all the other fields recorded.
max_value_length = settings.TRACK_MAX_EVENT / 4
serialized_old_value, old_was_truncated = _get_truncated_setting_value(old_value, max_length=max_value_length)
serialized_new_value, new_was_truncated = _get_truncated_setting_value(new_value, max_length=max_value_length)
truncated_values = []
if old_was_truncated:
truncated_values.append("old")
if new_was_truncated:
truncated_values.append("new")
return {'old': serialized_old_value, 'new': serialized_new_value, 'truncated': truncated_values}
def emit_setting_changed_event(user, db_table, setting_name, old_value, new_value):
"""Emits an event for a change in a setting.
Args:
user (User): the user that this setting is associated with.
db_table (str): the name of the table that we're modifying.
setting_name (str): the name of the setting being changed.
old_value (object): the value before the change.
new_value (object): the new value being saved.
Returns:
None
"""
truncated_fields = truncate_fields(old_value, new_value)
truncated_fields['setting'] = setting_name
truncated_fields['user_id'] = user.id
truncated_fields['table'] = db_table
tracker.emit(
USER_SETTINGS_CHANGED_EVENT_NAME,
truncated_fields
)
def _get_truncated_setting_value(value, max_length=None):
"""
Returns the truncated form of a setting value.
Returns:
truncated_value (object): the possibly truncated version of the value.
was_truncated (bool): returns true if the serialized value was truncated.
"""
if isinstance(value, basestring) and max_length is not None and len(value) > max_length:
return value[0:max_length], True
else:
return value, False
# Taken from Django 1.8 source code because it's not supported in 1.4
def slugify(value):
"""Converts value into a string suitable for readable URLs.
Converts to ASCII. Converts spaces to hyphens. Removes characters that
aren't alphanumerics, underscores, or hyphens. Converts to lowercase.
Also strips leading and trailing whitespace.
Args:
value (string): String to slugify.
"""
value = force_unicode(value)
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = re.sub(r'[^\w\s-]', '', value).strip().lower()
return mark_safe(re.sub(r'[-\s]+', '-', value))
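# For example, slugify("Hello, World!") returns "hello-world" and
# slugify(" Déjà Vu ") returns "deja-vu".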
| agpl-3.0 |
thaole16/Boids | boids/boids.py | 1 | 4866 | """
A refactored implementation of Boids from a deliberately bad implementation of
[Boids](http://dl.acm.org/citation.cfm?doid=37401.37406): an exercise for class.
"""
from matplotlib import pyplot as plt
from matplotlib import animation
import numpy as np
class Boids(object):
def __init__(self,
boid_count=50,
x_positions=[-450, 50.0],
y_positions=[300.0, 600.0],
x_velocities=[0, 10.0],
y_velocities=[-20.0, 20.0],
move_to_middle_strength=0.01,
alert_distance=100,
formation_flying_distance=10000,
formation_flying_strength=0.125):
self.boid_count = boid_count
self.move_to_middle_strength = move_to_middle_strength
self.alert_distance = alert_distance
self.formation_flying_distance = formation_flying_distance
self.formation_flying_strength = formation_flying_strength
self.boids_x = np.random.uniform(size=boid_count, *x_positions)
self.boids_y = np.random.uniform(size=boid_count, *y_positions)
self.positions = np.stack((self.boids_x, self.boids_y))
self.boid_x_velocities = np.random.uniform(size=boid_count, *x_velocities)
self.boid_y_velocities = np.random.uniform(size=boid_count, *y_velocities)
self.velocities = np.stack((self.boid_x_velocities, self.boid_y_velocities))
self.boids = (self.positions, self.velocities)
def fly_towards_the_middle(self, boids, move_to_middle_strength=0.01):
(positions, velocities) = boids
middle = np.mean(positions, 1)
move_to_middle = (middle[:, np.newaxis] - positions) * move_to_middle_strength
velocities += move_to_middle
def separation(self, coords):
separations = np.array(coords)[:, np.newaxis, :] - np.array(coords)[:, :, np.newaxis]
separation_distance_squared = separations[0, :, :] ** 2 + separations[1, :, :] ** 2
return separations, separation_distance_squared
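# Given positions of shape (2, N), `separations` has shape (2, N, N) with
# separations[:, i, j] = coords[:, j] - coords[:, i], and
# separation_distance_squared has shape (N, N).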
def fly_away_from_nearby_boids(self, boids, alert_distance=100):
(positions, velocities) = boids
separations, separation_distance_squared = self.separation(positions)
birds_outside_alert = separation_distance_squared > alert_distance
close_separations = np.copy(separations)
close_separations[0, :, :][birds_outside_alert] = 0 # x positions
close_separations[1, :, :][birds_outside_alert] = 0 # y positions
velocities += np.sum(close_separations, 1)
def match_speed_with_nearby_boids(self, boids,
formation_flying_distance=10000,
formation_flying_strength=0.125):
(positions, velocities) = boids
separations, separation_distance_squared = self.separation(positions)
birds_outside_formation = separation_distance_squared > formation_flying_distance
velocity_difference = velocities[:, np.newaxis, :] - velocities[:, :, np.newaxis]
close_formation = np.copy(velocity_difference)
close_formation[0, :, :][birds_outside_formation] = 0
close_formation[1, :, :][birds_outside_formation] = 0
velocities += -1 * np.mean(close_formation, 1) * formation_flying_strength
def update_boids(self, boids):
(positions, velocities) = boids
# Fly towards the middle
self.fly_towards_the_middle(boids, self.move_to_middle_strength)
# Fly away from nearby boids
self.fly_away_from_nearby_boids(boids, self.alert_distance)
# Try to match speed with nearby boids
self.match_speed_with_nearby_boids(boids, self.formation_flying_distance, self.formation_flying_strength)
# Update positions
positions += velocities
def _animate(self, frame):
self.update_boids(self.boids)
(positions, velocities) = self.boids
self.scatter.set_offsets(np.transpose(positions))
def model(self, xlim=(-500, 1500), ylim=(-500, 1500), frames=50, interval=50, savefile=None):
colors = np.random.rand(self.boid_count)
boidsize = np.pi * (2 * np.random.rand(self.boid_count) + 2) ** 2
figure = plt.figure()
axes = plt.axes(xlim=xlim, ylim=ylim)
self.scatter = axes.scatter(self.boids_x, self.boids_y,
s=boidsize, c=colors, alpha=0.5, edgecolors=None)
anim = animation.FuncAnimation(figure, self._animate,
frames=frames, interval=interval)
plt.xlabel('x (arbitrary units)')
plt.ylabel('y (arbitrary units)')
plt.title("Boids a'Flocking")
if savefile != None:
anim.save(savefile)
plt.show()
if __name__ == "__main__":
boidsobject = Boids()
boidsobject.model()
| mit |
Eric-Zhong/odoo | addons/resource/faces/utils.py | 433 | 3231 | ############################################################################
# Copyright (C) 2005 by Reithinger GmbH
# [email protected]
#
# This file is part of faces.
#
# faces is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# faces is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
############################################################################
import observer
import os.path
import sys
import os.path
_call_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
def get_installation_path():
try:
if sys.frozen:
path = _call_dir
else:
raise AttributeError()
except AttributeError:
path = os.path.abspath(observer.__file__)
path = os.path.split(path)[0]
path = os.path.normcase(path)
return path
def get_resource_path():
try:
if sys.frozen:
path = _call_dir
path = os.path.join(path, "resources", "faces", "gui")
else:
raise AttributeError()
except AttributeError:
path = get_installation_path()
path = os.path.join(path, "gui", "resources")
path = os.path.normcase(path)
return path
def get_template_path():
try:
if sys.frozen:
path = _call_dir
path = os.path.join(path, "resources", "faces", "templates")
else:
raise AttributeError()
except AttributeError:
path = get_installation_path()
path = os.path.join(path, "templates")
path = os.path.normcase(path)
return path
def get_howtos_path():
try:
if sys.frozen:
path = _call_dir
else:
raise AttributeError()
except AttributeError:
path = get_installation_path()
path = os.path.join(path, "howtos")
path = os.path.normcase(path)
return path
def flatten(items):
if isinstance(items, tuple):
items = list(items)
if not isinstance(items, list):
yield items
stack = [iter(items)]
while stack:
for item in stack[-1]:
if isinstance(item, tuple):
item = list(item)
if isinstance(item, list):
stack.append(iter(item))
break
yield item
else:
stack.pop()
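# For example, list(flatten([1, (2, [3, 4]), 5])) == [1, 2, 3, 4, 5].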
def do_yield():
pass
def progress_start(title, maximum, message=""):
pass
def progress_update(value, message=""):
pass
def progress_end():
pass
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
fhaoquan/kbengine | kbe/src/lib/python/Lib/difflib.py | 72 | 81679 | """
Module difflib -- helpers for computing deltas between objects.
Function get_close_matches(word, possibilities, n=3, cutoff=0.6):
Use SequenceMatcher to return list of the best "good enough" matches.
Function context_diff(a, b):
For two lists of strings, return a delta in context diff format.
Function ndiff(a, b):
Return a delta: the difference between `a` and `b` (lists of strings).
Function restore(delta, which):
Return one of the two sequences that generated an ndiff delta.
Function unified_diff(a, b):
For two lists of strings, return a delta in unified diff format.
Class SequenceMatcher:
A flexible class for comparing pairs of sequences of any type.
Class Differ:
For producing human-readable deltas from sequences of lines of text.
Class HtmlDiff:
For producing HTML side by side comparison with change highlights.
"""
__all__ = ['get_close_matches', 'ndiff', 'restore', 'SequenceMatcher',
'Differ','IS_CHARACTER_JUNK', 'IS_LINE_JUNK', 'context_diff',
'unified_diff', 'HtmlDiff', 'Match']
import heapq
from collections import namedtuple as _namedtuple
Match = _namedtuple('Match', 'a b size')
def _calculate_ratio(matches, length):
if length:
return 2.0 * matches / length
return 1.0
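# For example, 3 matching elements over sequences of combined length 8 give
# _calculate_ratio(3, 8) == 0.75.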
class SequenceMatcher:
"""
SequenceMatcher is a flexible class for comparing pairs of sequences of
any type, so long as the sequence elements are hashable. The basic
algorithm predates, and is a little fancier than, an algorithm
published in the late 1980's by Ratcliff and Obershelp under the
hyperbolic name "gestalt pattern matching". The basic idea is to find
the longest contiguous matching subsequence that contains no "junk"
elements (R-O doesn't address junk). The same idea is then applied
recursively to the pieces of the sequences to the left and to the right
of the matching subsequence. This does not yield minimal edit
sequences, but does tend to yield matches that "look right" to people.
SequenceMatcher tries to compute a "human-friendly diff" between two
sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
longest *contiguous* & junk-free matching subsequence. That's what
catches peoples' eyes. The Windows(tm) windiff has another interesting
notion, pairing up elements that appear uniquely in each sequence.
That, and the method here, appear to yield more intuitive difference
reports than does diff. This method appears to be the least vulnerable
to synching up on blocks of "junk lines", though (like blank lines in
ordinary text files, or maybe "<P>" lines in HTML files). That may be
because this is the only method of the 3 that has a *concept* of
"junk" <wink>.
Example, comparing two strings, and considering blanks to be "junk":
>>> s = SequenceMatcher(lambda x: x == " ",
... "private Thread currentThread;",
... "private volatile Thread currentThread;")
>>>
.ratio() returns a float in [0, 1], measuring the "similarity" of the
sequences. As a rule of thumb, a .ratio() value over 0.6 means the
sequences are close matches:
>>> print(round(s.ratio(), 3))
0.866
>>>
If you're only interested in where the sequences match,
.get_matching_blocks() is handy:
>>> for block in s.get_matching_blocks():
... print("a[%d] and b[%d] match for %d elements" % block)
a[0] and b[0] match for 8 elements
a[8] and b[17] match for 21 elements
a[29] and b[38] match for 0 elements
Note that the last tuple returned by .get_matching_blocks() is always a
dummy, (len(a), len(b), 0), and this is the only case in which the last
tuple element (number of elements matched) is 0.
If you want to know how to change the first sequence into the second,
use .get_opcodes():
>>> for opcode in s.get_opcodes():
... print("%6s a[%d:%d] b[%d:%d]" % opcode)
equal a[0:8] b[0:8]
insert a[8:8] b[8:17]
equal a[8:29] b[17:38]
See the Differ class for a fancy human-friendly file differencer, which
uses SequenceMatcher both to compare sequences of lines, and to compare
sequences of characters within similar (near-matching) lines.
See also function get_close_matches() in this module, which shows how
simple code building on SequenceMatcher can be used to do useful work.
Timing: Basic R-O is cubic time worst case and quadratic time expected
case. SequenceMatcher is quadratic time for the worst case and has
expected-case behavior dependent in a complicated way on how many
elements the sequences have in common; best case time is linear.
Methods:
__init__(isjunk=None, a='', b='')
Construct a SequenceMatcher.
set_seqs(a, b)
Set the two sequences to be compared.
set_seq1(a)
Set the first sequence to be compared.
set_seq2(b)
Set the second sequence to be compared.
find_longest_match(alo, ahi, blo, bhi)
Find longest matching block in a[alo:ahi] and b[blo:bhi].
get_matching_blocks()
Return list of triples describing matching subsequences.
get_opcodes()
Return list of 5-tuples describing how to turn a into b.
ratio()
Return a measure of the sequences' similarity (float in [0,1]).
quick_ratio()
Return an upper bound on .ratio() relatively quickly.
real_quick_ratio()
Return an upper bound on ratio() very quickly.
"""
def __init__(self, isjunk=None, a='', b='', autojunk=True):
"""Construct a SequenceMatcher.
Optional arg isjunk is None (the default), or a one-argument
function that takes a sequence element and returns true iff the
element is junk. None is equivalent to passing "lambda x: 0", i.e.
no elements are considered to be junk. For example, pass
lambda x: x in " \\t"
if you're comparing lines as sequences of characters, and don't
want to synch up on blanks or hard tabs.
Optional arg a is the first of two sequences to be compared. By
default, an empty string. The elements of a must be hashable. See
also .set_seqs() and .set_seq1().
Optional arg b is the second of two sequences to be compared. By
default, an empty string. The elements of b must be hashable. See
also .set_seqs() and .set_seq2().
Optional arg autojunk should be set to False to disable the
"automatic junk heuristic" that treats popular elements as junk
(see module documentation for more information).
"""
# Members:
# a
# first sequence
# b
# second sequence; differences are computed as "what do
# we need to do to 'a' to change it into 'b'?"
# b2j
# for x in b, b2j[x] is a list of the indices (into b)
# at which x appears; junk and popular elements do not appear
# fullbcount
# for x in b, fullbcount[x] == the number of times x
# appears in b; only materialized if really needed (used
# only for computing quick_ratio())
# matching_blocks
# a list of (i, j, k) triples, where a[i:i+k] == b[j:j+k];
# ascending & non-overlapping in i and in j; terminated by
# a dummy (len(a), len(b), 0) sentinel
# opcodes
# a list of (tag, i1, i2, j1, j2) tuples, where tag is
# one of
# 'replace' a[i1:i2] should be replaced by b[j1:j2]
# 'delete' a[i1:i2] should be deleted
# 'insert' b[j1:j2] should be inserted
# 'equal' a[i1:i2] == b[j1:j2]
# isjunk
# a user-supplied function taking a sequence element and
# returning true iff the element is "junk" -- this has
# subtle but helpful effects on the algorithm, which I'll
# get around to writing up someday <0.9 wink>.
# DON'T USE! Only __chain_b uses this. Use "in self.bjunk".
# bjunk
# the items in b for which isjunk is True.
# bpopular
# nonjunk items in b treated as junk by the heuristic (if used).
self.isjunk = isjunk
self.a = self.b = None
self.autojunk = autojunk
self.set_seqs(a, b)
def set_seqs(self, a, b):
"""Set the two sequences to be compared.
>>> s = SequenceMatcher()
>>> s.set_seqs("abcd", "bcde")
>>> s.ratio()
0.75
"""
self.set_seq1(a)
self.set_seq2(b)
def set_seq1(self, a):
"""Set the first sequence to be compared.
The second sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq1("bcde")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq2().
"""
if a is self.a:
return
self.a = a
self.matching_blocks = self.opcodes = None
def set_seq2(self, b):
"""Set the second sequence to be compared.
The first sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq2("abcd")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq1().
"""
if b is self.b:
return
self.b = b
self.matching_blocks = self.opcodes = None
self.fullbcount = None
self.__chain_b()
# For each element x in b, set b2j[x] to a list of the indices in
# b where x appears; the indices are in increasing order; note that
# the number of times x appears in b is len(b2j[x]) ...
# when self.isjunk is defined, junk elements don't show up in this
# map at all, which stops the central find_longest_match method
# from starting any matching block at a junk element ...
# b2j also does not contain entries for "popular" elements, meaning
# elements that account for more than 1 + 1% of the total elements, and
# when the sequence is reasonably large (>= 200 elements); this can
# be viewed as an adaptive notion of semi-junk, and yields an enormous
# speedup when, e.g., comparing program files with hundreds of
# instances of "return NULL;" ...
# note that this is only called when b changes; so for cross-product
# kinds of matches, it's best to call set_seq2 once, then set_seq1
# repeatedly
def __chain_b(self):
# Because isjunk is a user-defined (not C) function, and we test
# for junk a LOT, it's important to minimize the number of calls.
# Before the tricks described here, __chain_b was by far the most
# time-consuming routine in the whole module! If anyone sees
# Jim Roskind, thank him again for profile.py -- I never would
# have guessed that.
# The first trick is to build b2j ignoring the possibility
# of junk. I.e., we don't call isjunk at all yet. Throwing
# out the junk later is much cheaper than building b2j "right"
# from the start.
b = self.b
self.b2j = b2j = {}
for i, elt in enumerate(b):
indices = b2j.setdefault(elt, [])
indices.append(i)
# Purge junk elements
self.bjunk = junk = set()
isjunk = self.isjunk
if isjunk:
for elt in b2j.keys():
if isjunk(elt):
junk.add(elt)
for elt in junk: # separate loop avoids separate list of keys
del b2j[elt]
# Purge popular elements that are not junk
self.bpopular = popular = set()
n = len(b)
if self.autojunk and n >= 200:
ntest = n // 100 + 1
for elt, idxs in b2j.items():
if len(idxs) > ntest:
popular.add(elt)
for elt in popular: # ditto; as fast for 1% deletion
del b2j[elt]
def find_longest_match(self, alo, ahi, blo, bhi):
"""Find longest matching block in a[alo:ahi] and b[blo:bhi].
If isjunk is not defined:
Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
alo <= i <= i+k <= ahi
blo <= j <= j+k <= bhi
and for all (i',j',k') meeting those conditions,
k >= k'
i <= i'
and if i == i', j <= j'
In other words, of all maximal matching blocks, return one that
starts earliest in a, and of all those maximal matching blocks that
start earliest in a, return the one that starts earliest in b.
>>> s = SequenceMatcher(None, " abcd", "abcd abcd")
>>> s.find_longest_match(0, 5, 0, 9)
Match(a=0, b=4, size=5)
If isjunk is defined, first the longest matching block is
determined as above, but with the additional restriction that no
junk element appears in the block. Then that block is extended as
far as possible by matching (only) junk elements on both sides. So
the resulting block never matches on junk except as identical junk
happens to be adjacent to an "interesting" match.
Here's the same example as before, but considering blanks to be
junk. That prevents " abcd" from matching the " abcd" at the tail
end of the second sequence directly. Instead only the "abcd" can
match, and matches the leftmost "abcd" in the second sequence:
>>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd")
>>> s.find_longest_match(0, 5, 0, 9)
Match(a=1, b=0, size=4)
If no blocks match, return (alo, blo, 0).
>>> s = SequenceMatcher(None, "ab", "c")
>>> s.find_longest_match(0, 2, 0, 1)
Match(a=0, b=0, size=0)
"""
# CAUTION: stripping common prefix or suffix would be incorrect.
# E.g.,
# ab
# acab
# Longest matching block is "ab", but if common prefix is
# stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
# strip, so ends up claiming that ab is changed to acab by
# inserting "ca" in the middle. That's minimal but unintuitive:
# "it's obvious" that someone inserted "ac" at the front.
# Windiff ends up at the same place as diff, but by pairing up
# the unique 'b's and then matching the first two 'a's.
a, b, b2j, isbjunk = self.a, self.b, self.b2j, self.bjunk.__contains__
besti, bestj, bestsize = alo, blo, 0
# find longest junk-free match
# during an iteration of the loop, j2len[j] = length of longest
# junk-free match ending with a[i-1] and b[j]
j2len = {}
nothing = []
for i in range(alo, ahi):
# look at all instances of a[i] in b; note that because
# b2j has no junk keys, the loop is skipped if a[i] is junk
j2lenget = j2len.get
newj2len = {}
for j in b2j.get(a[i], nothing):
# a[i] matches b[j]
if j < blo:
continue
if j >= bhi:
break
k = newj2len[j] = j2lenget(j-1, 0) + 1
if k > bestsize:
besti, bestj, bestsize = i-k+1, j-k+1, k
j2len = newj2len
# Extend the best by non-junk elements on each end. In particular,
# "popular" non-junk elements aren't in b2j, which greatly speeds
# the inner loop above, but also means "the best" match so far
# doesn't contain any junk *or* popular non-junk elements.
while besti > alo and bestj > blo and \
not isbjunk(b[bestj-1]) and \
a[besti-1] == b[bestj-1]:
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
while besti+bestsize < ahi and bestj+bestsize < bhi and \
not isbjunk(b[bestj+bestsize]) and \
a[besti+bestsize] == b[bestj+bestsize]:
bestsize += 1
# Now that we have a wholly interesting match (albeit possibly
# empty!), we may as well suck up the matching junk on each
# side of it too. Can't think of a good reason not to, and it
# saves post-processing the (possibly considerable) expense of
# figuring out what to do with it. In the case of an empty
# interesting match, this is clearly the right thing to do,
# because no other kind of match is possible in the regions.
while besti > alo and bestj > blo and \
isbjunk(b[bestj-1]) and \
a[besti-1] == b[bestj-1]:
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
while besti+bestsize < ahi and bestj+bestsize < bhi and \
isbjunk(b[bestj+bestsize]) and \
a[besti+bestsize] == b[bestj+bestsize]:
bestsize = bestsize + 1
return Match(besti, bestj, bestsize)
def get_matching_blocks(self):
"""Return list of triples describing matching subsequences.
Each triple is of the form (i, j, n), and means that
a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
i and in j. New in Python 2.5, it's also guaranteed that if
(i, j, n) and (i', j', n') are adjacent triples in the list, and
the second is not the last triple in the list, then i+n != i' or
j+n != j'. IOW, adjacent triples never describe adjacent equal
blocks.
The last triple is a dummy, (len(a), len(b), 0), and is the only
triple with n==0.
>>> s = SequenceMatcher(None, "abxcd", "abcd")
>>> list(s.get_matching_blocks())
[Match(a=0, b=0, size=2), Match(a=3, b=2, size=2), Match(a=5, b=4, size=0)]
"""
if self.matching_blocks is not None:
return self.matching_blocks
la, lb = len(self.a), len(self.b)
# This is most naturally expressed as a recursive algorithm, but
# at least one user bumped into extreme use cases that exceeded
# the recursion limit on their box. So, now we maintain a list
# ('queue`) of blocks we still need to look at, and append partial
# results to `matching_blocks` in a loop; the matches are sorted
# at the end.
queue = [(0, la, 0, lb)]
matching_blocks = []
while queue:
alo, ahi, blo, bhi = queue.pop()
i, j, k = x = self.find_longest_match(alo, ahi, blo, bhi)
# a[alo:i] vs b[blo:j] unknown
# a[i:i+k] same as b[j:j+k]
# a[i+k:ahi] vs b[j+k:bhi] unknown
if k: # if k is 0, there was no matching block
matching_blocks.append(x)
if alo < i and blo < j:
queue.append((alo, i, blo, j))
if i+k < ahi and j+k < bhi:
queue.append((i+k, ahi, j+k, bhi))
matching_blocks.sort()
# It's possible that we have adjacent equal blocks in the
# matching_blocks list now. Starting with 2.5, this code was added
# to collapse them.
i1 = j1 = k1 = 0
non_adjacent = []
for i2, j2, k2 in matching_blocks:
# Is this block adjacent to i1, j1, k1?
if i1 + k1 == i2 and j1 + k1 == j2:
# Yes, so collapse them -- this just increases the length of
# the first block by the length of the second, and the first
# block so lengthened remains the block to compare against.
k1 += k2
else:
# Not adjacent. Remember the first block (k1==0 means it's
# the dummy we started with), and make the second block the
# new block to compare against.
if k1:
non_adjacent.append((i1, j1, k1))
i1, j1, k1 = i2, j2, k2
if k1:
non_adjacent.append((i1, j1, k1))
non_adjacent.append( (la, lb, 0) )
self.matching_blocks = list(map(Match._make, non_adjacent))
return self.matching_blocks
def get_opcodes(self):
"""Return list of 5-tuples describing how to turn a into b.
Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
tuple preceding it, and likewise for j1 == the previous j2.
The tags are strings, with these meanings:
'replace': a[i1:i2] should be replaced by b[j1:j2]
'delete': a[i1:i2] should be deleted.
Note that j1==j2 in this case.
'insert': b[j1:j2] should be inserted at a[i1:i1].
Note that i1==i2 in this case.
'equal': a[i1:i2] == b[j1:j2]
>>> a = "qabxcd"
>>> b = "abycdf"
>>> s = SequenceMatcher(None, a, b)
>>> for tag, i1, i2, j1, j2 in s.get_opcodes():
... print(("%7s a[%d:%d] (%s) b[%d:%d] (%s)" %
... (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2])))
delete a[0:1] (q) b[0:0] ()
equal a[1:3] (ab) b[0:2] (ab)
replace a[3:4] (x) b[2:3] (y)
equal a[4:6] (cd) b[3:5] (cd)
insert a[6:6] () b[5:6] (f)
"""
if self.opcodes is not None:
return self.opcodes
i = j = 0
self.opcodes = answer = []
for ai, bj, size in self.get_matching_blocks():
# invariant: we've pumped out correct diffs to change
# a[:i] into b[:j], and the next matching block is
# a[ai:ai+size] == b[bj:bj+size]. So we need to pump
# out a diff to change a[i:ai] into b[j:bj], pump out
# the matching block, and move (i,j) beyond the match
tag = ''
if i < ai and j < bj:
tag = 'replace'
elif i < ai:
tag = 'delete'
elif j < bj:
tag = 'insert'
if tag:
answer.append( (tag, i, ai, j, bj) )
i, j = ai+size, bj+size
# the list of matching blocks is terminated by a
# sentinel with size 0
if size:
answer.append( ('equal', ai, i, bj, j) )
return answer
def get_grouped_opcodes(self, n=3):
""" Isolate change clusters by eliminating ranges with no changes.
Return a generator of groups with up to n lines of context.
Each group is in the same format as returned by get_opcodes().
>>> from pprint import pprint
>>> a = list(map(str, range(1,40)))
>>> b = a[:]
>>> b[8:8] = ['i'] # Make an insertion
>>> b[20] += 'x' # Make a replacement
>>> b[23:28] = [] # Make a deletion
>>> b[30] += 'y' # Make another replacement
>>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes()))
[[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)],
[('equal', 16, 19, 17, 20),
('replace', 19, 20, 20, 21),
('equal', 20, 22, 21, 23),
('delete', 22, 27, 23, 23),
('equal', 27, 30, 23, 26)],
[('equal', 31, 34, 27, 30),
('replace', 34, 35, 30, 31),
('equal', 35, 38, 31, 34)]]
"""
codes = self.get_opcodes()
if not codes:
codes = [("equal", 0, 1, 0, 1)]
# Fixup leading and trailing groups if they show no changes.
if codes[0][0] == 'equal':
tag, i1, i2, j1, j2 = codes[0]
codes[0] = tag, max(i1, i2-n), i2, max(j1, j2-n), j2
if codes[-1][0] == 'equal':
tag, i1, i2, j1, j2 = codes[-1]
codes[-1] = tag, i1, min(i2, i1+n), j1, min(j2, j1+n)
nn = n + n
group = []
for tag, i1, i2, j1, j2 in codes:
# End the current group and start a new one whenever
# there is a large range with no changes.
if tag == 'equal' and i2-i1 > nn:
group.append((tag, i1, min(i2, i1+n), j1, min(j2, j1+n)))
yield group
group = []
i1, j1 = max(i1, i2-n), max(j1, j2-n)
group.append((tag, i1, i2, j1 ,j2))
if group and not (len(group)==1 and group[0][0] == 'equal'):
yield group
def ratio(self):
"""Return a measure of the sequences' similarity (float in [0,1]).
Where T is the total number of elements in both sequences, and
M is the number of matches, this is 2.0*M / T.
Note that this is 1 if the sequences are identical, and 0 if
they have nothing in common.
.ratio() is expensive to compute if you haven't already computed
.get_matching_blocks() or .get_opcodes(), in which case you may
want to try .quick_ratio() or .real_quick_ratio() first to get an
upper bound.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.quick_ratio()
0.75
>>> s.real_quick_ratio()
1.0
"""
matches = sum(triple[-1] for triple in self.get_matching_blocks())
return _calculate_ratio(matches, len(self.a) + len(self.b))
def quick_ratio(self):
"""Return an upper bound on ratio() relatively quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute.
"""
# viewing a and b as multisets, set matches to the cardinality
# of their intersection; this counts the number of matches
# without regard to order, so is clearly an upper bound
if self.fullbcount is None:
self.fullbcount = fullbcount = {}
for elt in self.b:
fullbcount[elt] = fullbcount.get(elt, 0) + 1
fullbcount = self.fullbcount
# avail[x] is the number of times x appears in 'b' less the
# number of times we've seen it in 'a' so far ... kinda
avail = {}
availhas, matches = avail.__contains__, 0
for elt in self.a:
if availhas(elt):
numb = avail[elt]
else:
numb = fullbcount.get(elt, 0)
avail[elt] = numb - 1
if numb > 0:
matches = matches + 1
return _calculate_ratio(matches, len(self.a) + len(self.b))
def real_quick_ratio(self):
"""Return an upper bound on ratio() very quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute than either .ratio() or .quick_ratio().
"""
la, lb = len(self.a), len(self.b)
# can't have more matches than the number of elements in the
# shorter sequence
return _calculate_ratio(min(la, lb), la + lb)
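# Illustrative sketch (added for exposition; not part of the original module):
# the 5-tuples from get_opcodes() can be replayed to rebuild `b` from `a`.
# The helper below is hypothetical and kept inside a comment so it does not
# extend the module's public API.
#
#     def apply_opcodes(a, b, opcodes):
#         out = []
#         for tag, i1, i2, j1, j2 in opcodes:
#             if tag == 'equal':
#                 out.append(a[i1:i2])          # unchanged text comes from a
#             elif tag in ('replace', 'insert'):
#                 out.append(b[j1:j2])          # new text comes from b
#             # 'delete' contributes nothing to the result
#         return ''.join(out)
#
#     s = SequenceMatcher(None, "qabxcd", "abycdf")
#     assert apply_opcodes("qabxcd", "abycdf", s.get_opcodes()) == "abycdf"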
def get_close_matches(word, possibilities, n=3, cutoff=0.6):
"""Use SequenceMatcher to return list of the best "good enough" matches.
word is a sequence for which close matches are desired (typically a
string).
possibilities is a list of sequences against which to match word
(typically a list of strings).
Optional arg n (default 3) is the maximum number of close matches to
return. n must be > 0.
Optional arg cutoff (default 0.6) is a float in [0, 1]. Possibilities
that don't score at least that similar to word are ignored.
The best (no more than n) matches among the possibilities are returned
in a list, sorted by similarity score, most similar first.
>>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"])
['apple', 'ape']
>>> import keyword as _keyword
>>> get_close_matches("wheel", _keyword.kwlist)
['while']
>>> get_close_matches("Apple", _keyword.kwlist)
[]
>>> get_close_matches("accept", _keyword.kwlist)
['except']
"""
if not n > 0:
raise ValueError("n must be > 0: %r" % (n,))
if not 0.0 <= cutoff <= 1.0:
raise ValueError("cutoff must be in [0.0, 1.0]: %r" % (cutoff,))
result = []
s = SequenceMatcher()
s.set_seq2(word)
for x in possibilities:
s.set_seq1(x)
if s.real_quick_ratio() >= cutoff and \
s.quick_ratio() >= cutoff and \
s.ratio() >= cutoff:
result.append((s.ratio(), x))
# Move the best scorers to head of list
result = heapq.nlargest(n, result)
# Strip scores for the best n matches
return [x for score, x in result]
def _count_leading(line, ch):
"""
Return number of `ch` characters at the start of `line`.
Example:
>>> _count_leading(' abc', ' ')
3
"""
i, n = 0, len(line)
while i < n and line[i] == ch:
i += 1
return i
class Differ:
r"""
Differ is a class for comparing sequences of lines of text, and
producing human-readable differences or deltas. Differ uses
SequenceMatcher both to compare sequences of lines, and to compare
sequences of characters within similar (near-matching) lines.
Each line of a Differ delta begins with a two-letter code:
'- ' line unique to sequence 1
'+ ' line unique to sequence 2
' ' line common to both sequences
'? ' line not present in either input sequence
Lines beginning with '? ' attempt to guide the eye to intraline
differences, and were not present in either input sequence. These lines
can be confusing if the sequences contain tab characters.
Note that Differ makes no claim to produce a *minimal* diff. To the
contrary, minimal diffs are often counter-intuitive, because they synch
up anywhere possible, sometimes accidental matches 100 pages apart.
Restricting synch points to contiguous matches preserves some notion of
locality, at the occasional cost of producing a longer diff.
Example: Comparing two texts.
First we set up the texts, sequences of individual single-line strings
ending with newlines (such sequences can also be obtained from the
`readlines()` method of file-like objects):
>>> text1 = ''' 1. Beautiful is better than ugly.
... 2. Explicit is better than implicit.
... 3. Simple is better than complex.
... 4. Complex is better than complicated.
... '''.splitlines(keepends=True)
>>> len(text1)
4
>>> text1[0][-1]
'\n'
>>> text2 = ''' 1. Beautiful is better than ugly.
... 3. Simple is better than complex.
... 4. Complicated is better than complex.
... 5. Flat is better than nested.
... '''.splitlines(keepends=True)
Next we instantiate a Differ object:
>>> d = Differ()
Note that when instantiating a Differ object we may pass functions to
filter out line and character 'junk'. See Differ.__init__ for details.
Finally, we compare the two:
>>> result = list(d.compare(text1, text2))
'result' is a list of strings, so let's pretty-print it:
>>> from pprint import pprint as _pprint
>>> _pprint(result)
[' 1. Beautiful is better than ugly.\n',
'- 2. Explicit is better than implicit.\n',
'- 3. Simple is better than complex.\n',
'+ 3. Simple is better than complex.\n',
'? ++\n',
'- 4. Complex is better than complicated.\n',
'? ^ ---- ^\n',
'+ 4. Complicated is better than complex.\n',
'? ++++ ^ ^\n',
'+ 5. Flat is better than nested.\n']
As a single multi-line string it looks like this:
>>> print(''.join(result), end="")
1. Beautiful is better than ugly.
- 2. Explicit is better than implicit.
- 3. Simple is better than complex.
+ 3. Simple is better than complex.
? ++
- 4. Complex is better than complicated.
? ^ ---- ^
+ 4. Complicated is better than complex.
? ++++ ^ ^
+ 5. Flat is better than nested.
Methods:
__init__(linejunk=None, charjunk=None)
Construct a text differencer, with optional filters.
compare(a, b)
Compare two sequences of lines; generate the resulting delta.
"""
def __init__(self, linejunk=None, charjunk=None):
"""
Construct a text differencer, with optional filters.
The two optional keyword parameters are for filter functions:
- `linejunk`: A function that should accept a single string argument,
and return true iff the string is junk. The module-level function
`IS_LINE_JUNK` may be used to filter out lines without visible
characters, except for at most one splat ('#'). It is recommended
to leave linejunk None; as of Python 2.3, the underlying
SequenceMatcher class has grown an adaptive notion of "noise" lines
that's better than any static definition the author has ever been
able to craft.
- `charjunk`: A function that should accept a string of length 1. The
module-level function `IS_CHARACTER_JUNK` may be used to filter out
whitespace characters (a blank or tab; **note**: bad idea to include
newline in this!). Use of IS_CHARACTER_JUNK is recommended.
"""
self.linejunk = linejunk
self.charjunk = charjunk
def compare(self, a, b):
r"""
Compare two sequences of lines; generate the resulting delta.
Each sequence must contain individual single-line strings ending with
newlines. Such sequences can be obtained from the `readlines()` method
of file-like objects. The delta generated also consists of newline-
terminated strings, ready to be printed as-is via the writeline()
method of a file-like object.
Example:
>>> print(''.join(Differ().compare('one\ntwo\nthree\n'.splitlines(True),
... 'ore\ntree\nemu\n'.splitlines(True))),
... end="")
- one
? ^
+ ore
? ^
- two
- three
? -
+ tree
+ emu
"""
cruncher = SequenceMatcher(self.linejunk, a, b)
for tag, alo, ahi, blo, bhi in cruncher.get_opcodes():
if tag == 'replace':
g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
elif tag == 'delete':
g = self._dump('-', a, alo, ahi)
elif tag == 'insert':
g = self._dump('+', b, blo, bhi)
elif tag == 'equal':
g = self._dump(' ', a, alo, ahi)
else:
raise ValueError('unknown tag %r' % (tag,))
yield from g
def _dump(self, tag, x, lo, hi):
"""Generate comparison results for a same-tagged range."""
for i in range(lo, hi):
yield '%s %s' % (tag, x[i])
def _plain_replace(self, a, alo, ahi, b, blo, bhi):
assert alo < ahi and blo < bhi
# dump the shorter block first -- reduces the burden on short-term
# memory if the blocks are of very different sizes
if bhi - blo < ahi - alo:
first = self._dump('+', b, blo, bhi)
second = self._dump('-', a, alo, ahi)
else:
first = self._dump('-', a, alo, ahi)
second = self._dump('+', b, blo, bhi)
for g in first, second:
yield from g
def _fancy_replace(self, a, alo, ahi, b, blo, bhi):
r"""
When replacing one block of lines with another, search the blocks
for *similar* lines; the best-matching pair (if any) is used as a
synch point, and intraline difference marking is done on the
similar pair. Lots of work, but often worth it.
Example:
>>> d = Differ()
>>> results = d._fancy_replace(['abcDefghiJkl\n'], 0, 1,
... ['abcdefGhijkl\n'], 0, 1)
>>> print(''.join(results), end="")
- abcDefghiJkl
? ^ ^ ^
+ abcdefGhijkl
? ^ ^ ^
"""
# don't synch up unless the lines have a similarity score of at
# least cutoff; best_ratio tracks the best score seen so far
best_ratio, cutoff = 0.74, 0.75
cruncher = SequenceMatcher(self.charjunk)
eqi, eqj = None, None # 1st indices of equal lines (if any)
# search for the pair that matches best without being identical
# (identical lines must be junk lines, & we don't want to synch up
# on junk -- unless we have to)
for j in range(blo, bhi):
bj = b[j]
cruncher.set_seq2(bj)
for i in range(alo, ahi):
ai = a[i]
if ai == bj:
if eqi is None:
eqi, eqj = i, j
continue
cruncher.set_seq1(ai)
# computing similarity is expensive, so use the quick
# upper bounds first -- have seen this speed up messy
# compares by a factor of 3.
# note that ratio() is only expensive to compute the first
# time it's called on a sequence pair; the expensive part
# of the computation is cached by cruncher
if cruncher.real_quick_ratio() > best_ratio and \
cruncher.quick_ratio() > best_ratio and \
cruncher.ratio() > best_ratio:
best_ratio, best_i, best_j = cruncher.ratio(), i, j
if best_ratio < cutoff:
# no non-identical "pretty close" pair
if eqi is None:
# no identical pair either -- treat it as a straight replace
yield from self._plain_replace(a, alo, ahi, b, blo, bhi)
return
# no close pair, but an identical pair -- synch up on that
best_i, best_j, best_ratio = eqi, eqj, 1.0
else:
# there's a close pair, so forget the identical pair (if any)
eqi = None
# a[best_i] very similar to b[best_j]; eqi is None iff they're not
# identical
# pump out diffs from before the synch point
yield from self._fancy_helper(a, alo, best_i, b, blo, best_j)
# do intraline marking on the synch pair
aelt, belt = a[best_i], b[best_j]
if eqi is None:
# pump out a '-', '?', '+', '?' quad for the synched lines
atags = btags = ""
cruncher.set_seqs(aelt, belt)
for tag, ai1, ai2, bj1, bj2 in cruncher.get_opcodes():
la, lb = ai2 - ai1, bj2 - bj1
if tag == 'replace':
atags += '^' * la
btags += '^' * lb
elif tag == 'delete':
atags += '-' * la
elif tag == 'insert':
btags += '+' * lb
elif tag == 'equal':
atags += ' ' * la
btags += ' ' * lb
else:
raise ValueError('unknown tag %r' % (tag,))
yield from self._qformat(aelt, belt, atags, btags)
else:
# the synch pair is identical
yield ' ' + aelt
# pump out diffs from after the synch point
yield from self._fancy_helper(a, best_i+1, ahi, b, best_j+1, bhi)
def _fancy_helper(self, a, alo, ahi, b, blo, bhi):
g = []
if alo < ahi:
if blo < bhi:
g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
else:
g = self._dump('-', a, alo, ahi)
elif blo < bhi:
g = self._dump('+', b, blo, bhi)
yield from g
def _qformat(self, aline, bline, atags, btags):
r"""
Format "?" output and deal with leading tabs.
Example:
>>> d = Differ()
>>> results = d._qformat('\tabcDefghiJkl\n', '\tabcdefGhijkl\n',
... ' ^ ^ ^ ', ' ^ ^ ^ ')
>>> for line in results: print(repr(line))
...
'- \tabcDefghiJkl\n'
'? \t ^ ^ ^\n'
'+ \tabcdefGhijkl\n'
'? \t ^ ^ ^\n'
"""
# Can hurt, but will probably help most of the time.
common = min(_count_leading(aline, "\t"),
_count_leading(bline, "\t"))
common = min(common, _count_leading(atags[:common], " "))
common = min(common, _count_leading(btags[:common], " "))
atags = atags[common:].rstrip()
btags = btags[common:].rstrip()
yield "- " + aline
if atags:
yield "? %s%s\n" % ("\t" * common, atags)
yield "+ " + bline
if btags:
yield "? %s%s\n" % ("\t" * common, btags)
# With respect to junk, an earlier version of ndiff simply refused to
# *start* a match with a junk element. The result was cases like this:
# before: private Thread currentThread;
# after: private volatile Thread currentThread;
# If you consider whitespace to be junk, the longest contiguous match
# not starting with junk is "e Thread currentThread". So ndiff reported
# that "e volatil" was inserted between the 't' and the 'e' in "private".
# While an accurate view, to people that's absurd. The current version
# looks for matching blocks that are entirely junk-free, then extends the
# longest one of those as far as possible but only with matching junk.
# So now "currentThread" is matched, then extended to suck up the
# preceding blank; then "private" is matched, and extended to suck up the
# following blank; then "Thread" is matched; and finally ndiff reports
# that "volatile " was inserted before "Thread". The only quibble
# remaining is that perhaps it was really the case that " volatile"
# was inserted after "private". I can live with that <wink>.
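# Illustrative demonstration of the behaviour described above (added for
# exposition; not part of the original comments).  With ndiff() -- defined
# later in this module, using its default whitespace character-junk filter --
# the example reads roughly as follows, with the '+' markers sitting under the
# inserted "volatile ":
#
#     >>> before = ["private Thread currentThread;\n"]
#     >>> after = ["private volatile Thread currentThread;\n"]
#     >>> print(''.join(ndiff(before, after)), end="")
#     - private Thread currentThread;
#     + private volatile Thread currentThread;
#     ?         +++++++++
#
# i.e. the insertion is reported as "volatile ", not "e volatil".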
import re
def IS_LINE_JUNK(line, pat=re.compile(r"\s*#?\s*$").match):
r"""
Return 1 for ignorable line: iff `line` is blank or contains a single '#'.
Examples:
>>> IS_LINE_JUNK('\n')
True
>>> IS_LINE_JUNK(' # \n')
True
>>> IS_LINE_JUNK('hello\n')
False
"""
return pat(line) is not None
def IS_CHARACTER_JUNK(ch, ws=" \t"):
r"""
Return 1 for ignorable character: iff `ch` is a space or tab.
Examples:
>>> IS_CHARACTER_JUNK(' ')
True
>>> IS_CHARACTER_JUNK('\t')
True
>>> IS_CHARACTER_JUNK('\n')
False
>>> IS_CHARACTER_JUNK('x')
False
"""
return ch in ws
########################################################################
### Unified Diff
########################################################################
def _format_range_unified(start, stop):
'Convert range to the "ed" format'
# Per the diff spec at http://www.unix.org/single_unix_specification/
beginning = start + 1 # lines start numbering with one
length = stop - start
if length == 1:
return '{}'.format(beginning)
if not length:
beginning -= 1 # empty ranges begin at line just before the range
return '{},{}'.format(beginning, length)
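# Illustrative examples (added for exposition; not part of the original module):
#     _format_range_unified(3, 4)  ->  '4'      (a single line, line 4)
#     _format_range_unified(3, 6)  ->  '4,3'    (three lines starting at line 4)
#     _format_range_unified(3, 3)  ->  '3,0'    (an empty range after line 3)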
def unified_diff(a, b, fromfile='', tofile='', fromfiledate='',
tofiledate='', n=3, lineterm='\n'):
r"""
Compare two sequences of lines; generate the delta as a unified diff.
Unified diffs are a compact way of showing line changes and a few
lines of context. The number of context lines is set by 'n' which
defaults to three.
By default, the diff control lines (those with ---, +++, or @@) are
created with a trailing newline. This is helpful so that inputs
created from file.readlines() result in diffs that are suitable for
file.writelines() since both the inputs and outputs have trailing
newlines.
For inputs that do not have trailing newlines, set the lineterm
argument to "" so that the output will be uniformly newline free.
The unidiff format normally has a header for filenames and modification
times. Any or all of these may be specified using strings for
'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
The modification times are normally expressed in the ISO 8601 format.
Example:
>>> for line in unified_diff('one two three four'.split(),
... 'zero one tree four'.split(), 'Original', 'Current',
... '2005-01-26 23:30:50', '2010-04-02 10:20:52',
... lineterm=''):
... print(line) # doctest: +NORMALIZE_WHITESPACE
--- Original 2005-01-26 23:30:50
+++ Current 2010-04-02 10:20:52
@@ -1,4 +1,4 @@
+zero
one
-two
-three
+tree
four
"""
started = False
for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n):
if not started:
started = True
fromdate = '\t{}'.format(fromfiledate) if fromfiledate else ''
todate = '\t{}'.format(tofiledate) if tofiledate else ''
yield '--- {}{}{}'.format(fromfile, fromdate, lineterm)
yield '+++ {}{}{}'.format(tofile, todate, lineterm)
first, last = group[0], group[-1]
file1_range = _format_range_unified(first[1], last[2])
file2_range = _format_range_unified(first[3], last[4])
yield '@@ -{} +{} @@{}'.format(file1_range, file2_range, lineterm)
for tag, i1, i2, j1, j2 in group:
if tag == 'equal':
for line in a[i1:i2]:
yield ' ' + line
continue
if tag in {'replace', 'delete'}:
for line in a[i1:i2]:
yield '-' + line
if tag in {'replace', 'insert'}:
for line in b[j1:j2]:
yield '+' + line
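# Illustrative usage sketch (added for exposition; not part of the original
# module): writing a unified diff of two files to stdout.  The file names are
# hypothetical and `sys` must be imported by the caller.
#
#     with open('old.txt') as f1, open('new.txt') as f2:
#         diff = unified_diff(f1.readlines(), f2.readlines(),
#                             fromfile='old.txt', tofile='new.txt')
#         sys.stdout.writelines(diff)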
########################################################################
### Context Diff
########################################################################
def _format_range_context(start, stop):
'Convert range to the "ed" format'
# Per the diff spec at http://www.unix.org/single_unix_specification/
beginning = start + 1 # lines start numbering with one
length = stop - start
if not length:
beginning -= 1 # empty ranges begin at line just before the range
if length <= 1:
return '{}'.format(beginning)
return '{},{}'.format(beginning, beginning + length - 1)
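# Illustrative examples (added for exposition; not part of the original module):
#     _format_range_context(3, 4)  ->  '4'      (a single line, line 4)
#     _format_range_context(3, 6)  ->  '4,6'    (lines 4 through 6)
#     _format_range_context(3, 3)  ->  '3'      (an empty range after line 3)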
# See http://www.unix.org/single_unix_specification/
def context_diff(a, b, fromfile='', tofile='',
fromfiledate='', tofiledate='', n=3, lineterm='\n'):
r"""
Compare two sequences of lines; generate the delta as a context diff.
Context diffs are a compact way of showing line changes and a few
lines of context. The number of context lines is set by 'n' which
defaults to three.
By default, the diff control lines (those with *** or ---) are
created with a trailing newline. This is helpful so that inputs
created from file.readlines() result in diffs that are suitable for
file.writelines() since both the inputs and outputs have trailing
newlines.
For inputs that do not have trailing newlines, set the lineterm
argument to "" so that the output will be uniformly newline free.
The context diff format normally has a header for filenames and
modification times. Any or all of these may be specified using
strings for 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
The modification times are normally expressed in the ISO 8601 format.
If not specified, the strings default to blanks.
Example:
>>> print(''.join(context_diff('one\ntwo\nthree\nfour\n'.splitlines(True),
... 'zero\none\ntree\nfour\n'.splitlines(True), 'Original', 'Current')),
... end="")
*** Original
--- Current
***************
*** 1,4 ****
one
! two
! three
four
--- 1,4 ----
+ zero
one
! tree
four
"""
prefix = dict(insert='+ ', delete='- ', replace='! ', equal=' ')
started = False
for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n):
if not started:
started = True
fromdate = '\t{}'.format(fromfiledate) if fromfiledate else ''
todate = '\t{}'.format(tofiledate) if tofiledate else ''
yield '*** {}{}{}'.format(fromfile, fromdate, lineterm)
yield '--- {}{}{}'.format(tofile, todate, lineterm)
first, last = group[0], group[-1]
yield '***************' + lineterm
file1_range = _format_range_context(first[1], last[2])
yield '*** {} ****{}'.format(file1_range, lineterm)
if any(tag in {'replace', 'delete'} for tag, _, _, _, _ in group):
for tag, i1, i2, _, _ in group:
if tag != 'insert':
for line in a[i1:i2]:
yield prefix[tag] + line
file2_range = _format_range_context(first[3], last[4])
yield '--- {} ----{}'.format(file2_range, lineterm)
if any(tag in {'replace', 'insert'} for tag, _, _, _, _ in group):
for tag, _, _, j1, j2 in group:
if tag != 'delete':
for line in b[j1:j2]:
yield prefix[tag] + line
def ndiff(a, b, linejunk=None, charjunk=IS_CHARACTER_JUNK):
r"""
Compare `a` and `b` (lists of strings); return a `Differ`-style delta.
Optional keyword parameters `linejunk` and `charjunk` are for filter
functions (or None):
- linejunk: A function that should accept a single string argument, and
return true iff the string is junk. The default is None, and is
recommended; as of Python 2.3, an adaptive notion of "noise" lines is
used that does a good job on its own.
- charjunk: A function that should accept a string of length 1. The
default is module-level function IS_CHARACTER_JUNK, which filters out
whitespace characters (a blank or tab; note: bad idea to include newline
in this!).
Tools/scripts/ndiff.py is a command-line front-end to this function.
Example:
>>> diff = ndiff('one\ntwo\nthree\n'.splitlines(keepends=True),
... 'ore\ntree\nemu\n'.splitlines(keepends=True))
>>> print(''.join(diff), end="")
- one
? ^
+ ore
? ^
- two
- three
? -
+ tree
+ emu
"""
return Differ(linejunk, charjunk).compare(a, b)
def _mdiff(fromlines, tolines, context=None, linejunk=None,
charjunk=IS_CHARACTER_JUNK):
r"""Returns generator yielding marked up from/to side by side differences.
Arguments:
    fromlines -- list of text lines to be compared to tolines
tolines -- list of text lines to be compared to fromlines
context -- number of context lines to display on each side of difference,
if None, all from/to text lines will be generated.
linejunk -- passed on to ndiff (see ndiff documentation)
charjunk -- passed on to ndiff (see ndiff documentation)
This function returns an iterator which returns a tuple:
(from line tuple, to line tuple, boolean flag)
from/to line tuple -- (line num, line text)
line num -- integer or None (to indicate a context separation)
line text -- original line text with following markers inserted:
'\0+' -- marks start of added text
'\0-' -- marks start of deleted text
'\0^' -- marks start of changed text
'\1' -- marks end of added/deleted/changed text
boolean flag -- None indicates context separation, True indicates
either "from" or "to" line contains a change, otherwise False.
This function/iterator was originally developed to generate side by side
file difference for making HTML pages (see HtmlDiff class for example
usage).
Note, this function utilizes the ndiff function to generate the side by
side difference markup. Optional ndiff arguments may be passed to this
function and they in turn will be passed to ndiff.
"""
import re
# regular expression for finding intraline change indices
    change_re = re.compile(r'(\++|\-+|\^+)')
# create the difference iterator to generate the differences
diff_lines_iterator = ndiff(fromlines,tolines,linejunk,charjunk)
def _make_line(lines, format_key, side, num_lines=[0,0]):
"""Returns line of text with user's change markup and line formatting.
lines -- list of lines from the ndiff generator to produce a line of
text from. When producing the line of text to return, the
lines used are removed from this list.
format_key -- '+' return first line in list with "add" markup around
the entire line.
'-' return first line in list with "delete" markup around
the entire line.
'?' return first line in list with add/delete/change
intraline markup (indices obtained from second line)
None return first line in list with no markup
        side -- index into the num_lines list (0=from,1=to)
num_lines -- from/to current line number. This is NOT intended to be a
passed parameter. It is present as a keyword argument to
maintain memory of the current line numbers between calls
of this function.
Note, this function is purposefully not defined at the module scope so
that data it needs from its parent function (within whose context it
is defined) does not need to be of module scope.
"""
num_lines[side] += 1
# Handle case where no user markup is to be added, just return line of
# text with user's line format to allow for usage of the line number.
if format_key is None:
return (num_lines[side],lines.pop(0)[2:])
# Handle case of intraline changes
if format_key == '?':
text, markers = lines.pop(0), lines.pop(0)
# find intraline changes (store change type and indices in tuples)
sub_info = []
def record_sub_info(match_object,sub_info=sub_info):
sub_info.append([match_object.group(1)[0],match_object.span()])
return match_object.group(1)
change_re.sub(record_sub_info,markers)
# process each tuple inserting our special marks that won't be
# noticed by an xml/html escaper.
for key,(begin,end) in sub_info[::-1]:
text = text[0:begin]+'\0'+key+text[begin:end]+'\1'+text[end:]
text = text[2:]
# Handle case of add/delete entire line
else:
text = lines.pop(0)[2:]
# if line of text is just a newline, insert a space so there is
# something for the user to highlight and see.
if not text:
text = ' '
# insert marks that won't be noticed by an xml/html escaper.
text = '\0' + format_key + text + '\1'
# Return line of text, first allow user's line formatter to do its
# thing (such as adding the line number) then replace the special
        # marks with the user's change markup.
return (num_lines[side],text)
def _line_iterator():
"""Yields from/to lines of text with a change indication.
This function is an iterator. It itself pulls lines from a
differencing iterator, processes them and yields them. When it can
it yields both a "from" and a "to" line, otherwise it will yield one
or the other. In addition to yielding the lines of from/to text, a
boolean flag is yielded to indicate if the text line(s) have
differences in them.
Note, this function is purposefully not defined at the module scope so
that data it needs from its parent function (within whose context it
is defined) does not need to be of module scope.
"""
lines = []
num_blanks_pending, num_blanks_to_yield = 0, 0
while True:
# Load up next 4 lines so we can look ahead, create strings which
# are a concatenation of the first character of each of the 4 lines
# so we can do some very readable comparisons.
while len(lines) < 4:
try:
lines.append(next(diff_lines_iterator))
except StopIteration:
lines.append('X')
s = ''.join([line[0] for line in lines])
if s.startswith('X'):
# When no more lines, pump out any remaining blank lines so the
# corresponding add/delete lines get a matching blank line so
# all line pairs get yielded at the next level.
num_blanks_to_yield = num_blanks_pending
elif s.startswith('-?+?'):
# simple intraline change
yield _make_line(lines,'?',0), _make_line(lines,'?',1), True
continue
elif s.startswith('--++'):
# in delete block, add block coming: we do NOT want to get
# caught up on blank lines yet, just process the delete line
num_blanks_pending -= 1
yield _make_line(lines,'-',0), None, True
continue
elif s.startswith(('--?+', '--+', '- ')):
                # in delete block and see an intraline change or unchanged line
# coming: yield the delete line and then blanks
from_line,to_line = _make_line(lines,'-',0), None
num_blanks_to_yield,num_blanks_pending = num_blanks_pending-1,0
elif s.startswith('-+?'):
# intraline change
yield _make_line(lines,None,0), _make_line(lines,'?',1), True
continue
elif s.startswith('-?+'):
# intraline change
yield _make_line(lines,'?',0), _make_line(lines,None,1), True
continue
elif s.startswith('-'):
# delete FROM line
num_blanks_pending -= 1
yield _make_line(lines,'-',0), None, True
continue
elif s.startswith('+--'):
# in add block, delete block coming: we do NOT want to get
# caught up on blank lines yet, just process the add line
num_blanks_pending += 1
yield None, _make_line(lines,'+',1), True
continue
elif s.startswith(('+ ', '+-')):
# will be leaving an add block: yield blanks then add line
from_line, to_line = None, _make_line(lines,'+',1)
num_blanks_to_yield,num_blanks_pending = num_blanks_pending+1,0
elif s.startswith('+'):
# inside an add block, yield the add line
num_blanks_pending += 1
yield None, _make_line(lines,'+',1), True
continue
elif s.startswith(' '):
# unchanged text, yield it to both sides
yield _make_line(lines[:],None,0),_make_line(lines,None,1),False
continue
# Catch up on the blank lines so when we yield the next from/to
# pair, they are lined up.
while(num_blanks_to_yield < 0):
num_blanks_to_yield += 1
yield None,('','\n'),True
while(num_blanks_to_yield > 0):
num_blanks_to_yield -= 1
yield ('','\n'),None,True
if s.startswith('X'):
                return  # exhausted; raising StopIteration in a generator breaks under PEP 479
else:
yield from_line,to_line,True
def _line_pair_iterator():
"""Yields from/to lines of text with a change indication.
This function is an iterator. It itself pulls lines from the line
iterator. Its difference from that iterator is that this function
always yields a pair of from/to text lines (with the change
indication). If necessary it will collect single from/to lines
        until it has a matching from/to pair to yield.
Note, this function is purposefully not defined at the module scope so
that data it needs from its parent function (within whose context it
is defined) does not need to be of module scope.
"""
line_iterator = _line_iterator()
fromlines,tolines=[],[]
while True:
# Collecting lines of text until we have a from/to pair
while (len(fromlines)==0 or len(tolines)==0):
                try:
                    from_line, to_line, found_diff = next(line_iterator)
                except StopIteration:
                    return
if from_line is not None:
fromlines.append((from_line,found_diff))
if to_line is not None:
tolines.append((to_line,found_diff))
# Once we have a pair, remove them from the collection and yield it
from_line, fromDiff = fromlines.pop(0)
to_line, to_diff = tolines.pop(0)
yield (from_line,to_line,fromDiff or to_diff)
# Handle case where user does not want context differencing, just yield
# them up without doing anything else with them.
line_pair_iterator = _line_pair_iterator()
if context is None:
        yield from line_pair_iterator
# Handle case where user wants context differencing. We must do some
# storage of lines until we know for sure that they are to be yielded.
else:
context += 1
lines_to_write = 0
while True:
# Store lines up until we find a difference, note use of a
# circular queue because we only need to keep around what
# we need for context.
index, contextLines = 0, [None]*(context)
found_diff = False
while(found_diff is False):
                try:
                    from_line, to_line, found_diff = next(line_pair_iterator)
                except StopIteration:
                    return
i = index % context
contextLines[i] = (from_line, to_line, found_diff)
index += 1
# Yield lines that we have collected so far, but first yield
# the user's separator.
if index > context:
yield None, None, None
lines_to_write = context
else:
lines_to_write = index
index = 0
while(lines_to_write):
i = index % context
index += 1
yield contextLines[i]
lines_to_write -= 1
# Now yield the context lines after the change
lines_to_write = context-1
while(lines_to_write):
                try:
                    from_line, to_line, found_diff = next(line_pair_iterator)
                except StopIteration:
                    return
# If another change within the context, extend the context
if found_diff:
lines_to_write = context-1
else:
lines_to_write -= 1
yield from_line, to_line, found_diff
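# Illustrative example (added for exposition; not part of the original module)
# of the tuples _mdiff yields.  For fromlines=['one\n', 'two\n'] and
# tolines=['one\n', 'too\n'] the generator produces:
#
#     ((1, 'one\n'), (1, 'one\n'), False)
#     ((2, 't\0^w\1o\n'), (2, 't\0^o\1o\n'), True)
#
# where '\0^' marks the start of changed text and '\1' marks its end, as
# described in the docstring above.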
_file_template = """
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html>
<head>
<meta http-equiv="Content-Type"
content="text/html; charset=ISO-8859-1" />
<title></title>
<style type="text/css">%(styles)s
</style>
</head>
<body>
%(table)s%(legend)s
</body>
</html>"""
_styles = """
table.diff {font-family:Courier; border:medium;}
.diff_header {background-color:#e0e0e0}
td.diff_header {text-align:right}
.diff_next {background-color:#c0c0c0}
.diff_add {background-color:#aaffaa}
.diff_chg {background-color:#ffff77}
.diff_sub {background-color:#ffaaaa}"""
_table_template = """
<table class="diff" id="difflib_chg_%(prefix)s_top"
cellspacing="0" cellpadding="0" rules="groups" >
<colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
<colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
%(header_row)s
<tbody>
%(data_rows)s </tbody>
</table>"""
_legend = """
<table class="diff" summary="Legends">
<tr> <th colspan="2"> Legends </th> </tr>
<tr> <td> <table border="" summary="Colors">
<tr><th> Colors </th> </tr>
<tr><td class="diff_add"> Added </td></tr>
<tr><td class="diff_chg">Changed</td> </tr>
<tr><td class="diff_sub">Deleted</td> </tr>
</table></td>
<td> <table border="" summary="Links">
<tr><th colspan="2"> Links </th> </tr>
<tr><td>(f)irst change</td> </tr>
<tr><td>(n)ext change</td> </tr>
<tr><td>(t)op</td> </tr>
</table></td> </tr>
</table>"""
class HtmlDiff(object):
"""For producing HTML side by side comparison with change highlights.
This class can be used to create an HTML table (or a complete HTML file
containing the table) showing a side by side, line by line comparison
of text with inter-line and intra-line change highlights. The table can
be generated in either full or contextual difference mode.
The following methods are provided for HTML generation:
make_table -- generates HTML for a single side by side table
make_file -- generates complete HTML file with a single side by side table
See tools/scripts/diff.py for an example usage of this class.
"""
_file_template = _file_template
_styles = _styles
_table_template = _table_template
_legend = _legend
_default_prefix = 0
def __init__(self,tabsize=8,wrapcolumn=None,linejunk=None,
charjunk=IS_CHARACTER_JUNK):
"""HtmlDiff instance initializer
Arguments:
tabsize -- tab stop spacing, defaults to 8.
wrapcolumn -- column number where lines are broken and wrapped,
defaults to None where lines are not wrapped.
        linejunk,charjunk -- keyword arguments passed into ndiff() (used by
HtmlDiff() to generate the side by side HTML differences). See
ndiff() documentation for argument default values and descriptions.
"""
self._tabsize = tabsize
self._wrapcolumn = wrapcolumn
self._linejunk = linejunk
self._charjunk = charjunk
def make_file(self,fromlines,tolines,fromdesc='',todesc='',context=False,
numlines=5):
"""Returns HTML file of side by side comparison with change highlights
Arguments:
fromlines -- list of "from" lines
tolines -- list of "to" lines
fromdesc -- "from" file column header string
todesc -- "to" file column header string
context -- set to True for contextual differences (defaults to False
which shows full differences).
numlines -- number of context lines. When context is set True,
controls number of lines displayed before and after the change.
When context is False, controls the number of lines to place
the "next" link anchors before the next change (so click of
"next" link jumps to just before the change).
"""
return self._file_template % dict(
styles = self._styles,
legend = self._legend,
table = self.make_table(fromlines,tolines,fromdesc,todesc,
context=context,numlines=numlines))
def _tab_newline_replace(self,fromlines,tolines):
"""Returns from/to line lists with tabs expanded and newlines removed.
Instead of tab characters being replaced by the number of spaces
needed to fill in to the next tab stop, this function will fill
the space with tab characters. This is done so that the difference
algorithms can identify changes in a file when tabs are replaced by
spaces and vice versa. At the end of the HTML generation, the tab
characters will be replaced with a nonbreakable space.
"""
def expand_tabs(line):
# hide real spaces
line = line.replace(' ','\0')
# expand tabs into spaces
line = line.expandtabs(self._tabsize)
# replace spaces from expanded tabs back into tab characters
# (we'll replace them with markup after we do differencing)
line = line.replace(' ','\t')
return line.replace('\0',' ').rstrip('\n')
fromlines = [expand_tabs(line) for line in fromlines]
tolines = [expand_tabs(line) for line in tolines]
return fromlines,tolines
def _split_line(self,data_list,line_num,text):
"""Builds list of text lines by splitting text lines at wrap point
This function will determine if the input text line needs to be
wrapped (split) into separate lines. If so, the first wrap point
will be determined and the first line appended to the output
text line list. This function is used recursively to handle
the second part of the split line to further split it.
"""
# if blank line or context separator, just add it to the output list
if not line_num:
data_list.append((line_num,text))
return
# if line text doesn't need wrapping, just add it to the output list
size = len(text)
max = self._wrapcolumn
if (size <= max) or ((size -(text.count('\0')*3)) <= max):
data_list.append((line_num,text))
return
# scan text looking for the wrap point, keeping track if the wrap
# point is inside markers
i = 0
n = 0
mark = ''
while n < max and i < size:
if text[i] == '\0':
i += 1
mark = text[i]
i += 1
elif text[i] == '\1':
i += 1
mark = ''
else:
i += 1
n += 1
# wrap point is inside text, break it up into separate lines
line1 = text[:i]
line2 = text[i:]
# if wrap point is inside markers, place end marker at end of first
# line and start marker at beginning of second line because each
# line will have its own table tag markup around it.
if mark:
line1 = line1 + '\1'
line2 = '\0' + mark + line2
# tack on first line onto the output list
data_list.append((line_num,line1))
# use this routine again to wrap the remaining text
self._split_line(data_list,'>',line2)
def _line_wrapper(self,diffs):
"""Returns iterator that splits (wraps) mdiff text lines"""
# pull from/to data and flags from mdiff iterator
for fromdata,todata,flag in diffs:
# check for context separators and pass them through
if flag is None:
yield fromdata,todata,flag
continue
(fromline,fromtext),(toline,totext) = fromdata,todata
# for each from/to line split it at the wrap column to form
# list of text lines.
fromlist,tolist = [],[]
self._split_line(fromlist,fromline,fromtext)
self._split_line(tolist,toline,totext)
# yield from/to line in pairs inserting blank lines as
# necessary when one side has more wrapped lines
while fromlist or tolist:
if fromlist:
fromdata = fromlist.pop(0)
else:
fromdata = ('',' ')
if tolist:
todata = tolist.pop(0)
else:
todata = ('',' ')
yield fromdata,todata,flag
def _collect_lines(self,diffs):
"""Collects mdiff output into separate lists
Before storing the mdiff from/to data into a list, it is converted
into a single line of text with HTML markup.
"""
fromlist,tolist,flaglist = [],[],[]
# pull from/to data and flags from mdiff style iterator
for fromdata,todata,flag in diffs:
try:
# store HTML markup of the lines into the lists
fromlist.append(self._format_line(0,flag,*fromdata))
tolist.append(self._format_line(1,flag,*todata))
except TypeError:
# exceptions occur for lines where context separators go
fromlist.append(None)
tolist.append(None)
flaglist.append(flag)
return fromlist,tolist,flaglist
def _format_line(self,side,flag,linenum,text):
"""Returns HTML markup of "from" / "to" text lines
side -- 0 or 1 indicating "from" or "to" text
flag -- indicates if difference on line
linenum -- line number (used for line number column)
text -- line text to be marked up
"""
try:
linenum = '%d' % linenum
id = ' id="%s%s"' % (self._prefix[side],linenum)
except TypeError:
# handle blank lines where linenum is '>' or ''
id = ''
# replace those things that would get confused with HTML symbols
        text=text.replace("&","&amp;").replace(">","&gt;").replace("<","&lt;")
# make space non-breakable so they don't get compressed or line wrapped
        text = text.replace(' ','&nbsp;').rstrip()
return '<td class="diff_header"%s>%s</td><td nowrap="nowrap">%s</td>' \
% (id,linenum,text)
def _make_prefix(self):
"""Create unique anchor prefixes"""
# Generate a unique anchor prefix so multiple tables
# can exist on the same HTML page without conflicts.
fromprefix = "from%d_" % HtmlDiff._default_prefix
toprefix = "to%d_" % HtmlDiff._default_prefix
HtmlDiff._default_prefix += 1
# store prefixes so line format method has access
self._prefix = [fromprefix,toprefix]
def _convert_flags(self,fromlist,tolist,flaglist,context,numlines):
"""Makes list of "next" links"""
# all anchor names will be generated using the unique "to" prefix
toprefix = self._prefix[1]
# process change flags, generating middle column of next anchors/links
next_id = ['']*len(flaglist)
next_href = ['']*len(flaglist)
num_chg, in_change = 0, False
last = 0
for i,flag in enumerate(flaglist):
if flag:
if not in_change:
in_change = True
last = i
# at the beginning of a change, drop an anchor a few lines
# (the context lines) before the change for the previous
# link
i = max([0,i-numlines])
next_id[i] = ' id="difflib_chg_%s_%d"' % (toprefix,num_chg)
# at the beginning of a change, drop a link to the next
# change
num_chg += 1
next_href[last] = '<a href="#difflib_chg_%s_%d">n</a>' % (
toprefix,num_chg)
else:
in_change = False
# check for cases where there is no content to avoid exceptions
if not flaglist:
flaglist = [False]
next_id = ['']
next_href = ['']
last = 0
if context:
fromlist = ['<td></td><td> No Differences Found </td>']
tolist = fromlist
else:
                fromlist = tolist = ['<td></td><td>&nbsp;Empty File&nbsp;</td>']
# if not a change on first line, drop a link
if not flaglist[0]:
next_href[0] = '<a href="#difflib_chg_%s_0">f</a>' % toprefix
# redo the last link to link to the top
next_href[last] = '<a href="#difflib_chg_%s_top">t</a>' % (toprefix)
return fromlist,tolist,flaglist,next_href,next_id
def make_table(self,fromlines,tolines,fromdesc='',todesc='',context=False,
numlines=5):
"""Returns HTML table of side by side comparison with change highlights
Arguments:
fromlines -- list of "from" lines
tolines -- list of "to" lines
fromdesc -- "from" file column header string
todesc -- "to" file column header string
context -- set to True for contextual differences (defaults to False
which shows full differences).
numlines -- number of context lines. When context is set True,
controls number of lines displayed before and after the change.
When context is False, controls the number of lines to place
the "next" link anchors before the next change (so click of
"next" link jumps to just before the change).
"""
# make unique anchor prefixes so that multiple tables may exist
# on the same page without conflict.
self._make_prefix()
# change tabs to spaces before it gets more difficult after we insert
# markup
fromlines,tolines = self._tab_newline_replace(fromlines,tolines)
# create diffs iterator which generates side by side from/to data
if context:
context_lines = numlines
else:
context_lines = None
diffs = _mdiff(fromlines,tolines,context_lines,linejunk=self._linejunk,
charjunk=self._charjunk)
# set up iterator to wrap lines that exceed desired width
if self._wrapcolumn:
diffs = self._line_wrapper(diffs)
# collect up from/to lines and flags into lists (also format the lines)
fromlist,tolist,flaglist = self._collect_lines(diffs)
# process change flags, generating middle column of next anchors/links
fromlist,tolist,flaglist,next_href,next_id = self._convert_flags(
fromlist,tolist,flaglist,context,numlines)
s = []
fmt = ' <tr><td class="diff_next"%s>%s</td>%s' + \
'<td class="diff_next">%s</td>%s</tr>\n'
for i in range(len(flaglist)):
if flaglist[i] is None:
                # mdiff yields None on separator lines; skip the bogus ones
# generated for the first line
if i > 0:
s.append(' </tbody> \n <tbody>\n')
else:
s.append( fmt % (next_id[i],next_href[i],fromlist[i],
next_href[i],tolist[i]))
if fromdesc or todesc:
header_row = '<thead><tr>%s%s%s%s</tr></thead>' % (
'<th class="diff_next"><br /></th>',
'<th colspan="2" class="diff_header">%s</th>' % fromdesc,
'<th class="diff_next"><br /></th>',
'<th colspan="2" class="diff_header">%s</th>' % todesc)
else:
header_row = ''
table = self._table_template % dict(
data_rows=''.join(s),
header_row=header_row,
prefix=self._prefix[1])
return table.replace('\0+','<span class="diff_add">'). \
replace('\0-','<span class="diff_sub">'). \
replace('\0^','<span class="diff_chg">'). \
replace('\1','</span>'). \
replace('\t',' ')
del re
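# Illustrative usage sketch (not part of the original module): how the
# make_table() arguments documented above are typically combined, assuming the
# enclosing class above is the module's HtmlDiff. The file descriptions and
# input lines below are hypothetical placeholder data.
def _example_make_table():
    from_lines = ['one\n', 'two\n', 'three\n']
    to_lines = ['ore\n', 'tree\n', 'emu\n']
    differ = HtmlDiff(wrapcolumn=60)
    # context=True limits output to changed regions plus numlines of context
    # on either side; the result is an HTML <table> fragment.
    return differ.make_table(from_lines, to_lines,
                             fromdesc='before.txt', todesc='after.txt',
                             context=True, numlines=3)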
def restore(delta, which):
r"""
Generate one of the two sequences that generated a delta.
Given a `delta` produced by `Differ.compare()` or `ndiff()`, extract
lines originating from file 1 or 2 (parameter `which`), stripping off line
prefixes.
Examples:
>>> diff = ndiff('one\ntwo\nthree\n'.splitlines(keepends=True),
... 'ore\ntree\nemu\n'.splitlines(keepends=True))
>>> diff = list(diff)
>>> print(''.join(restore(diff, 1)), end="")
one
two
three
>>> print(''.join(restore(diff, 2)), end="")
ore
tree
emu
"""
try:
tag = {1: "- ", 2: "+ "}[int(which)]
except KeyError:
raise ValueError('unknown delta choice (must be 1 or 2): %r'
% which)
prefixes = (" ", tag)
for line in delta:
if line[:2] in prefixes:
yield line[2:]
def _test():
import doctest, difflib
return doctest.testmod(difflib)
if __name__ == "__main__":
_test()
| lgpl-3.0 |
Bionetbook/bionetbook | bnbapp/bionetbook/protocols/forms/verbs/resuspend.py | 2 | 1489 | from protocols.forms import forms
from core.utils import VOLUME_UNITS, CONCENTRATION_UNITS, TIME_UNITS
class ResuspendForm(forms.VerbForm):
name = "Resuspend"
slug = "resuspend"
# has_component = True
has_manual = True
layers = ['item_to_act', 'reagent', 'settify']
item_to_act = forms.CharField(required=False, help_text='what are you resuspending?', label='item to resuspend')
    reagent = forms.CharField(required=False, help_text='what are you resuspending it with?')
min_conc = forms.FloatField(required=False, widget=forms.NumberInput(attrs={'step':'any'}))
max_conc = forms.FloatField(required=False, widget=forms.NumberInput(attrs={'step':'any'}))
conc_units = forms.ChoiceField(required=False, choices=CONCENTRATION_UNITS )
conc_comment = forms.CharField(required=False)
min_vol = forms.FloatField(required=False, widget=forms.NumberInput(attrs={'step':'any'}))
max_vol = forms.FloatField(required=False, widget=forms.NumberInput(attrs={'step':'any'}))
vol_units = forms.ChoiceField(required=False, choices=VOLUME_UNITS )
vol_comment = forms.CharField(required=False)
min_time = forms.FloatField(required=False, widget=forms.NumberInput(attrs={'step':'any'}))
max_time = forms.FloatField(required=False, widget=forms.NumberInput(attrs={'step':'any'}))
time_units = forms.ChoiceField(required=False, choices=TIME_UNITS, help_text='in seconds', initial = 'sec' )
time_comment = forms.CharField(required=False)
| mit |
EKiefer/edge-starter | py34env/Lib/site-packages/authtools/views.py | 4 | 11578 | """
Mostly equivalent to the views from django.contrib.auth.views, but
implemented as class-based views.
"""
from __future__ import unicode_literals
import warnings
from django.conf import settings
from django.contrib.auth import get_user_model, REDIRECT_FIELD_NAME
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import (AuthenticationForm, SetPasswordForm,
PasswordChangeForm, PasswordResetForm)
from django.contrib.auth.tokens import default_token_generator
from django.contrib import auth
try:
from django.contrib.sites.shortcuts import get_current_site
except ImportError:
from django.contrib.sites.models import get_current_site # Django < 1.7
from django.core.urlresolvers import reverse_lazy
from django.shortcuts import redirect, resolve_url
from django.utils.functional import lazy
from django.utils.http import base36_to_int, is_safe_url
from django.utils import six
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from django.views.generic import FormView, TemplateView, RedirectView
try:
from django.contrib.auth import update_session_auth_hash
except ImportError:
# Django < 1.7
def update_session_auth_hash(request, user):
pass
User = get_user_model()
def _safe_resolve_url(url):
"""
Previously, resolve_url_lazy would fail if the url was a unicode object.
See <https://github.com/fusionbox/django-authtools/issues/13> for more
information.
Thanks to GitHub user alanwj for pointing out the problem and providing
this solution.
"""
return six.text_type(resolve_url(url))
resolve_url_lazy = lazy(_safe_resolve_url, six.text_type)
class WithCurrentSiteMixin(object):
def get_current_site(self):
return get_current_site(self.request)
def get_context_data(self, **kwargs):
kwargs = super(WithCurrentSiteMixin, self).get_context_data(**kwargs)
current_site = self.get_current_site()
kwargs.update({
'site': current_site,
'site_name': current_site.name,
})
return kwargs
class WithNextUrlMixin(object):
redirect_field_name = REDIRECT_FIELD_NAME
success_url = None
def get_next_url(self):
request = self.request
redirect_to = request.POST.get(self.redirect_field_name,
request.GET.get(self.redirect_field_name, ''))
if not redirect_to:
return
if is_safe_url(redirect_to, host=self.request.get_host()):
return redirect_to
# This mixin can be mixed with FormViews and RedirectViews. They
# each use a different method to get the URL to redirect to, so we
# need to provide both methods.
def get_success_url(self):
return self.get_next_url() or super(WithNextUrlMixin, self).get_success_url()
def get_redirect_url(self, **kwargs):
return self.get_next_url() or super(WithNextUrlMixin, self).get_redirect_url(**kwargs)
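# Illustrative sketch (not part of the original module): because the mixin
# above overrides both get_success_url() and get_redirect_url(), it composes
# with a RedirectView just as it does with the FormView-based views further
# down. The view below is a hypothetical example, not part of the API.
class _ExampleNextUrlRedirect(WithNextUrlMixin, RedirectView):
    url = '/'  # hypothetical fallback used when no "next" parameter is given
    permanent = False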
def DecoratorMixin(decorator):
"""
Converts a decorator written for a function view into a mixin for a
class-based view.
::
LoginRequiredMixin = DecoratorMixin(login_required)
class MyView(LoginRequiredMixin):
pass
class SomeView(DecoratorMixin(some_decorator),
DecoratorMixin(something_else)):
pass
"""
class Mixin(object):
__doc__ = decorator.__doc__
@classmethod
def as_view(cls, *args, **kwargs):
view = super(Mixin, cls).as_view(*args, **kwargs)
return decorator(view)
Mixin.__name__ = str('DecoratorMixin(%s)' % decorator.__name__)
return Mixin
NeverCacheMixin = DecoratorMixin(never_cache)
CsrfProtectMixin = DecoratorMixin(csrf_protect)
LoginRequiredMixin = DecoratorMixin(login_required)
SensitivePostParametersMixin = DecoratorMixin(
sensitive_post_parameters('password', 'old_password', 'password1',
'password2', 'new_password1', 'new_password2')
)
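# Illustrative sketch (not part of the original module): the mixins produced
# by DecoratorMixin compose onto ordinary class-based views; a hypothetical
# login-protected page could be written as follows.
class _ExampleProtectedView(LoginRequiredMixin, NeverCacheMixin, TemplateView):
    template_name = 'registration/profile.html'  # hypothetical template path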
class AuthDecoratorsMixin(NeverCacheMixin, CsrfProtectMixin, SensitivePostParametersMixin):
pass
class LoginView(AuthDecoratorsMixin, WithCurrentSiteMixin, WithNextUrlMixin, FormView):
form_class = AuthenticationForm
template_name = 'registration/login.html'
allow_authenticated = True
success_url = resolve_url_lazy(settings.LOGIN_REDIRECT_URL)
# BBB: This is deprecated (See LoginView.get_allow_authenticated)
disallow_authenticated = None
def get_allow_authenticated(self):
if self.disallow_authenticated is not None:
warnings.warn("disallow_authenticated is deprecated. Please use allow_authenticated",
DeprecationWarning)
return not self.disallow_authenticated
else:
return self.allow_authenticated
def dispatch(self, *args, **kwargs):
allow_authenticated = self.get_allow_authenticated()
if not allow_authenticated and self.request.user.is_authenticated():
return redirect(self.get_success_url())
return super(LoginView, self).dispatch(*args, **kwargs)
def form_valid(self, form):
auth.login(self.request, form.get_user())
return super(LoginView, self).form_valid(form)
def get_context_data(self, **kwargs):
kwargs = super(LoginView, self).get_context_data(**kwargs)
kwargs.update({
self.redirect_field_name: self.request.GET.get(
self.redirect_field_name, '',
),
})
return kwargs
class LogoutView(NeverCacheMixin, WithCurrentSiteMixin, WithNextUrlMixin, TemplateView, RedirectView):
template_name = 'registration/logged_out.html'
permanent = False
def get(self, *args, **kwargs):
auth.logout(self.request)
# If we have a url to redirect to, do it. Otherwise render the logged-out template.
if self.get_redirect_url(**kwargs):
return RedirectView.get(self, *args, **kwargs)
else:
return TemplateView.get(self, *args, **kwargs)
class PasswordChangeView(LoginRequiredMixin, WithNextUrlMixin, AuthDecoratorsMixin, FormView):
template_name = 'registration/password_change_form.html'
form_class = PasswordChangeForm
success_url = reverse_lazy('password_change_done')
def get_form_kwargs(self):
kwargs = super(PasswordChangeView, self).get_form_kwargs()
kwargs['user'] = self.get_user()
return kwargs
def get_user(self):
return self.request.user
def form_valid(self, form):
form.save()
# Updating the password logs out all other sessions for the user
# except the current one if
# django.contrib.auth.middleware.SessionAuthenticationMiddleware
# is enabled.
update_session_auth_hash(self.request, form.user)
return super(PasswordChangeView, self).form_valid(form)
class PasswordChangeDoneView(LoginRequiredMixin, TemplateView):
template_name = 'registration/password_change_done.html'
# 4 views for password reset:
# - PasswordResetView sends the mail
# - PasswordResetDoneView shows a success message for the above
# - PasswordResetConfirmView checks the link the user clicked and
# prompts for a new password
# - PasswordResetCompleteView shows a success message for the above
class PasswordResetView(CsrfProtectMixin, FormView):
template_name = 'registration/password_reset_form.html'
token_generator = default_token_generator
success_url = reverse_lazy('password_reset_done')
domain_override = None
subject_template_name = 'registration/password_reset_subject.txt'
email_template_name = 'registration/password_reset_email.html'
html_email_template_name = None
from_email = None
form_class = PasswordResetForm
def form_valid(self, form):
form.save(
domain_override=self.domain_override,
subject_template_name=self.subject_template_name,
email_template_name=self.email_template_name,
token_generator=self.token_generator,
from_email=self.from_email,
request=self.request,
use_https=self.request.is_secure(),
html_email_template_name=self.html_email_template_name,
)
return super(PasswordResetView, self).form_valid(form)
class PasswordResetDoneView(TemplateView):
template_name = 'registration/password_reset_done.html'
class PasswordResetConfirmView(AuthDecoratorsMixin, FormView):
template_name = 'registration/password_reset_confirm.html'
token_generator = default_token_generator
form_class = SetPasswordForm
success_url = reverse_lazy('password_reset_complete')
def dispatch(self, *args, **kwargs):
assert self.kwargs.get('token') is not None
self.user = self.get_user()
return super(PasswordResetConfirmView, self).dispatch(*args, **kwargs)
def get_queryset(self):
return User._default_manager.all()
def get_user(self):
# django 1.5 uses uidb36, django 1.6 uses uidb64
uidb36 = self.kwargs.get('uidb36')
uidb64 = self.kwargs.get('uidb64')
assert bool(uidb36) ^ bool(uidb64)
try:
if uidb36:
uid = base36_to_int(uidb36)
else:
# urlsafe_base64_decode is not available in django 1.5
from django.utils.http import urlsafe_base64_decode
uid = urlsafe_base64_decode(uidb64)
return self.get_queryset().get(pk=uid)
except (TypeError, ValueError, OverflowError, User.DoesNotExist):
return None
def valid_link(self):
user = self.user
return user is not None and self.token_generator.check_token(user, self.kwargs.get('token'))
def get_form_kwargs(self):
kwargs = super(PasswordResetConfirmView, self).get_form_kwargs()
kwargs['user'] = self.user
return kwargs
def get_context_data(self, **kwargs):
kwargs = super(PasswordResetConfirmView, self).get_context_data(**kwargs)
if self.valid_link():
kwargs['validlink'] = True
else:
kwargs['validlink'] = False
kwargs['form'] = None
return kwargs
def form_valid(self, form):
if not self.valid_link():
return self.form_invalid(form)
self.save_form(form)
return super(PasswordResetConfirmView, self).form_valid(form)
def save_form(self, form):
return form.save()
class PasswordResetConfirmAndLoginView(PasswordResetConfirmView):
success_url = resolve_url_lazy(settings.LOGIN_REDIRECT_URL)
def save_form(self, form):
ret = super(PasswordResetConfirmAndLoginView, self).save_form(form)
user = auth.authenticate(username=self.user.get_username(),
password=form.cleaned_data['new_password1'])
auth.login(self.request, user)
return ret
class PasswordResetCompleteView(TemplateView):
template_name = 'registration/password_reset_complete.html'
login_url = settings.LOGIN_URL
def get_login_url(self):
return resolve_url(self.login_url)
def get_context_data(self, **kwargs):
kwargs = super(PasswordResetCompleteView, self).get_context_data(**kwargs)
kwargs['login_url'] = self.get_login_url()
return kwargs
| mit |
larsmans/numpy | numpy/random/tests/test_random.py | 9 | 31521 | from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises, assert_equal,
assert_warns)
from numpy import random
from numpy.compat import asbytes
import sys
class TestSeed(TestCase):
def test_scalar(self):
s = np.random.RandomState(0)
assert_equal(s.randint(1000), 684)
s = np.random.RandomState(4294967295)
assert_equal(s.randint(1000), 419)
def test_array(self):
s = np.random.RandomState(range(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState(np.arange(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState([0])
assert_equal(s.randint(1000), 973)
s = np.random.RandomState([4294967295])
assert_equal(s.randint(1000), 265)
def test_invalid_scalar(self):
        # seed must be an unsigned 32-bit integer
assert_raises(TypeError, np.random.RandomState, -0.5)
assert_raises(ValueError, np.random.RandomState, -1)
def test_invalid_array(self):
        # seed must be an unsigned 32-bit integer
assert_raises(TypeError, np.random.RandomState, [-0.5])
assert_raises(ValueError, np.random.RandomState, [-1])
assert_raises(ValueError, np.random.RandomState, [4294967296])
assert_raises(ValueError, np.random.RandomState, [1, 2, 4294967296])
assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296])
class TestBinomial(TestCase):
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
# This test addresses issue #3480.
zeros = np.zeros(2, dtype='int')
for p in [0, .5, 1]:
assert_(random.binomial(0, p) == 0)
np.testing.assert_array_equal(random.binomial(zeros, p), zeros)
def test_p_is_nan(self):
# Issue #4571.
assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial(TestCase):
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.randint(-5, -1) < -1)
x = random.randint(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, np.random.multinomial, 1, p,
np.float(1))
class TestSetState(TestCase):
def setUp(self):
self.seed = 1234567890
self.prng = random.RandomState(self.seed)
self.state = self.prng.get_state()
def test_basic(self):
old = self.prng.tomaxint(16)
self.prng.set_state(self.state)
new = self.prng.tomaxint(16)
assert_(np.all(old == new))
def test_gaussian_reset(self):
# Make sure the cached every-other-Gaussian is reset.
old = self.prng.standard_normal(size=3)
self.prng.set_state(self.state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_gaussian_reset_in_media_res(self):
# When the state is saved with a cached Gaussian, make sure the
# cached Gaussian is restored.
self.prng.standard_normal()
state = self.prng.get_state()
old = self.prng.standard_normal(size=3)
self.prng.set_state(state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_backwards_compatibility(self):
# Make sure we can accept old state tuples that do not have the
# cached Gaussian value.
old_state = self.state[:-2]
x1 = self.prng.standard_normal(size=16)
self.prng.set_state(old_state)
x2 = self.prng.standard_normal(size=16)
self.prng.set_state(self.state)
x3 = self.prng.standard_normal(size=16)
assert_(np.all(x1 == x2))
assert_(np.all(x1 == x3))
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
self.prng.negative_binomial(0.5, 0.5)
class TestRandomDist(TestCase):
    # Make sure the random distributions return the correct values for a
    # given seed
def setUp(self):
self.seed = 1234567890
def test_rand(self):
np.random.seed(self.seed)
actual = np.random.rand(3, 2)
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_randn(self):
np.random.seed(self.seed)
actual = np.random.randn(3, 2)
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_randint(self):
np.random.seed(self.seed)
actual = np.random.randint(-99, 99, size=(3, 2))
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
np.testing.assert_array_equal(actual, desired)
def test_random_integers(self):
np.random.seed(self.seed)
actual = np.random.random_integers(-99, 99, size=(3, 2))
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
np.testing.assert_array_equal(actual, desired)
def test_random_sample(self):
np.random.seed(self.seed)
actual = np.random.random_sample((3, 2))
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_choice_uniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4)
desired = np.array([2, 3, 2, 3])
np.testing.assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([1, 1, 2, 2])
np.testing.assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False)
desired = np.array([0, 1, 3])
np.testing.assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False,
p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([2, 3, 1])
np.testing.assert_array_equal(actual, desired)
def test_choice_noninteger(self):
np.random.seed(self.seed)
actual = np.random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['c', 'd', 'c', 'd'])
np.testing.assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = np.random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2, replace=False,
p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(np.random.choice(2, replace=True)))
assert_(np.isscalar(np.random.choice(2, replace=False)))
assert_(np.isscalar(np.random.choice(2, replace=True, p=p)))
assert_(np.isscalar(np.random.choice(2, replace=False, p=p)))
assert_(np.isscalar(np.random.choice([1, 2], replace=True)))
assert_(np.random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(np.random.choice(2, s, replace=True)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False)))
assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True)))
assert_(np.random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_(np.random.choice(6, s, replace=True).shape, s)
assert_(np.random.choice(6, s, replace=False).shape, s)
assert_(np.random.choice(6, s, replace=True, p=p).shape, s)
assert_(np.random.choice(6, s, replace=False, p=p).shape, s)
assert_(np.random.choice(np.arange(6), s, replace=True).shape, s)
def test_bytes(self):
np.random.seed(self.seed)
actual = np.random.bytes(10)
desired = asbytes('\x82Ui\x9e\xff\x97+Wf\xa5')
np.testing.assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays, and multidimensional versions of both:
for conv in [lambda x: x,
np.asarray,
lambda x: [(i, i) for i in x],
lambda x: np.asarray([(i, i) for i in x])]:
np.random.seed(self.seed)
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
np.random.shuffle(alist)
actual = alist
desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
np.testing.assert_array_equal(actual, desired)
def test_shuffle_flexible(self):
# gh-4270
arr = [(0, 1), (2, 3)]
dt = np.dtype([('a', np.int32, 1), ('b', np.int32, 1)])
nparr = np.array(arr, dtype=dt)
a, b = nparr[0].copy(), nparr[1].copy()
for i in range(50):
np.random.shuffle(nparr)
assert_(a in nparr)
assert_(b in nparr)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5,4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
ma = np.ma.count_masked(a)
mb = np.ma.count_masked(b)
for i in range(50):
np.random.shuffle(a)
self.assertEqual(ma, np.ma.count_masked(a))
np.random.shuffle(b)
self.assertEqual(mb, np.ma.count_masked(b))
def test_beta(self):
np.random.seed(self.seed)
actual = np.random.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.45341850513746058e-02, 5.31297615662868145e-04],
[1.85366619058432324e-06, 4.19214516800110563e-03],
[1.58405155108498093e-04, 1.26252891949397652e-04]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
np.random.seed(self.seed)
actual = np.random.binomial(100.123, .456, size=(3, 2))
desired = np.array([[37, 43],
[42, 48],
[46, 45]])
np.testing.assert_array_equal(actual, desired)
def test_chisquare(self):
np.random.seed(self.seed)
actual = np.random.chisquare(50, size=(3, 2))
desired = np.array([[63.87858175501090585, 68.68407748911370447],
[65.77116116901505904, 47.09686762438974483],
[72.3828403199695174, 74.18408615260374006]])
np.testing.assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
np.random.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = np.random.mtrand.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.54539444573611562, 0.45460555426388438],
[0.62345816822039413, 0.37654183177960598]],
[[0.55206000085785778, 0.44793999914214233],
[0.58964023305154301, 0.41035976694845688]],
[[0.59266909280647828, 0.40733090719352177],
[0.56974431743975207, 0.43025568256024799]]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, np.random.dirichlet, p, np.float(1))
def test_exponential(self):
np.random.seed(self.seed)
actual = np.random.exponential(1.1234, size=(3, 2))
desired = np.array([[1.08342649775011624, 1.00607889924557314],
[2.46628830085216721, 2.49668106809923884],
[0.68717433461363442, 1.69175666993575979]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_f(self):
np.random.seed(self.seed)
actual = np.random.f(12, 77, size=(3, 2))
desired = np.array([[1.21975394418575878, 1.75135759791559775],
[1.44803115017146489, 1.22108959480396262],
[1.02176975757740629, 1.34431827623300415]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
np.random.seed(self.seed)
actual = np.random.gamma(5, 3, size=(3, 2))
desired = np.array([[24.60509188649287182, 28.54993563207210627],
[26.13476110204064184, 12.56988482927716078],
[31.71863275789960568, 33.30143302795922011]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_geometric(self):
np.random.seed(self.seed)
actual = np.random.geometric(.123456789, size=(3, 2))
desired = np.array([[8, 7],
[17, 17],
[5, 12]])
np.testing.assert_array_equal(actual, desired)
def test_gumbel(self):
np.random.seed(self.seed)
actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.19591898743416816, 0.34405539668096674],
[-1.4492522252274278, -1.47374816298446865],
[1.10651090478803416, -0.69535848626236174]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_hypergeometric(self):
np.random.seed(self.seed)
actual = np.random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[10, 10],
[10, 10],
[9, 9]])
np.testing.assert_array_equal(actual, desired)
# Test nbad = 0
actual = np.random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
np.testing.assert_array_equal(actual, desired)
actual = np.random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
np.testing.assert_array_equal(actual, desired)
# Test ngood = 0
actual = np.random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
np.testing.assert_array_equal(actual, desired)
actual = np.random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
np.testing.assert_array_equal(actual, desired)
def test_laplace(self):
np.random.seed(self.seed)
actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.66599721112760157, 0.52829452552221945],
[3.12791959514407125, 3.18202813572992005],
[-0.05391065675859356, 1.74901336242837324]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_logistic(self):
np.random.seed(self.seed)
actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[1.09232835305011444, 0.8648196662399954],
[4.27818590694950185, 4.33897006346929714],
[-0.21682183359214885, 2.63373365386060332]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
np.random.seed(self.seed)
actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[16.50698631688883822, 36.54846706092654784],
[22.67886599981281748, 0.71617561058995771],
[65.72798501792723869, 86.84341601437161273]])
np.testing.assert_array_almost_equal(actual, desired, decimal=13)
def test_logseries(self):
np.random.seed(self.seed)
actual = np.random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[2, 2],
[6, 17],
[3, 6]])
np.testing.assert_array_equal(actual, desired)
def test_multinomial(self):
np.random.seed(self.seed)
actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2))
desired = np.array([[[4, 3, 5, 4, 2, 2],
[5, 2, 8, 2, 2, 1]],
[[3, 4, 3, 6, 0, 4],
[2, 1, 4, 3, 6, 4]],
[[4, 4, 2, 5, 2, 3],
[4, 3, 4, 2, 3, 4]]])
np.testing.assert_array_equal(actual, desired)
def test_multivariate_normal(self):
np.random.seed(self.seed)
mean = (.123456789, 10)
# Hmm... not even symmetric.
cov = [[1, 0], [1, 0]]
size = (3, 2)
actual = np.random.multivariate_normal(mean, cov, size)
desired = np.array([[[-1.47027513018564449, 10.],
[-1.65915081534845532, 10.]],
[[-2.29186329304599745, 10.],
[-1.77505606019580053, 10.]],
[[-0.54970369430044119, 10.],
[0.29768848031692957, 10.]]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
actual = np.random.multivariate_normal(mean, cov)
desired = np.array([-0.79441224511977482, 10.])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
# Check that non positive-semidefinite covariance raises warning
mean = [0, 0]
cov = [[1, 1 + 1e-10], [1 + 1e-10, 1]]
assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov)
def test_negative_binomial(self):
np.random.seed(self.seed)
actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[848, 841],
[892, 611],
[779, 647]])
np.testing.assert_array_equal(actual, desired)
def test_noncentral_chisquare(self):
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array([[23.91905354498517511, 13.35324692733826346],
[31.22452661329736401, 16.60047399466177254],
[5.03461598262724586, 17.94973089023519464]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
np.random.seed(self.seed)
actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
desired = np.array([[1.40598099674926669, 0.34207973179285761],
[3.57715069265772545, 7.92632662577829805],
[0.43741599463544162, 1.1774208752428319]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
np.random.seed(self.seed)
actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[2.80378370443726244, 3.59863924443872163],
[3.121433477601256, -0.33382987590723379],
[4.18552478636557357, 4.46410668111310471]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_pareto(self):
np.random.seed(self.seed)
actual = np.random.pareto(a=.123456789, size=(3, 2))
desired = np.array(
[[2.46852460439034849e+03, 1.41286880810518346e+03],
[5.28287797029485181e+07, 6.57720981047328785e+07],
[1.40840323350391515e+02, 1.98390255135251704e+05]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# http://mail.scipy.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
np.random.seed(self.seed)
actual = np.random.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[1, 0],
[0, 0]])
np.testing.assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('l').max
lamneg = -1
assert_raises(ValueError, np.random.poisson, lamneg)
assert_raises(ValueError, np.random.poisson, [lamneg]*10)
assert_raises(ValueError, np.random.poisson, lambig)
assert_raises(ValueError, np.random.poisson, [lambig]*10)
def test_power(self):
np.random.seed(self.seed)
actual = np.random.power(a=.123456789, size=(3, 2))
desired = np.array([[0.02048932883240791, 0.01424192241128213],
[0.38446073748535298, 0.39499689943484395],
[0.00177699707563439, 0.13115505880863756]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
np.random.seed(self.seed)
actual = np.random.rayleigh(scale=10, size=(3, 2))
desired = np.array([[13.8882496494248393, 13.383318339044731],
[20.95413364294492098, 21.08285015800712614],
[11.06066537006854311, 17.35468505778271009]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_cauchy(self):
np.random.seed(self.seed)
actual = np.random.standard_cauchy(size=(3, 2))
desired = np.array([[0.77127660196445336, -6.55601161955910605],
[0.93582023391158309, -2.07479293013759447],
[-4.74601644297011926, 0.18338989290760804]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
np.random.seed(self.seed)
actual = np.random.standard_exponential(size=(3, 2))
desired = np.array([[0.96441739162374596, 0.89556604882105506],
[2.1953785836319808, 2.22243285392490542],
[0.6116915921431676, 1.50592546727413201]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_gamma(self):
np.random.seed(self.seed)
actual = np.random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[5.50841531318455058, 6.62953470301903103],
[5.93988484943779227, 2.31044849402133989],
[7.54838614231317084, 8.012756093271868]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_normal(self):
np.random.seed(self.seed)
actual = np.random.standard_normal(size=(3, 2))
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_t(self):
np.random.seed(self.seed)
actual = np.random.standard_t(df=10, size=(3, 2))
desired = np.array([[0.97140611862659965, -0.08830486548450577],
[1.36311143689505321, -0.55317463909867071],
[-0.18473749069684214, 0.61181537341755321]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
np.random.seed(self.seed)
actual = np.random.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[12.68117178949215784, 12.4129206149193152],
[16.20131377335158263, 16.25692138747600524],
[11.20400690911820263, 14.4978144835829923]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
np.random.seed(self.seed)
actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[6.99097932346268003, 6.73801597444323974],
[9.50364421400426274, 9.53130618907631089],
[5.48995325769805476, 8.47493103280052118]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = np.random.uniform
np.testing.assert_raises(OverflowError, func, -np.inf, 0)
np.testing.assert_raises(OverflowError, func, 0, np.inf)
np.testing.assert_raises(OverflowError, func, fmin, fmax)
# (fmax / 1e17) - fmin is within range, so this should not throw
np.random.uniform(low=fmin, high=fmax / 1e17)
def test_vonmises(self):
np.random.seed(self.seed)
actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[2.28567572673902042, 2.89163838442285037],
[0.38198375564286025, 2.57638023113890746],
[1.19153771588353052, 1.83509849681825354]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
# check infinite loop, gh-4720
np.random.seed(self.seed)
r = np.random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
np.testing.assert_(np.isfinite(r).all())
def test_wald(self):
np.random.seed(self.seed)
actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[3.82935265715889983, 5.13125249184285526],
[0.35045403618358717, 1.50832396872003538],
[0.24124319895843183, 0.22031101461955038]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
np.random.seed(self.seed)
actual = np.random.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.97097342648766727, 0.91422896443565516],
[1.89517770034962929, 1.91414357960479564],
[0.67057783752390987, 1.39494046635066793]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_zipf(self):
np.random.seed(self.seed)
actual = np.random.zipf(a=1.23, size=(3, 2))
desired = np.array([[66, 29],
[1, 1],
[3, 13]])
np.testing.assert_array_equal(actual, desired)
class TestThread(object):
# make sure each state produces the same sequence even in threads
def setUp(self):
self.seeds = range(4)
def check_function(self, function, sz):
from threading import Thread
out1 = np.empty((len(self.seeds),) + sz)
out2 = np.empty((len(self.seeds),) + sz)
# threaded generation
t = [Thread(target=function, args=(np.random.RandomState(s), o))
for s, o in zip(self.seeds, out1)]
[x.start() for x in t]
[x.join() for x in t]
# the same serial
for s, o in zip(self.seeds, out2):
function(np.random.RandomState(s), o)
# these platforms change x87 fpu precision mode in threads
if (np.intp().dtype.itemsize == 4 and sys.platform == "win32"):
np.testing.assert_array_almost_equal(out1, out2)
else:
np.testing.assert_array_equal(out1, out2)
def test_normal(self):
def gen_random(state, out):
out[...] = state.normal(size=10000)
self.check_function(gen_random, sz=(10000,))
def test_exp(self):
def gen_random(state, out):
out[...] = state.exponential(scale=np.ones((100, 1000)))
self.check_function(gen_random, sz=(100, 1000))
def test_multinomial(self):
def gen_random(state, out):
out[...] = state.multinomial(10, [1/6.]*6, size=10000)
self.check_function(gen_random, sz=(10000,6))
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
jhseu/tensorflow | tensorflow/python/kernel_tests/summary_ops_test.py | 6 | 46187 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for V2 summary ops from summary_ops_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
import six
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.framework import step_stats_pb2
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine.sequential import Sequential
from tensorflow.python.keras.engine.training import Model
from tensorflow.python.keras.layers.core import Activation
from tensorflow.python.keras.layers.core import Dense
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import summary_ops_v2 as summary_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class SummaryOpsCoreTest(test_util.TensorFlowTestCase):
def testWrite(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
output = summary_ops.write('tag', 42, step=12)
self.assertTrue(output.numpy())
events = events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual(12, events[1].step)
value = events[1].summary.value[0]
self.assertEqual('tag', value.tag)
self.assertEqual(42, to_numpy(value))
def testWrite_fromFunction(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
@def_function.function
def f():
with writer.as_default():
return summary_ops.write('tag', 42, step=12)
output = f()
self.assertTrue(output.numpy())
events = events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual(12, events[1].step)
value = events[1].summary.value[0]
self.assertEqual('tag', value.tag)
self.assertEqual(42, to_numpy(value))
def testWrite_metadata(self):
logdir = self.get_temp_dir()
metadata = summary_pb2.SummaryMetadata()
metadata.plugin_data.plugin_name = 'foo'
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
summary_ops.write('obj', 0, 0, metadata=metadata)
summary_ops.write('bytes', 0, 0, metadata=metadata.SerializeToString())
m = constant_op.constant(metadata.SerializeToString())
summary_ops.write('string_tensor', 0, 0, metadata=m)
events = events_from_logdir(logdir)
self.assertEqual(4, len(events))
self.assertEqual(metadata, events[1].summary.value[0].metadata)
self.assertEqual(metadata, events[2].summary.value[0].metadata)
self.assertEqual(metadata, events[3].summary.value[0].metadata)
def testWrite_name(self):
@def_function.function
def f():
output = summary_ops.write('tag', 42, step=12, name='anonymous')
self.assertTrue(output.name.startswith('anonymous'))
f()
def testWrite_ndarray(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
summary_ops.write('tag', [[1, 2], [3, 4]], step=12)
events = events_from_logdir(logdir)
value = events[1].summary.value[0]
self.assertAllEqual([[1, 2], [3, 4]], to_numpy(value))
def testWrite_tensor(self):
logdir = self.get_temp_dir()
with context.eager_mode():
t = constant_op.constant([[1, 2], [3, 4]])
with summary_ops.create_file_writer_v2(logdir).as_default():
summary_ops.write('tag', t, step=12)
expected = t.numpy()
events = events_from_logdir(logdir)
value = events[1].summary.value[0]
self.assertAllEqual(expected, to_numpy(value))
def testWrite_tensor_fromFunction(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
@def_function.function
def f(t):
with writer.as_default():
summary_ops.write('tag', t, step=12)
t = constant_op.constant([[1, 2], [3, 4]])
f(t)
expected = t.numpy()
events = events_from_logdir(logdir)
value = events[1].summary.value[0]
self.assertAllEqual(expected, to_numpy(value))
def testWrite_stringTensor(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
summary_ops.write('tag', [b'foo', b'bar'], step=12)
events = events_from_logdir(logdir)
value = events[1].summary.value[0]
self.assertAllEqual([b'foo', b'bar'], to_numpy(value))
@test_util.run_gpu_only
def testWrite_gpuDeviceContext(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer(logdir).as_default():
with ops.device('/GPU:0'):
value = constant_op.constant(42.0)
step = constant_op.constant(12, dtype=dtypes.int64)
summary_ops.write('tag', value, step=step).numpy()
empty_metadata = summary_pb2.SummaryMetadata()
events = events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual(12, events[1].step)
self.assertEqual(42, to_numpy(events[1].summary.value[0]))
self.assertEqual(empty_metadata, events[1].summary.value[0].metadata)
@test_util.also_run_as_tf_function
def testWrite_noDefaultWriter(self):
# Use assertAllEqual instead of assertFalse since it works in a defun.
self.assertAllEqual(False, summary_ops.write('tag', 42, step=0))
@test_util.also_run_as_tf_function
def testWrite_noStep_okayIfAlsoNoDefaultWriter(self):
# Use assertAllEqual instead of assertFalse since it works in a defun.
self.assertAllEqual(False, summary_ops.write('tag', 42))
@test_util.also_run_as_tf_function
def testWrite_noStep(self):
logdir = self.get_temp_dir()
with summary_ops.create_file_writer(logdir).as_default():
with self.assertRaisesRegex(ValueError, 'No step set'):
summary_ops.write('tag', 42)
def testWrite_usingDefaultStep(self):
logdir = self.get_temp_dir()
try:
with context.eager_mode():
with summary_ops.create_file_writer(logdir).as_default():
summary_ops.set_step(1)
summary_ops.write('tag', 1.0)
summary_ops.set_step(2)
summary_ops.write('tag', 1.0)
mystep = variables.Variable(10, dtype=dtypes.int64)
summary_ops.set_step(mystep)
summary_ops.write('tag', 1.0)
mystep.assign_add(1)
summary_ops.write('tag', 1.0)
events = events_from_logdir(logdir)
self.assertEqual(5, len(events))
self.assertEqual(1, events[1].step)
self.assertEqual(2, events[2].step)
self.assertEqual(10, events[3].step)
self.assertEqual(11, events[4].step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testWrite_usingDefaultStepConstant_fromFunction(self):
logdir = self.get_temp_dir()
try:
with context.eager_mode():
writer = summary_ops.create_file_writer(logdir)
@def_function.function
def f():
with writer.as_default():
summary_ops.write('tag', 1.0)
summary_ops.set_step(1)
f()
summary_ops.set_step(2)
f()
events = events_from_logdir(logdir)
self.assertEqual(3, len(events))
self.assertEqual(1, events[1].step)
# The step value will still be 1 because the value was captured at the
# time the function was first traced.
self.assertEqual(1, events[2].step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testWrite_usingDefaultStepVariable_fromFunction(self):
logdir = self.get_temp_dir()
try:
with context.eager_mode():
writer = summary_ops.create_file_writer(logdir)
@def_function.function
def f():
with writer.as_default():
summary_ops.write('tag', 1.0)
mystep = variables.Variable(0, dtype=dtypes.int64)
summary_ops.set_step(mystep)
f()
mystep.assign_add(1)
f()
mystep.assign(10)
f()
events = events_from_logdir(logdir)
self.assertEqual(4, len(events))
self.assertEqual(0, events[1].step)
self.assertEqual(1, events[2].step)
self.assertEqual(10, events[3].step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testWrite_usingDefaultStepConstant_fromLegacyGraph(self):
logdir = self.get_temp_dir()
try:
with context.graph_mode():
writer = summary_ops.create_file_writer(logdir)
summary_ops.set_step(1)
with writer.as_default():
write_op = summary_ops.write('tag', 1.0)
summary_ops.set_step(2)
with self.cached_session() as sess:
sess.run(writer.init())
sess.run(write_op)
sess.run(write_op)
sess.run(writer.flush())
events = events_from_logdir(logdir)
self.assertEqual(3, len(events))
self.assertEqual(1, events[1].step)
# The step value will still be 1 because the value was captured at the
# time the graph was constructed.
self.assertEqual(1, events[2].step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testWrite_usingDefaultStepVariable_fromLegacyGraph(self):
logdir = self.get_temp_dir()
try:
with context.graph_mode():
writer = summary_ops.create_file_writer(logdir)
mystep = variables.Variable(0, dtype=dtypes.int64)
summary_ops.set_step(mystep)
with writer.as_default():
write_op = summary_ops.write('tag', 1.0)
first_assign_op = mystep.assign_add(1)
second_assign_op = mystep.assign(10)
with self.cached_session() as sess:
sess.run(writer.init())
sess.run(mystep.initializer)
sess.run(write_op)
sess.run(first_assign_op)
sess.run(write_op)
sess.run(second_assign_op)
sess.run(write_op)
sess.run(writer.flush())
events = events_from_logdir(logdir)
self.assertEqual(4, len(events))
self.assertEqual(0, events[1].step)
self.assertEqual(1, events[2].step)
self.assertEqual(10, events[3].step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testWrite_recordIf_constant(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
self.assertTrue(summary_ops.write('default', 1, step=0))
with summary_ops.record_if(True):
self.assertTrue(summary_ops.write('set_on', 1, step=0))
with summary_ops.record_if(False):
self.assertFalse(summary_ops.write('set_off', 1, step=0))
events = events_from_logdir(logdir)
self.assertEqual(3, len(events))
self.assertEqual('default', events[1].summary.value[0].tag)
self.assertEqual('set_on', events[2].summary.value[0].tag)
def testWrite_recordIf_constant_fromFunction(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
@def_function.function
def f():
with writer.as_default():
# Use assertAllEqual instead of assertTrue since it works in a defun.
self.assertAllEqual(summary_ops.write('default', 1, step=0), True)
with summary_ops.record_if(True):
self.assertAllEqual(summary_ops.write('set_on', 1, step=0), True)
with summary_ops.record_if(False):
self.assertAllEqual(summary_ops.write('set_off', 1, step=0), False)
f()
events = events_from_logdir(logdir)
self.assertEqual(3, len(events))
self.assertEqual('default', events[1].summary.value[0].tag)
self.assertEqual('set_on', events[2].summary.value[0].tag)
def testWrite_recordIf_callable(self):
logdir = self.get_temp_dir()
with context.eager_mode():
step = variables.Variable(-1, dtype=dtypes.int64)
def record_fn():
step.assign_add(1)
return int(step % 2) == 0
with summary_ops.create_file_writer_v2(logdir).as_default():
with summary_ops.record_if(record_fn):
self.assertTrue(summary_ops.write('tag', 1, step=step))
self.assertFalse(summary_ops.write('tag', 1, step=step))
self.assertTrue(summary_ops.write('tag', 1, step=step))
self.assertFalse(summary_ops.write('tag', 1, step=step))
self.assertTrue(summary_ops.write('tag', 1, step=step))
events = events_from_logdir(logdir)
self.assertEqual(4, len(events))
self.assertEqual(0, events[1].step)
self.assertEqual(2, events[2].step)
self.assertEqual(4, events[3].step)
def testWrite_recordIf_callable_fromFunction(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
step = variables.Variable(-1, dtype=dtypes.int64)
@def_function.function
def record_fn():
step.assign_add(1)
return math_ops.equal(step % 2, 0)
@def_function.function
def f():
with writer.as_default():
with summary_ops.record_if(record_fn):
return [
summary_ops.write('tag', 1, step=step),
summary_ops.write('tag', 1, step=step),
summary_ops.write('tag', 1, step=step)]
self.assertAllEqual(f(), [True, False, True])
self.assertAllEqual(f(), [False, True, False])
events = events_from_logdir(logdir)
self.assertEqual(4, len(events))
self.assertEqual(0, events[1].step)
self.assertEqual(2, events[2].step)
self.assertEqual(4, events[3].step)
def testWrite_recordIf_tensorInput_fromFunction(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[], dtype=dtypes.int64)])
def f(step):
with writer.as_default():
with summary_ops.record_if(math_ops.equal(step % 2, 0)):
return summary_ops.write('tag', 1, step=step)
self.assertTrue(f(0))
self.assertFalse(f(1))
self.assertTrue(f(2))
self.assertFalse(f(3))
self.assertTrue(f(4))
events = events_from_logdir(logdir)
self.assertEqual(4, len(events))
self.assertEqual(0, events[1].step)
self.assertEqual(2, events[2].step)
self.assertEqual(4, events[3].step)
def testWriteRawPb(self):
logdir = self.get_temp_dir()
pb = summary_pb2.Summary()
pb.value.add().simple_value = 42.0
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
output = summary_ops.write_raw_pb(pb.SerializeToString(), step=12)
self.assertTrue(output.numpy())
events = events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual(12, events[1].step)
self.assertProtoEquals(pb, events[1].summary)
def testWriteRawPb_fromFunction(self):
logdir = self.get_temp_dir()
pb = summary_pb2.Summary()
pb.value.add().simple_value = 42.0
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
@def_function.function
def f():
with writer.as_default():
return summary_ops.write_raw_pb(pb.SerializeToString(), step=12)
output = f()
self.assertTrue(output.numpy())
events = events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual(12, events[1].step)
self.assertProtoEquals(pb, events[1].summary)
def testWriteRawPb_multipleValues(self):
logdir = self.get_temp_dir()
pb1 = summary_pb2.Summary()
pb1.value.add().simple_value = 1.0
pb1.value.add().simple_value = 2.0
pb2 = summary_pb2.Summary()
pb2.value.add().simple_value = 3.0
pb3 = summary_pb2.Summary()
pb3.value.add().simple_value = 4.0
pb3.value.add().simple_value = 5.0
pb3.value.add().simple_value = 6.0
pbs = [pb.SerializeToString() for pb in (pb1, pb2, pb3)]
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
output = summary_ops.write_raw_pb(pbs, step=12)
self.assertTrue(output.numpy())
events = events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual(12, events[1].step)
expected_pb = summary_pb2.Summary()
for i in range(6):
expected_pb.value.add().simple_value = i + 1.0
self.assertProtoEquals(expected_pb, events[1].summary)
def testWriteRawPb_invalidValue(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
with self.assertRaisesRegex(
errors.DataLossError,
'Bad tf.compat.v1.Summary binary proto tensor string'):
summary_ops.write_raw_pb('notaproto', step=12)
@test_util.also_run_as_tf_function
def testGetSetStep(self):
try:
self.assertIsNone(summary_ops.get_step())
summary_ops.set_step(1)
# Use assertAllEqual instead of assertEqual since it works in a defun.
self.assertAllEqual(1, summary_ops.get_step())
summary_ops.set_step(constant_op.constant(2))
self.assertAllEqual(2, summary_ops.get_step())
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testGetSetStep_variable(self):
with context.eager_mode():
try:
mystep = variables.Variable(0)
summary_ops.set_step(mystep)
self.assertAllEqual(0, summary_ops.get_step().read_value())
mystep.assign_add(1)
self.assertAllEqual(1, summary_ops.get_step().read_value())
# Check that set_step() properly maintains reference to variable.
del mystep
self.assertAllEqual(1, summary_ops.get_step().read_value())
summary_ops.get_step().assign_add(1)
self.assertAllEqual(2, summary_ops.get_step().read_value())
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def testGetSetStep_variable_fromFunction(self):
with context.eager_mode():
try:
@def_function.function
def set_step(step):
summary_ops.set_step(step)
return summary_ops.get_step()
@def_function.function
def get_and_increment():
summary_ops.get_step().assign_add(1)
return summary_ops.get_step()
mystep = variables.Variable(0)
self.assertAllEqual(0, set_step(mystep))
self.assertAllEqual(0, summary_ops.get_step().read_value())
self.assertAllEqual(1, get_and_increment())
self.assertAllEqual(2, get_and_increment())
# Check that set_step() properly maintains reference to variable.
del mystep
self.assertAllEqual(3, get_and_increment())
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
@test_util.also_run_as_tf_function
def testSummaryScope(self):
with summary_ops.summary_scope('foo') as (tag, scope):
self.assertEqual('foo', tag)
self.assertEqual('foo/', scope)
with summary_ops.summary_scope('bar') as (tag, scope):
self.assertEqual('foo/bar', tag)
self.assertEqual('foo/bar/', scope)
with summary_ops.summary_scope('with/slash') as (tag, scope):
self.assertEqual('foo/with/slash', tag)
self.assertEqual('foo/with/slash/', scope)
with ops.name_scope(None, skip_on_eager=False):
with summary_ops.summary_scope('unnested') as (tag, scope):
self.assertEqual('unnested', tag)
self.assertEqual('unnested/', scope)
@test_util.also_run_as_tf_function
def testSummaryScope_defaultName(self):
with summary_ops.summary_scope(None) as (tag, scope):
self.assertEqual('summary', tag)
self.assertEqual('summary/', scope)
with summary_ops.summary_scope(None, 'backup') as (tag, scope):
self.assertEqual('backup', tag)
self.assertEqual('backup/', scope)
@test_util.also_run_as_tf_function
def testSummaryScope_handlesCharactersIllegalForScope(self):
with summary_ops.summary_scope('f?o?o') as (tag, scope):
self.assertEqual('f?o?o', tag)
self.assertEqual('foo/', scope)
# If all characters aren't legal for a scope name, use default name.
with summary_ops.summary_scope('???', 'backup') as (tag, scope):
self.assertEqual('???', tag)
self.assertEqual('backup/', scope)
@test_util.also_run_as_tf_function
def testSummaryScope_nameNotUniquifiedForTag(self):
constant_op.constant(0, name='foo')
with summary_ops.summary_scope('foo') as (tag, _):
self.assertEqual('foo', tag)
with summary_ops.summary_scope('foo') as (tag, _):
self.assertEqual('foo', tag)
with ops.name_scope('with', skip_on_eager=False):
constant_op.constant(0, name='slash')
with summary_ops.summary_scope('with/slash') as (tag, _):
self.assertEqual('with/slash', tag)
def testAllV2SummaryOps(self):
logdir = self.get_temp_dir()
def define_ops():
result = []
# TF 2.0 summary ops
result.append(summary_ops.write('write', 1, step=0))
result.append(summary_ops.write_raw_pb(b'', step=0, name='raw_pb'))
# TF 1.x tf.contrib.summary ops
result.append(summary_ops.generic('tensor', 1, step=1))
result.append(summary_ops.scalar('scalar', 2.0, step=1))
result.append(summary_ops.histogram('histogram', [1.0], step=1))
result.append(summary_ops.image('image', [[[[1.0]]]], step=1))
result.append(summary_ops.audio('audio', [[1.0]], 1.0, 1, step=1))
return result
with context.graph_mode():
ops_without_writer = define_ops()
with summary_ops.create_file_writer_v2(logdir).as_default():
with summary_ops.record_if(True):
ops_recording_on = define_ops()
with summary_ops.record_if(False):
ops_recording_off = define_ops()
# We should be collecting all ops defined with a default writer present,
# regardless of whether recording was set on or off, but not those defined
# without a writer at all.
del ops_without_writer
expected_ops = ops_recording_on + ops_recording_off
self.assertCountEqual(expected_ops, summary_ops.all_v2_summary_ops())
class SummaryWriterTest(test_util.TensorFlowTestCase):
def testCreate_withInitAndClose(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(
logdir, max_queue=1000, flush_millis=1000000)
get_total = lambda: len(events_from_logdir(logdir))
self.assertEqual(1, get_total()) # file_version Event
# Calling init() again while writer is open has no effect
writer.init()
self.assertEqual(1, get_total())
with writer.as_default():
summary_ops.write('tag', 1, step=0)
self.assertEqual(1, get_total())
# Calling .close() should do an implicit flush
writer.close()
self.assertEqual(2, get_total())
def testCreate_fromFunction(self):
logdir = self.get_temp_dir()
@def_function.function
def f():
# Returned SummaryWriter must be stored in a non-local variable so it
# lives throughout the function execution.
if not hasattr(f, 'writer'):
f.writer = summary_ops.create_file_writer_v2(logdir)
with context.eager_mode():
f()
event_files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(1, len(event_files))
def testCreate_graphTensorArgument_raisesError(self):
logdir = self.get_temp_dir()
with context.graph_mode():
logdir_tensor = constant_op.constant(logdir)
with context.eager_mode():
with self.assertRaisesRegex(
ValueError, 'Invalid graph Tensor argument.*logdir'):
summary_ops.create_file_writer_v2(logdir_tensor)
self.assertEmpty(gfile.Glob(os.path.join(logdir, '*')))
def testCreate_fromFunction_graphTensorArgument_raisesError(self):
logdir = self.get_temp_dir()
@def_function.function
def f():
summary_ops.create_file_writer_v2(constant_op.constant(logdir))
with context.eager_mode():
with self.assertRaisesRegex(
ValueError, 'Invalid graph Tensor argument.*logdir'):
f()
self.assertEmpty(gfile.Glob(os.path.join(logdir, '*')))
def testCreate_fromFunction_unpersistedResource_raisesError(self):
logdir = self.get_temp_dir()
@def_function.function
def f():
with summary_ops.create_file_writer_v2(logdir).as_default():
pass # Calling .as_default() is enough to indicate use.
with context.eager_mode():
# TODO(nickfelt): change this to a better error
with self.assertRaisesRegex(
errors.NotFoundError, 'Resource.*does not exist'):
f()
# Even though we didn't use it, an event file will have been created.
self.assertEqual(1, len(gfile.Glob(os.path.join(logdir, '*'))))
def testCreate_immediateSetAsDefault_retainsReference(self):
logdir = self.get_temp_dir()
try:
with context.eager_mode():
summary_ops.create_file_writer_v2(logdir).set_as_default()
summary_ops.flush()
finally:
# Ensure we clean up no matter how the test executes.
summary_ops._summary_state.writer = None # pylint: disable=protected-access
def testCreate_immediateAsDefault_retainsReference(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
summary_ops.flush()
def testNoSharing(self):
# Two writers with the same logdir should not share state.
logdir = self.get_temp_dir()
with context.eager_mode():
writer1 = summary_ops.create_file_writer_v2(logdir)
with writer1.as_default():
summary_ops.write('tag', 1, step=1)
event_files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(1, len(event_files))
file1 = event_files[0]
writer2 = summary_ops.create_file_writer_v2(logdir)
with writer2.as_default():
summary_ops.write('tag', 1, step=2)
event_files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(2, len(event_files))
event_files.remove(file1)
file2 = event_files[0]
# Extra writes to ensure interleaved usage works.
with writer1.as_default():
summary_ops.write('tag', 1, step=1)
with writer2.as_default():
summary_ops.write('tag', 1, step=2)
events = iter(events_from_file(file1))
self.assertEqual('brain.Event:2', next(events).file_version)
self.assertEqual(1, next(events).step)
self.assertEqual(1, next(events).step)
self.assertRaises(StopIteration, lambda: next(events))
events = iter(events_from_file(file2))
self.assertEqual('brain.Event:2', next(events).file_version)
self.assertEqual(2, next(events).step)
self.assertEqual(2, next(events).step)
self.assertRaises(StopIteration, lambda: next(events))
def testNoSharing_fromFunction(self):
logdir = self.get_temp_dir()
@def_function.function
def f1():
if not hasattr(f1, 'writer'):
f1.writer = summary_ops.create_file_writer_v2(logdir)
with f1.writer.as_default():
summary_ops.write('tag', 1, step=1)
@def_function.function
def f2():
if not hasattr(f2, 'writer'):
f2.writer = summary_ops.create_file_writer_v2(logdir)
with f2.writer.as_default():
summary_ops.write('tag', 1, step=2)
with context.eager_mode():
f1()
event_files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(1, len(event_files))
file1 = event_files[0]
f2()
event_files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(2, len(event_files))
event_files.remove(file1)
file2 = event_files[0]
# Extra writes to ensure interleaved usage works.
f1()
f2()
events = iter(events_from_file(file1))
self.assertEqual('brain.Event:2', next(events).file_version)
self.assertEqual(1, next(events).step)
self.assertEqual(1, next(events).step)
self.assertRaises(StopIteration, lambda: next(events))
events = iter(events_from_file(file2))
self.assertEqual('brain.Event:2', next(events).file_version)
self.assertEqual(2, next(events).step)
self.assertEqual(2, next(events).step)
self.assertRaises(StopIteration, lambda: next(events))
def testMaxQueue(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(
logdir, max_queue=1, flush_millis=999999).as_default():
get_total = lambda: len(events_from_logdir(logdir))
# Note: First tf.compat.v1.Event is always file_version.
self.assertEqual(1, get_total())
summary_ops.write('tag', 1, step=0)
self.assertEqual(1, get_total())
# Should flush after second summary since max_queue = 1
summary_ops.write('tag', 1, step=0)
self.assertEqual(3, get_total())
def testWriterFlush(self):
logdir = self.get_temp_dir()
get_total = lambda: len(events_from_logdir(logdir))
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(
logdir, max_queue=1000, flush_millis=1000000)
self.assertEqual(1, get_total()) # file_version Event
with writer.as_default():
summary_ops.write('tag', 1, step=0)
self.assertEqual(1, get_total())
writer.flush()
self.assertEqual(2, get_total())
summary_ops.write('tag', 1, step=0)
self.assertEqual(2, get_total())
# Exiting the "as_default()" should do an implicit flush
self.assertEqual(3, get_total())
def testFlushFunction(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(
logdir, max_queue=999999, flush_millis=999999)
with writer.as_default():
get_total = lambda: len(events_from_logdir(logdir))
# Note: First tf.compat.v1.Event is always file_version.
self.assertEqual(1, get_total())
summary_ops.write('tag', 1, step=0)
summary_ops.write('tag', 1, step=0)
self.assertEqual(1, get_total())
summary_ops.flush()
self.assertEqual(3, get_total())
# Test "writer" parameter
summary_ops.write('tag', 1, step=0)
self.assertEqual(3, get_total())
summary_ops.flush(writer=writer)
self.assertEqual(4, get_total())
summary_ops.write('tag', 1, step=0)
self.assertEqual(4, get_total())
summary_ops.flush(writer=writer._resource) # pylint:disable=protected-access
self.assertEqual(5, get_total())
@test_util.assert_no_new_tensors
def testNoMemoryLeak_graphMode(self):
logdir = self.get_temp_dir()
with context.graph_mode(), ops.Graph().as_default():
summary_ops.create_file_writer_v2(logdir)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testNoMemoryLeak_eagerMode(self):
logdir = self.get_temp_dir()
with summary_ops.create_file_writer_v2(logdir).as_default():
summary_ops.write('tag', 1, step=0)
def testClose_preventsLaterUse(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
writer.close()
writer.close() # redundant close() is a no-op
writer.flush() # redundant flush() is a no-op
with self.assertRaisesRegex(RuntimeError, 'already closed'):
writer.init()
with self.assertRaisesRegex(RuntimeError, 'already closed'):
with writer.as_default():
self.fail('should not get here')
with self.assertRaisesRegex(RuntimeError, 'already closed'):
writer.set_as_default()
def testClose_closesOpenFile(self):
try:
import psutil # pylint: disable=g-import-not-at-top
except ImportError:
raise unittest.SkipTest('test requires psutil')
proc = psutil.Process()
get_open_filenames = lambda: set(info[0] for info in proc.open_files())
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(1, len(files))
eventfile = files[0]
self.assertIn(eventfile, get_open_filenames())
writer.close()
self.assertNotIn(eventfile, get_open_filenames())
def testDereference_closesOpenFile(self):
try:
import psutil # pylint: disable=g-import-not-at-top
except ImportError:
raise unittest.SkipTest('test requires psutil')
proc = psutil.Process()
get_open_filenames = lambda: set(info[0] for info in proc.open_files())
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(1, len(files))
eventfile = files[0]
self.assertIn(eventfile, get_open_filenames())
del writer
self.assertNotIn(eventfile, get_open_filenames())
class SummaryOpsTest(test_util.TensorFlowTestCase):
def tearDown(self):
summary_ops.trace_off()
def run_metadata(self, *args, **kwargs):
assert context.executing_eagerly()
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer(logdir)
with writer.as_default():
summary_ops.run_metadata(*args, **kwargs)
writer.close()
events = events_from_logdir(logdir)
return events[1]
def run_metadata_graphs(self, *args, **kwargs):
assert context.executing_eagerly()
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer(logdir)
with writer.as_default():
summary_ops.run_metadata_graphs(*args, **kwargs)
writer.close()
events = events_from_logdir(logdir)
return events[1]
def create_run_metadata(self):
step_stats = step_stats_pb2.StepStats(dev_stats=[
step_stats_pb2.DeviceStepStats(
device='cpu:0',
node_stats=[step_stats_pb2.NodeExecStats(node_name='hello')])
])
return config_pb2.RunMetadata(
function_graphs=[
config_pb2.RunMetadata.FunctionGraphs(
pre_optimization_graph=graph_pb2.GraphDef(
node=[node_def_pb2.NodeDef(name='foo')]))
],
step_stats=step_stats)
def keras_model(self, *args, **kwargs):
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer(logdir)
with writer.as_default():
summary_ops.keras_model(*args, **kwargs)
writer.close()
events = events_from_logdir(logdir)
# The first event contains no summary values. The written content goes to
# the second event.
return events[1]
def run_trace(self, f, step=1):
assert context.executing_eagerly()
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer(logdir)
summary_ops.trace_on(graph=True, profiler=False)
with writer.as_default():
f()
summary_ops.trace_export(name='foo', step=step)
writer.close()
events = events_from_logdir(logdir)
return events[1]
@test_util.run_v2_only
def testRunMetadata_usesNameAsTag(self):
meta = config_pb2.RunMetadata()
with ops.name_scope('foo', skip_on_eager=False):
event = self.run_metadata(name='my_name', data=meta, step=1)
first_val = event.summary.value[0]
self.assertEqual('foo/my_name', first_val.tag)
@test_util.run_v2_only
def testRunMetadata_summaryMetadata(self):
expected_summary_metadata = """
plugin_data {
plugin_name: "graph_run_metadata"
content: "1"
}
"""
meta = config_pb2.RunMetadata()
event = self.run_metadata(name='my_name', data=meta, step=1)
actual_summary_metadata = event.summary.value[0].metadata
self.assertProtoEquals(expected_summary_metadata, actual_summary_metadata)
@test_util.run_v2_only
def testRunMetadata_wholeRunMetadata(self):
expected_run_metadata = """
step_stats {
dev_stats {
device: "cpu:0"
node_stats {
node_name: "hello"
}
}
}
function_graphs {
pre_optimization_graph {
node {
name: "foo"
}
}
}
"""
meta = self.create_run_metadata()
event = self.run_metadata(name='my_name', data=meta, step=1)
first_val = event.summary.value[0]
actual_run_metadata = config_pb2.RunMetadata.FromString(
first_val.tensor.string_val[0])
self.assertProtoEquals(expected_run_metadata, actual_run_metadata)
@test_util.run_v2_only
def testRunMetadata_usesDefaultStep(self):
meta = config_pb2.RunMetadata()
try:
summary_ops.set_step(42)
event = self.run_metadata(name='my_name', data=meta)
self.assertEqual(42, event.step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
@test_util.run_v2_only
def testRunMetadataGraph_usesNameAsTag(self):
meta = config_pb2.RunMetadata()
with ops.name_scope('foo', skip_on_eager=False):
event = self.run_metadata_graphs(name='my_name', data=meta, step=1)
first_val = event.summary.value[0]
self.assertEqual('foo/my_name', first_val.tag)
@test_util.run_v2_only
def testRunMetadataGraph_summaryMetadata(self):
expected_summary_metadata = """
plugin_data {
plugin_name: "graph_run_metadata_graph"
content: "1"
}
"""
meta = config_pb2.RunMetadata()
event = self.run_metadata_graphs(name='my_name', data=meta, step=1)
actual_summary_metadata = event.summary.value[0].metadata
self.assertProtoEquals(expected_summary_metadata, actual_summary_metadata)
@test_util.run_v2_only
def testRunMetadataGraph_runMetadataFragment(self):
expected_run_metadata = """
function_graphs {
pre_optimization_graph {
node {
name: "foo"
}
}
}
"""
meta = self.create_run_metadata()
event = self.run_metadata_graphs(name='my_name', data=meta, step=1)
first_val = event.summary.value[0]
actual_run_metadata = config_pb2.RunMetadata.FromString(
first_val.tensor.string_val[0])
self.assertProtoEquals(expected_run_metadata, actual_run_metadata)
@test_util.run_v2_only
def testRunMetadataGraph_usesDefaultStep(self):
meta = config_pb2.RunMetadata()
try:
summary_ops.set_step(42)
event = self.run_metadata_graphs(name='my_name', data=meta)
self.assertEqual(42, event.step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
@test_util.run_v2_only
def testKerasModel(self):
model = Sequential(
[Dense(10, input_shape=(100,)),
Activation('relu', name='my_relu')])
event = self.keras_model(name='my_name', data=model, step=1)
first_val = event.summary.value[0]
self.assertEqual(model.to_json(), first_val.tensor.string_val[0].decode())
@test_util.run_v2_only
def testKerasModel_usesDefaultStep(self):
model = Sequential(
[Dense(10, input_shape=(100,)),
Activation('relu', name='my_relu')])
try:
summary_ops.set_step(42)
event = self.keras_model(name='my_name', data=model)
self.assertEqual(42, event.step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
@test_util.run_v2_only
def testKerasModel_subclass(self):
class SimpleSubclass(Model):
def __init__(self):
super(SimpleSubclass, self).__init__(name='subclass')
self.dense = Dense(10, input_shape=(100,))
self.activation = Activation('relu', name='my_relu')
def call(self, inputs):
x = self.dense(inputs)
return self.activation(x)
model = SimpleSubclass()
with test.mock.patch.object(logging, 'warn') as mock_log:
self.assertFalse(
summary_ops.keras_model(name='my_name', data=model, step=1))
self.assertRegexpMatches(
str(mock_log.call_args), 'Model failed to serialize as JSON.')
@test_util.run_v2_only
def testKerasModel_otherExceptions(self):
model = Sequential()
with test.mock.patch.object(model, 'to_json') as mock_to_json:
with test.mock.patch.object(logging, 'warn') as mock_log:
mock_to_json.side_effect = Exception('oops')
self.assertFalse(
summary_ops.keras_model(name='my_name', data=model, step=1))
self.assertRegexpMatches(
str(mock_log.call_args),
'Model failed to serialize as JSON. Ignoring... oops')
@test_util.run_v2_only
def testTrace(self):
@def_function.function
def f():
x = constant_op.constant(2)
y = constant_op.constant(3)
return x**y
event = self.run_trace(f)
first_val = event.summary.value[0]
actual_run_metadata = config_pb2.RunMetadata.FromString(
first_val.tensor.string_val[0])
# Content of function_graphs is large and, for instance, device can change.
self.assertTrue(hasattr(actual_run_metadata, 'function_graphs'))
@test_util.run_v2_only
def testTrace_cannotEnableTraceInFunction(self):
@def_function.function
def f():
summary_ops.trace_on(graph=True, profiler=False)
x = constant_op.constant(2)
y = constant_op.constant(3)
return x**y
with test.mock.patch.object(logging, 'warn') as mock_log:
f()
self.assertRegexpMatches(
str(mock_log.call_args), 'Cannot enable trace inside a tf.function.')
@test_util.run_v2_only
def testTrace_cannotEnableTraceInGraphMode(self):
with test.mock.patch.object(logging, 'warn') as mock_log:
with context.graph_mode():
summary_ops.trace_on(graph=True, profiler=False)
self.assertRegexpMatches(
str(mock_log.call_args), 'Must enable trace in eager mode.')
@test_util.run_v2_only
def testTrace_cannotExportTraceWithoutTrace(self):
with six.assertRaisesRegex(self, ValueError,
'Must enable trace before export.'):
summary_ops.trace_export(name='foo', step=1)
@test_util.run_v2_only
def testTrace_cannotExportTraceInFunction(self):
summary_ops.trace_on(graph=True, profiler=False)
@def_function.function
def f():
x = constant_op.constant(2)
y = constant_op.constant(3)
summary_ops.trace_export(name='foo', step=1)
return x**y
with test.mock.patch.object(logging, 'warn') as mock_log:
f()
self.assertRegexpMatches(
str(mock_log.call_args),
'Cannot export trace inside a tf.function.')
@test_util.run_v2_only
def testTrace_cannotExportTraceInGraphMode(self):
with test.mock.patch.object(logging, 'warn') as mock_log:
with context.graph_mode():
summary_ops.trace_export(name='foo', step=1)
self.assertRegexpMatches(
str(mock_log.call_args),
'Can only export trace while executing eagerly.')
@test_util.run_v2_only
def testTrace_usesDefaultStep(self):
@def_function.function
def f():
x = constant_op.constant(2)
y = constant_op.constant(3)
return x**y
try:
summary_ops.set_step(42)
event = self.run_trace(f, step=None)
self.assertEqual(42, event.step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
def events_from_file(filepath):
"""Returns all events in a single event file.
Args:
filepath: Path to the event file.
Returns:
A list of all tf.Event protos in the event file.
"""
records = list(tf_record.tf_record_iterator(filepath))
result = []
for r in records:
event = event_pb2.Event()
event.ParseFromString(r)
result.append(event)
return result
def events_from_logdir(logdir):
"""Returns all events in the single eventfile in logdir.
Args:
logdir: The directory in which the single event file is sought.
Returns:
A list of all tf.Event protos from the single event file.
Raises:
AssertionError: If logdir does not contain exactly one file.
"""
assert gfile.Exists(logdir)
files = gfile.ListDirectory(logdir)
  assert len(files) == 1, 'Expected exactly one file in logdir, found: %s' % files
return events_from_file(os.path.join(logdir, files[0]))
def to_numpy(summary_value):
return tensor_util.MakeNdarray(summary_value.tensor)
if __name__ == '__main__':
test.main()
| apache-2.0 |
Ziqi-Li/bknqgis | pandas/pandas/tests/io/parser/quoting.py | 18 | 5813 | # -*- coding: utf-8 -*-
"""
Tests that quoting specifications are properly handled
during parsing for all of the parsers defined in parsers.py
"""
import csv
import pandas.util.testing as tm
from pandas import DataFrame
from pandas.compat import PY3, StringIO, u
class QuotingTests(object):
def test_bad_quote_char(self):
data = '1,2,3'
# Python 2.x: "...must be an 1-character..."
# Python 3.x: "...must be a 1-character..."
msg = '"quotechar" must be a(n)? 1-character string'
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quotechar='foo')
msg = 'quotechar must be set if quoting enabled'
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quotechar=None,
quoting=csv.QUOTE_MINIMAL)
msg = '"quotechar" must be string, not int'
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quotechar=2)
def test_bad_quoting(self):
data = '1,2,3'
msg = '"quoting" must be an integer'
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quoting='foo')
        # quoting must be in the range [0, 3]
msg = 'bad "quoting" value'
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quoting=5)
def test_quote_char_basic(self):
data = 'a,b,c\n1,2,"cat"'
expected = DataFrame([[1, 2, 'cat']],
columns=['a', 'b', 'c'])
result = self.read_csv(StringIO(data), quotechar='"')
tm.assert_frame_equal(result, expected)
def test_quote_char_various(self):
data = 'a,b,c\n1,2,"cat"'
expected = DataFrame([[1, 2, 'cat']],
columns=['a', 'b', 'c'])
quote_chars = ['~', '*', '%', '$', '@', 'P']
for quote_char in quote_chars:
new_data = data.replace('"', quote_char)
result = self.read_csv(StringIO(new_data), quotechar=quote_char)
tm.assert_frame_equal(result, expected)
def test_null_quote_char(self):
data = 'a,b,c\n1,2,3'
# sanity checks
msg = 'quotechar must be set if quoting enabled'
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quotechar=None,
quoting=csv.QUOTE_MINIMAL)
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quotechar='',
quoting=csv.QUOTE_MINIMAL)
# no errors should be raised if quoting is None
expected = DataFrame([[1, 2, 3]],
columns=['a', 'b', 'c'])
result = self.read_csv(StringIO(data), quotechar=None,
quoting=csv.QUOTE_NONE)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), quotechar='',
quoting=csv.QUOTE_NONE)
tm.assert_frame_equal(result, expected)
def test_quoting_various(self):
data = '1,2,"foo"'
cols = ['a', 'b', 'c']
# QUOTE_MINIMAL and QUOTE_ALL apply only to
# the CSV writer, so they should have no
# special effect for the CSV reader
expected = DataFrame([[1, 2, 'foo']], columns=cols)
# test default (afterwards, arguments are all explicit)
result = self.read_csv(StringIO(data), names=cols)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), quotechar='"',
quoting=csv.QUOTE_MINIMAL, names=cols)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), quotechar='"',
quoting=csv.QUOTE_ALL, names=cols)
tm.assert_frame_equal(result, expected)
# QUOTE_NONE tells the reader to do no special handling
# of quote characters and leave them alone
expected = DataFrame([[1, 2, '"foo"']], columns=cols)
result = self.read_csv(StringIO(data), quotechar='"',
quoting=csv.QUOTE_NONE, names=cols)
tm.assert_frame_equal(result, expected)
# QUOTE_NONNUMERIC tells the reader to cast
# all non-quoted fields to float
expected = DataFrame([[1.0, 2.0, 'foo']], columns=cols)
result = self.read_csv(StringIO(data), quotechar='"',
quoting=csv.QUOTE_NONNUMERIC,
names=cols)
tm.assert_frame_equal(result, expected)
def test_double_quote(self):
data = 'a,b\n3,"4 "" 5"'
expected = DataFrame([[3, '4 " 5']],
columns=['a', 'b'])
result = self.read_csv(StringIO(data), quotechar='"',
doublequote=True)
tm.assert_frame_equal(result, expected)
expected = DataFrame([[3, '4 " 5"']],
columns=['a', 'b'])
result = self.read_csv(StringIO(data), quotechar='"',
doublequote=False)
tm.assert_frame_equal(result, expected)
def test_quotechar_unicode(self):
# See gh-14477
data = 'a\n1'
expected = DataFrame({'a': [1]})
result = self.read_csv(StringIO(data), quotechar=u('"'))
tm.assert_frame_equal(result, expected)
# Compared to Python 3.x, Python 2.x does not handle unicode well.
if PY3:
result = self.read_csv(StringIO(data), quotechar=u('\u0001'))
tm.assert_frame_equal(result, expected)
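# Illustrative sketch (not part of the test suite): the quoting behaviour
# exercised above, shown with the public pandas.read_csv API rather than the
# per-parser ``self.read_csv`` fixture.
def _example_quote_nonnumeric():
    import pandas as pd
    data = '1,2,"foo"'
    # QUOTE_NONNUMERIC casts every unquoted field to float: [[1.0, 2.0, 'foo']]
    return pd.read_csv(StringIO(data), names=['a', 'b', 'c'],
                       quoting=csv.QUOTE_NONNUMERIC)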
| gpl-2.0 |
bailey1234/hyeri7846 | lib/werkzeug/wsgi.py | 312 | 37386 | # -*- coding: utf-8 -*-
"""
werkzeug.wsgi
~~~~~~~~~~~~~
This module implements WSGI related helpers.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import os
import sys
import posixpath
import mimetypes
from itertools import chain
from zlib import adler32
from time import time, mktime
from datetime import datetime
from functools import partial, update_wrapper
from werkzeug._compat import iteritems, text_type, string_types, \
implements_iterator, make_literal_wrapper, to_unicode, to_bytes, \
wsgi_get_bytes, try_coerce_native, PY2
from werkzeug._internal import _empty_stream, _encode_idna
from werkzeug.http import is_resource_modified, http_date
from werkzeug.urls import uri_to_iri, url_quote, url_parse, url_join
def responder(f):
"""Marks a function as responder. Decorate a function with it and it
will automatically call the return value as WSGI application.
Example::
@responder
def application(environ, start_response):
return Response('Hello World!')
"""
return update_wrapper(lambda *a: f(*a)(*a[-2:]), f)
def get_current_url(environ, root_only=False, strip_querystring=False,
host_only=False, trusted_hosts=None):
"""A handy helper function that recreates the full URL for the current
request or parts of it. Here an example:
>>> from werkzeug.test import create_environ
>>> env = create_environ("/?param=foo", "http://localhost/script")
>>> get_current_url(env)
'http://localhost/script/?param=foo'
>>> get_current_url(env, root_only=True)
'http://localhost/script/'
>>> get_current_url(env, host_only=True)
'http://localhost/'
>>> get_current_url(env, strip_querystring=True)
'http://localhost/script/'
    Optionally it verifies that the host is in a list of trusted hosts.
If the host is not in there it will raise a
:exc:`~werkzeug.exceptions.SecurityError`.
:param environ: the WSGI environment to get the current URL from.
:param root_only: set `True` if you only want the root URL.
:param strip_querystring: set to `True` if you don't want the querystring.
:param host_only: set to `True` if the host URL should be returned.
:param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted`
for more information.
"""
tmp = [environ['wsgi.url_scheme'], '://', get_host(environ, trusted_hosts)]
cat = tmp.append
if host_only:
return uri_to_iri(''.join(tmp) + '/')
cat(url_quote(wsgi_get_bytes(environ.get('SCRIPT_NAME', ''))).rstrip('/'))
cat('/')
if not root_only:
cat(url_quote(wsgi_get_bytes(environ.get('PATH_INFO', '')).lstrip(b'/')))
if not strip_querystring:
qs = get_query_string(environ)
if qs:
cat('?' + qs)
return uri_to_iri(''.join(tmp))
def host_is_trusted(hostname, trusted_list):
"""Checks if a host is trusted against a list. This also takes care
of port normalization.
.. versionadded:: 0.9
:param hostname: the hostname to check
:param trusted_list: a list of hostnames to check against. If a
hostname starts with a dot it will match against
all subdomains as well.
"""
if not hostname:
return False
if isinstance(trusted_list, string_types):
trusted_list = [trusted_list]
def _normalize(hostname):
if ':' in hostname:
hostname = hostname.rsplit(':', 1)[0]
return _encode_idna(hostname)
hostname = _normalize(hostname)
for ref in trusted_list:
if ref.startswith('.'):
ref = ref[1:]
suffix_match = True
else:
suffix_match = False
ref = _normalize(ref)
if ref == hostname:
return True
if suffix_match and hostname.endswith('.' + ref):
return True
return False
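# Illustrative sketch (not part of the original module): how the trusted-host
# check above behaves for exact matches; port numbers are stripped before the
# comparison, and a reference starting with a dot also matches subdomains.
def _example_host_is_trusted():
    assert host_is_trusted('example.com', ['example.com'])
    assert host_is_trusted('example.com:8080', ['example.com'])
    assert not host_is_trusted('evil.com', ['example.com'])
    assert not host_is_trusted('', ['example.com'])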
def get_host(environ, trusted_hosts=None):
"""Return the real host for the given WSGI environment. This takes care
of the `X-Forwarded-Host` header. Optionally it verifies that the host
is in a list of trusted hosts. If the host is not in there it will raise
a :exc:`~werkzeug.exceptions.SecurityError`.
:param environ: the WSGI environment to get the host of.
:param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted`
for more information.
"""
if 'HTTP_X_FORWARDED_HOST' in environ:
rv = environ['HTTP_X_FORWARDED_HOST'].split(',')[0].strip()
elif 'HTTP_HOST' in environ:
rv = environ['HTTP_HOST']
else:
rv = environ['SERVER_NAME']
if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not \
in (('https', '443'), ('http', '80')):
rv += ':' + environ['SERVER_PORT']
if trusted_hosts is not None:
if not host_is_trusted(rv, trusted_hosts):
from werkzeug.exceptions import SecurityError
raise SecurityError('Host "%s" is not trusted' % rv)
return rv
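# Illustrative sketch (not part of the original module): the host falls back to
# SERVER_NAME, and the port is appended for non-default ports. The environ dict
# below is a minimal, hypothetical one.
def _example_get_host():
    environ = {'wsgi.url_scheme': 'http', 'SERVER_NAME': 'localhost',
               'SERVER_PORT': '8080'}
    return get_host(environ)  # 'localhost:8080'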
def get_content_length(environ):
"""Returns the content length from the WSGI environment as
integer. If it's not available `None` is returned.
.. versionadded:: 0.9
:param environ: the WSGI environ to fetch the content length from.
"""
content_length = environ.get('CONTENT_LENGTH')
if content_length is not None:
try:
return max(0, int(content_length))
except (ValueError, TypeError):
pass
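# Illustrative sketch (not part of the original module): reading the declared
# content length from a minimal, hypothetical environ dict.
def _example_get_content_length():
    assert get_content_length({'CONTENT_LENGTH': '42'}) == 42
    assert get_content_length({'CONTENT_LENGTH': 'bogus'}) is None
    assert get_content_length({}) is None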
def get_input_stream(environ, safe_fallback=True):
"""Returns the input stream from the WSGI environment and wraps it
in the most sensible way possible. The stream returned is not the
raw WSGI stream in most cases but one that is safe to read from
without taking into account the content length.
.. versionadded:: 0.9
:param environ: the WSGI environ to fetch the stream from.
    :param safe_fallback: indicates whether the function should use an empty
                          stream as safe fallback or just return the original
                          WSGI input stream if it can't wrap it safely. The
                          default is to return an empty stream in those cases.
"""
stream = environ['wsgi.input']
content_length = get_content_length(environ)
# A wsgi extension that tells us if the input is terminated. In
    # that case we return the stream unchanged as we know we can safely
# read it until the end.
if environ.get('wsgi.input_terminated'):
return stream
# If we don't have a content length we fall back to an empty stream
# in case of a safe fallback, otherwise we return the stream unchanged.
# The non-safe fallback is not recommended but might be useful in
# some situations.
if content_length is None:
return safe_fallback and _empty_stream or stream
# Otherwise limit the stream to the content length
return LimitedStream(stream, content_length)
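# Illustrative sketch (not part of the original module): a body with a known
# content length is wrapped in a LimitedStream so reads cannot overrun it.
def _example_get_input_stream():
    from io import BytesIO
    environ = {'wsgi.input': BytesIO(b'body'), 'CONTENT_LENGTH': '4'}
    stream = get_input_stream(environ)
    assert isinstance(stream, LimitedStream)
    return stream.read()  # b'body'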
def get_query_string(environ):
"""Returns the `QUERY_STRING` from the WSGI environment. This also takes
care about the WSGI decoding dance on Python 3 environments as a
native string. The string returned will be restricted to ASCII
characters.
.. versionadded:: 0.9
:param environ: the WSGI environment object to get the query string from.
"""
qs = wsgi_get_bytes(environ.get('QUERY_STRING', ''))
# QUERY_STRING really should be ascii safe but some browsers
# will send us some unicode stuff (I am looking at you IE).
# In that case we want to urllib quote it badly.
return try_coerce_native(url_quote(qs, safe=':&%=+$!*\'(),'))
def get_path_info(environ, charset='utf-8', errors='replace'):
"""Returns the `PATH_INFO` from the WSGI environment and properly
    decodes it. This also takes care of the WSGI decoding dance
    on Python 3 environments. If the `charset` is set to `None` a
bytestring is returned.
.. versionadded:: 0.9
:param environ: the WSGI environment object to get the path from.
:param charset: the charset for the path info, or `None` if no
decoding should be performed.
:param errors: the decoding error handling.
"""
path = wsgi_get_bytes(environ.get('PATH_INFO', ''))
return to_unicode(path, charset, errors, allow_none_charset=True)
def get_script_name(environ, charset='utf-8', errors='replace'):
"""Returns the `SCRIPT_NAME` from the WSGI environment and properly
    decodes it. This also takes care of the WSGI decoding dance
    on Python 3 environments. If the `charset` is set to `None` a
bytestring is returned.
.. versionadded:: 0.9
:param environ: the WSGI environment object to get the path from.
:param charset: the charset for the path, or `None` if no
decoding should be performed.
:param errors: the decoding error handling.
"""
path = wsgi_get_bytes(environ.get('SCRIPT_NAME', ''))
return to_unicode(path, charset, errors, allow_none_charset=True)
def pop_path_info(environ, charset='utf-8', errors='replace'):
"""Removes and returns the next segment of `PATH_INFO`, pushing it onto
`SCRIPT_NAME`. Returns `None` if there is nothing left on `PATH_INFO`.
If the `charset` is set to `None` a bytestring is returned.
    If there are empty segments (``'/foo//bar'``) these are ignored but
properly pushed to the `SCRIPT_NAME`:
>>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
>>> pop_path_info(env)
'a'
>>> env['SCRIPT_NAME']
'/foo/a'
>>> pop_path_info(env)
'b'
>>> env['SCRIPT_NAME']
'/foo/a/b'
.. versionadded:: 0.5
.. versionchanged:: 0.9
The path is now decoded and a charset and encoding
parameter can be provided.
:param environ: the WSGI environment that is modified.
"""
path = environ.get('PATH_INFO')
if not path:
return None
script_name = environ.get('SCRIPT_NAME', '')
# shift multiple leading slashes over
old_path = path
path = path.lstrip('/')
if path != old_path:
script_name += '/' * (len(old_path) - len(path))
if '/' not in path:
environ['PATH_INFO'] = ''
environ['SCRIPT_NAME'] = script_name + path
rv = wsgi_get_bytes(path)
else:
segment, path = path.split('/', 1)
environ['PATH_INFO'] = '/' + path
environ['SCRIPT_NAME'] = script_name + segment
rv = wsgi_get_bytes(segment)
return to_unicode(rv, charset, errors, allow_none_charset=True)
def peek_path_info(environ, charset='utf-8', errors='replace'):
"""Returns the next segment on the `PATH_INFO` or `None` if there
is none. Works like :func:`pop_path_info` without modifying the
environment:
>>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
>>> peek_path_info(env)
'a'
>>> peek_path_info(env)
'a'
If the `charset` is set to `None` a bytestring is returned.
.. versionadded:: 0.5
.. versionchanged:: 0.9
The path is now decoded and a charset and encoding
parameter can be provided.
:param environ: the WSGI environment that is checked.
"""
segments = environ.get('PATH_INFO', '').lstrip('/').split('/', 1)
if segments:
return to_unicode(wsgi_get_bytes(segments[0]),
charset, errors, allow_none_charset=True)
def extract_path_info(environ_or_baseurl, path_or_url, charset='utf-8',
errors='replace', collapse_http_schemes=True):
"""Extracts the path info from the given URL (or WSGI environment) and
path. The path info returned is a unicode string, not a bytestring
suitable for a WSGI environment. The URLs might also be IRIs.
If the path info could not be determined, `None` is returned.
Some examples:
>>> extract_path_info('http://example.com/app', '/app/hello')
u'/hello'
>>> extract_path_info('http://example.com/app',
... 'https://example.com/app/hello')
u'/hello'
>>> extract_path_info('http://example.com/app',
... 'https://example.com/app/hello',
... collapse_http_schemes=False) is None
True
Instead of providing a base URL you can also pass a WSGI environment.
.. versionadded:: 0.6
:param environ_or_baseurl: a WSGI environment dict, a base URL or
base IRI. This is the root of the
application.
:param path_or_url: an absolute path from the server root, a
relative path (in which case it's the path info)
or a full URL. Also accepts IRIs and unicode
parameters.
:param charset: the charset for byte data in URLs
:param errors: the error handling on decode
:param collapse_http_schemes: if set to `False` the algorithm does
not assume that http and https on the
same server point to the same
resource.
"""
def _normalize_netloc(scheme, netloc):
parts = netloc.split(u'@', 1)[-1].split(u':', 1)
if len(parts) == 2:
netloc, port = parts
if (scheme == u'http' and port == u'80') or \
(scheme == u'https' and port == u'443'):
port = None
else:
netloc = parts[0]
port = None
if port is not None:
netloc += u':' + port
return netloc
# make sure whatever we are working on is a IRI and parse it
path = uri_to_iri(path_or_url, charset, errors)
if isinstance(environ_or_baseurl, dict):
environ_or_baseurl = get_current_url(environ_or_baseurl,
root_only=True)
base_iri = uri_to_iri(environ_or_baseurl, charset, errors)
base_scheme, base_netloc, base_path = url_parse(base_iri)[:3]
cur_scheme, cur_netloc, cur_path, = \
url_parse(url_join(base_iri, path))[:3]
# normalize the network location
base_netloc = _normalize_netloc(base_scheme, base_netloc)
cur_netloc = _normalize_netloc(cur_scheme, cur_netloc)
# is that IRI even on a known HTTP scheme?
if collapse_http_schemes:
for scheme in base_scheme, cur_scheme:
if scheme not in (u'http', u'https'):
return None
else:
if not (base_scheme in (u'http', u'https') and \
base_scheme == cur_scheme):
return None
# are the netlocs compatible?
if base_netloc != cur_netloc:
return None
# are we below the application path?
base_path = base_path.rstrip(u'/')
if not cur_path.startswith(base_path):
return None
return u'/' + cur_path[len(base_path):].lstrip(u'/')
class SharedDataMiddleware(object):
"""A WSGI middleware that provides static content for development
environments or simple server setups. Usage is quite simple::
import os
from werkzeug.wsgi import SharedDataMiddleware
app = SharedDataMiddleware(app, {
'/shared': os.path.join(os.path.dirname(__file__), 'shared')
})
The contents of the folder ``./shared`` will now be available on
``http://example.com/shared/``. This is pretty useful during development
because a standalone media server is not required. One can also mount
files on the root folder and still continue to use the application because
the shared data middleware forwards all unhandled requests to the
application, even if the requests are below one of the shared folders.
If `pkg_resources` is available you can also tell the middleware to serve
files from package data::
app = SharedDataMiddleware(app, {
'/shared': ('myapplication', 'shared_files')
})
This will then serve the ``shared_files`` folder in the `myapplication`
Python package.
The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch`
rules for files that are not accessible from the web. If `cache` is set to
`False` no caching headers are sent.
    Currently the middleware does not support non-ASCII filenames. If the
    encoding on the file system happens to be the encoding of the URI it may
    work, but this could also be by accident. We strongly suggest using
    ASCII-only file names for static files.
    The middleware will guess the mimetype using the Python `mimetypes`
    module. If it's unable to figure out the mimetype it will fall back
    to `fallback_mimetype`.
.. versionchanged:: 0.5
The cache timeout is configurable now.
.. versionadded:: 0.6
The `fallback_mimetype` parameter was added.
:param app: the application to wrap. If you don't want to wrap an
application you can pass it :exc:`NotFound`.
:param exports: a dict of exported files and folders.
:param disallow: a list of :func:`~fnmatch.fnmatch` rules.
:param fallback_mimetype: the fallback mimetype for unknown files.
:param cache: enable or disable caching headers.
    :param cache_timeout: the cache timeout in seconds for the headers.
"""
def __init__(self, app, exports, disallow=None, cache=True,
cache_timeout=60 * 60 * 12, fallback_mimetype='text/plain'):
self.app = app
self.exports = {}
self.cache = cache
self.cache_timeout = cache_timeout
for key, value in iteritems(exports):
if isinstance(value, tuple):
loader = self.get_package_loader(*value)
elif isinstance(value, string_types):
if os.path.isfile(value):
loader = self.get_file_loader(value)
else:
loader = self.get_directory_loader(value)
else:
raise TypeError('unknown def %r' % value)
self.exports[key] = loader
if disallow is not None:
from fnmatch import fnmatch
self.is_allowed = lambda x: not fnmatch(x, disallow)
self.fallback_mimetype = fallback_mimetype
def is_allowed(self, filename):
"""Subclasses can override this method to disallow the access to
certain files. However by providing `disallow` in the constructor
this method is overwritten.
"""
return True
def _opener(self, filename):
return lambda: (
open(filename, 'rb'),
datetime.utcfromtimestamp(os.path.getmtime(filename)),
int(os.path.getsize(filename))
)
def get_file_loader(self, filename):
return lambda x: (os.path.basename(filename), self._opener(filename))
def get_package_loader(self, package, package_path):
from pkg_resources import DefaultProvider, ResourceManager, \
get_provider
loadtime = datetime.utcnow()
provider = get_provider(package)
manager = ResourceManager()
filesystem_bound = isinstance(provider, DefaultProvider)
def loader(path):
if path is None:
return None, None
path = posixpath.join(package_path, path)
if not provider.has_resource(path):
return None, None
basename = posixpath.basename(path)
if filesystem_bound:
return basename, self._opener(
provider.get_resource_filename(manager, path))
return basename, lambda: (
provider.get_resource_stream(manager, path),
loadtime,
0
)
return loader
def get_directory_loader(self, directory):
def loader(path):
if path is not None:
path = os.path.join(directory, path)
else:
path = directory
if os.path.isfile(path):
return os.path.basename(path), self._opener(path)
return None, None
return loader
def generate_etag(self, mtime, file_size, real_filename):
if not isinstance(real_filename, bytes):
real_filename = real_filename.encode(sys.getfilesystemencoding())
return 'wzsdm-%d-%s-%s' % (
mktime(mtime.timetuple()),
file_size,
adler32(real_filename) & 0xffffffff
)
def __call__(self, environ, start_response):
cleaned_path = get_path_info(environ)
if PY2:
cleaned_path = cleaned_path.encode(sys.getfilesystemencoding())
# sanitize the path for non unix systems
cleaned_path = cleaned_path.strip('/')
for sep in os.sep, os.altsep:
if sep and sep != '/':
cleaned_path = cleaned_path.replace(sep, '/')
path = '/'.join([''] + [x for x in cleaned_path.split('/')
if x and x != '..'])
file_loader = None
for search_path, loader in iteritems(self.exports):
if search_path == path:
real_filename, file_loader = loader(None)
if file_loader is not None:
break
if not search_path.endswith('/'):
search_path += '/'
if path.startswith(search_path):
real_filename, file_loader = loader(path[len(search_path):])
if file_loader is not None:
break
if file_loader is None or not self.is_allowed(real_filename):
return self.app(environ, start_response)
guessed_type = mimetypes.guess_type(real_filename)
mime_type = guessed_type[0] or self.fallback_mimetype
f, mtime, file_size = file_loader()
headers = [('Date', http_date())]
if self.cache:
timeout = self.cache_timeout
etag = self.generate_etag(mtime, file_size, real_filename)
headers += [
('Etag', '"%s"' % etag),
('Cache-Control', 'max-age=%d, public' % timeout)
]
if not is_resource_modified(environ, etag, last_modified=mtime):
f.close()
start_response('304 Not Modified', headers)
return []
headers.append(('Expires', http_date(time() + timeout)))
else:
headers.append(('Cache-Control', 'public'))
headers.extend((
('Content-Type', mime_type),
('Content-Length', str(file_size)),
('Last-Modified', http_date(mtime))
))
start_response('200 OK', headers)
return wrap_file(environ, f)
class DispatcherMiddleware(object):
"""Allows one to mount middlewares or applications in a WSGI application.
This is useful if you want to combine multiple WSGI applications::
app = DispatcherMiddleware(app, {
'/app2': app2,
'/app3': app3
})
"""
def __init__(self, app, mounts=None):
self.app = app
self.mounts = mounts or {}
def __call__(self, environ, start_response):
script = environ.get('PATH_INFO', '')
path_info = ''
while '/' in script:
if script in self.mounts:
app = self.mounts[script]
break
items = script.split('/')
script = '/'.join(items[:-1])
path_info = '/%s%s' % (items[-1], path_info)
else:
app = self.mounts.get(script, self.app)
original_script_name = environ.get('SCRIPT_NAME', '')
environ['SCRIPT_NAME'] = original_script_name + script
environ['PATH_INFO'] = path_info
return app(environ, start_response)
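# Illustrative sketch (not part of the original module): mounting a second WSGI
# application under a URL prefix. Both applications here are hypothetical.
def _example_dispatcher():
    def main_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'main']
    def api_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'api']
    # Requests below /api go to api_app, everything else to main_app.
    return DispatcherMiddleware(main_app, {'/api': api_app})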
@implements_iterator
class ClosingIterator(object):
"""The WSGI specification requires that all middlewares and gateways
respect the `close` callback of an iterator. Because it is useful to add
another close action to a returned iterator and adding a custom iterator
is a boring task this class can be used for that::
return ClosingIterator(app(environ, start_response), [cleanup_session,
cleanup_locals])
If there is just one close function it can be passed instead of the list.
A closing iterator is not needed if the application uses response objects
and finishes the processing if the response is started::
try:
return response(environ, start_response)
finally:
cleanup_session()
cleanup_locals()
"""
def __init__(self, iterable, callbacks=None):
iterator = iter(iterable)
self._next = partial(next, iterator)
if callbacks is None:
callbacks = []
elif callable(callbacks):
callbacks = [callbacks]
else:
callbacks = list(callbacks)
iterable_close = getattr(iterator, 'close', None)
if iterable_close:
callbacks.insert(0, iterable_close)
self._callbacks = callbacks
def __iter__(self):
return self
def __next__(self):
return self._next()
def close(self):
for callback in self._callbacks:
callback()
def wrap_file(environ, file, buffer_size=8192):
"""Wraps a file. This uses the WSGI server's file wrapper if available
or otherwise the generic :class:`FileWrapper`.
.. versionadded:: 0.5
If the file wrapper from the WSGI server is used it's important to not
iterate over it from inside the application but to pass it through
unchanged. If you want to pass out a file wrapper inside a response
object you have to set :attr:`~BaseResponse.direct_passthrough` to `True`.
More information about file wrappers are available in :pep:`333`.
:param file: a :class:`file`-like object with a :meth:`~file.read` method.
:param buffer_size: number of bytes for one iteration.
"""
return environ.get('wsgi.file_wrapper', FileWrapper)(file, buffer_size)
@implements_iterator
class FileWrapper(object):
"""This class can be used to convert a :class:`file`-like object into
an iterable. It yields `buffer_size` blocks until the file is fully
read.
You should not use this class directly but rather use the
:func:`wrap_file` function that uses the WSGI server's file wrapper
support if it's available.
.. versionadded:: 0.5
If you're using this object together with a :class:`BaseResponse` you have
to use the `direct_passthrough` mode.
:param file: a :class:`file`-like object with a :meth:`~file.read` method.
:param buffer_size: number of bytes for one iteration.
"""
def __init__(self, file, buffer_size=8192):
self.file = file
self.buffer_size = buffer_size
def close(self):
if hasattr(self.file, 'close'):
self.file.close()
def __iter__(self):
return self
def __next__(self):
data = self.file.read(self.buffer_size)
if data:
return data
raise StopIteration()
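# Illustrative sketch (not part of the original module): serving a file through
# the server's file wrapper when one is available. The path is a placeholder.
def _example_file_app(environ, start_response):
    f = open('/path/to/static/file.bin', 'rb')  # hypothetical file
    start_response('200 OK', [('Content-Type', 'application/octet-stream')])
    return wrap_file(environ, f)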
def _make_chunk_iter(stream, limit, buffer_size):
"""Helper for the line and chunk iter functions."""
if isinstance(stream, (bytes, bytearray, text_type)):
raise TypeError('Passed a string or byte object instead of '
'true iterator or stream.')
if not hasattr(stream, 'read'):
for item in stream:
if item:
yield item
return
if not isinstance(stream, LimitedStream) and limit is not None:
stream = LimitedStream(stream, limit)
_read = stream.read
while 1:
item = _read(buffer_size)
if not item:
break
yield item
def make_line_iter(stream, limit=None, buffer_size=10 * 1024):
"""Safely iterates line-based over an input stream. If the input stream
is not a :class:`LimitedStream` the `limit` parameter is mandatory.
    This uses the stream's :meth:`~file.read` method internally as opposed
    to the :meth:`~file.readline` method that is unsafe and can only be used
in violation of the WSGI specification. The same problem applies to the
`__iter__` function of the input stream which calls :meth:`~file.readline`
without arguments.
If you need line-by-line processing it's strongly recommended to iterate
over the input stream using this helper function.
.. versionchanged:: 0.8
This function now ensures that the limit was reached.
.. versionadded:: 0.9
added support for iterators as input stream.
    :param stream: the stream or iterable to iterate over.
    :param limit: the limit in bytes for the stream. (Usually
                  content length. Not necessary if the `stream`
                  is a :class:`LimitedStream`.)
:param buffer_size: The optional buffer size.
"""
_iter = _make_chunk_iter(stream, limit, buffer_size)
first_item = next(_iter, '')
if not first_item:
return
s = make_literal_wrapper(first_item)
empty = s('')
cr = s('\r')
lf = s('\n')
crlf = s('\r\n')
_iter = chain((first_item,), _iter)
def _iter_basic_lines():
_join = empty.join
buffer = []
while 1:
new_data = next(_iter, '')
if not new_data:
break
new_buf = []
for item in chain(buffer, new_data.splitlines(True)):
new_buf.append(item)
if item and item[-1:] in crlf:
yield _join(new_buf)
new_buf = []
buffer = new_buf
if buffer:
yield _join(buffer)
# This hackery is necessary to merge 'foo\r' and '\n' into one item
# of 'foo\r\n' if we were unlucky and we hit a chunk boundary.
previous = empty
for item in _iter_basic_lines():
if item == lf and previous[-1:] == cr:
previous += item
item = empty
if previous:
yield previous
previous = item
if previous:
yield previous
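# Illustrative sketch (not part of the original module): safe line-by-line
# iteration over a request body using the helpers defined in this module.
def _example_iter_lines(environ):
    stream = get_input_stream(environ)
    limit = get_content_length(environ)
    return [line for line in make_line_iter(stream, limit=limit)]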
def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024):
"""Works like :func:`make_line_iter` but accepts a separator
which divides chunks. If you want newline based processing
you should use :func:`make_line_iter` instead as it
supports arbitrary newline markers.
.. versionadded:: 0.8
.. versionadded:: 0.9
added support for iterators as input stream.
    :param stream: the stream or iterable to iterate over.
:param separator: the separator that divides chunks.
:param limit: the limit in bytes for the stream. (Usually
content length. Not necessary if the `stream`
is otherwise already limited).
:param buffer_size: The optional buffer size.
"""
_iter = _make_chunk_iter(stream, limit, buffer_size)
first_item = next(_iter, '')
if not first_item:
return
_iter = chain((first_item,), _iter)
if isinstance(first_item, text_type):
separator = to_unicode(separator)
_split = re.compile(r'(%s)' % re.escape(separator)).split
_join = u''.join
else:
separator = to_bytes(separator)
_split = re.compile(b'(' + re.escape(separator) + b')').split
_join = b''.join
buffer = []
while 1:
new_data = next(_iter, '')
if not new_data:
break
chunks = _split(new_data)
new_buf = []
for item in chain(buffer, chunks):
if item == separator:
yield _join(new_buf)
new_buf = []
else:
new_buf.append(item)
buffer = new_buf
if buffer:
yield _join(buffer)
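# Illustrative sketch (not part of the original module): splitting an in-memory
# stream on a custom separator.
def _example_iter_chunks():
    from io import BytesIO
    chunks = list(make_chunk_iter(BytesIO(b'a|b|c'), separator=b'|', limit=5))
    assert chunks == [b'a', b'b', b'c']
    return chunks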
@implements_iterator
class LimitedStream(object):
"""Wraps a stream so that it doesn't read more than n bytes. If the
stream is exhausted and the caller tries to get more bytes from it
:func:`on_exhausted` is called which by default returns an empty
string. The return value of that function is forwarded
to the reader function. So if it returns an empty string
:meth:`read` will return an empty string as well.
The limit however must never be higher than what the stream can
output. Otherwise :meth:`readlines` will try to read past the
limit.
.. admonition:: Note on WSGI compliance
calls to :meth:`readline` and :meth:`readlines` are not
WSGI compliant because it passes a size argument to the
readline methods. Unfortunately the WSGI PEP is not safely
implementable without a size argument to :meth:`readline`
because there is no EOF marker in the stream. As a result
of that the use of :meth:`readline` is discouraged.
For the same reason iterating over the :class:`LimitedStream`
is not portable. It internally calls :meth:`readline`.
We strongly suggest using :meth:`read` only or using the
:func:`make_line_iter` which safely iterates line-based
over a WSGI input stream.
:param stream: the stream to wrap.
:param limit: the limit for the stream, must not be longer than
                  what the stream can provide if the stream does not
end with `EOF` (like `wsgi.input`)
"""
def __init__(self, stream, limit):
self._read = stream.read
self._readline = stream.readline
self._pos = 0
self.limit = limit
def __iter__(self):
return self
@property
def is_exhausted(self):
"""If the stream is exhausted this attribute is `True`."""
return self._pos >= self.limit
def on_exhausted(self):
"""This is called when the stream tries to read past the limit.
The return value of this function is returned from the reading
function.
"""
# Read null bytes from the stream so that we get the
# correct end of stream marker.
return self._read(0)
def on_disconnect(self):
"""What should happen if a disconnect is detected? The return
value of this function is returned from read functions in case
the client went away. By default a
:exc:`~werkzeug.exceptions.ClientDisconnected` exception is raised.
"""
from werkzeug.exceptions import ClientDisconnected
raise ClientDisconnected()
def exhaust(self, chunk_size=1024 * 64):
"""Exhaust the stream. This consumes all the data left until the
limit is reached.
:param chunk_size: the size for a chunk. It will read the chunk
until the stream is exhausted and throw away
the results.
"""
to_read = self.limit - self._pos
chunk = chunk_size
while to_read > 0:
chunk = min(to_read, chunk)
self.read(chunk)
to_read -= chunk
def read(self, size=None):
"""Read `size` bytes or if size is not provided everything is read.
:param size: the number of bytes read.
"""
if self._pos >= self.limit:
return self.on_exhausted()
        if size is None or size == -1:  # -1 is for consistency with file objects
size = self.limit
to_read = min(self.limit - self._pos, size)
try:
read = self._read(to_read)
except (IOError, ValueError):
return self.on_disconnect()
if to_read and len(read) != to_read:
return self.on_disconnect()
self._pos += len(read)
return read
def readline(self, size=None):
"""Reads one line from the stream."""
if self._pos >= self.limit:
return self.on_exhausted()
if size is None:
size = self.limit - self._pos
else:
size = min(size, self.limit - self._pos)
try:
line = self._readline(size)
except (ValueError, IOError):
return self.on_disconnect()
if size and not line:
return self.on_disconnect()
self._pos += len(line)
return line
def readlines(self, size=None):
"""Reads a file into a list of strings. It calls :meth:`readline`
until the file is read to the end. It does support the optional
        `size` argument if the underlying stream supports it for
`readline`.
"""
last_pos = self._pos
result = []
if size is not None:
end = min(self.limit, last_pos + size)
else:
end = self.limit
while 1:
if size is not None:
size -= last_pos - self._pos
if self._pos >= end:
break
result.append(self.readline(size))
if size is not None:
last_pos = self._pos
return result
def tell(self):
"""Returns the position of the stream.
.. versionadded:: 0.9
"""
return self._pos
def __next__(self):
line = self.readline()
if not line:
raise StopIteration()
return line
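# Illustrative sketch (not part of the original module): a LimitedStream never
# reads past its declared limit, even if the wrapped stream holds more data.
def _example_limited_stream():
    from io import BytesIO
    stream = LimitedStream(BytesIO(b'abcdef'), 4)
    data = stream.read()        # only the first ``limit`` bytes: b'abcd'
    assert stream.is_exhausted  # further reads return the empty string
    return data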
| apache-2.0 |
cjayb/mne-python | mne/channels/montage.py | 1 | 41279 | # Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Marijn van Vliet <[email protected]>
# Jona Sassenhagen <[email protected]>
# Teon Brooks <[email protected]>
# Christian Brodbeck <[email protected]>
# Stefan Appelhoff <[email protected]>
# Joan Massich <[email protected]>
#
# License: Simplified BSD
from collections import OrderedDict
from copy import deepcopy
from functools import partial
import os.path as op
import re
import numpy as np
from ..defaults import HEAD_SIZE_DEFAULT
from ..viz import plot_montage
from ..transforms import (apply_trans, get_ras_to_neuromag_trans, _sph_to_cart,
_topo_to_sph, _frame_to_str, Transform,
_verbose_frames, _fit_matched_points,
_quat_to_affine)
from ..io._digitization import (_count_points_by_type,
_get_dig_eeg, _make_dig_points, write_dig,
_read_dig_fif, _format_dig_points,
_get_fid_coords, _coord_frame_const)
from ..io.meas_info import create_info
from ..io.open import fiff_open
from ..io.pick import pick_types
from ..io.constants import FIFF
from ..utils import (warn, copy_function_doc_to_method_doc, _pl,
_check_option, _validate_type, _check_fname, _on_missing,
fill_doc, deprecated)
from ._dig_montage_utils import _read_dig_montage_egi
from ._dig_montage_utils import _parse_brainvision_dig_montage
_BUILT_IN_MONTAGES = [
'EGI_256',
'GSN-HydroCel-128', 'GSN-HydroCel-129', 'GSN-HydroCel-256',
'GSN-HydroCel-257', 'GSN-HydroCel-32', 'GSN-HydroCel-64_1.0',
'GSN-HydroCel-65_1.0',
'biosemi128', 'biosemi16', 'biosemi160', 'biosemi256',
'biosemi32', 'biosemi64',
'easycap-M1', 'easycap-M10',
'mgh60', 'mgh70',
'standard_1005', 'standard_1020', 'standard_alphabetic',
'standard_postfixed', 'standard_prefixed', 'standard_primed'
]
def _check_get_coord_frame(dig):
_MSG = 'Only single coordinate frame in dig is supported'
dig_coord_frames = set([d['coord_frame'] for d in dig])
assert len(dig_coord_frames) <= 1, _MSG
return _frame_to_str[dig_coord_frames.pop()] if dig_coord_frames else None
def get_builtin_montages():
"""Get a list of all builtin montages.
Returns
-------
montages : list
Names of all builtin montages that can be used by
:func:`make_standard_montage`.
"""
return _BUILT_IN_MONTAGES
def make_dig_montage(ch_pos=None, nasion=None, lpa=None, rpa=None,
hsp=None, hpi=None, coord_frame='unknown'):
r"""Make montage from arrays.
Parameters
----------
ch_pos : dict
Dictionary of channel positions. Keys are channel names and values
are 3D coordinates - array of shape (3,) - in native digitizer space
in m.
nasion : None | array, shape (3,)
The position of the nasion fiducial point.
This point is assumed to be in the native digitizer space in m.
lpa : None | array, shape (3,)
The position of the left periauricular fiducial point.
This point is assumed to be in the native digitizer space in m.
rpa : None | array, shape (3,)
The position of the right periauricular fiducial point.
This point is assumed to be in the native digitizer space in m.
hsp : None | array, shape (n_points, 3)
This corresponds to an array of positions of the headshape points in
3d. These points are assumed to be in the native digitizer space in m.
hpi : None | array, shape (n_hpi, 3)
This corresponds to an array of HPI points in the native digitizer
        space. They are only necessary if a device-to-head transformation
        is to be computed (see ``compute_dev_head_t``).
coord_frame : str
The coordinate frame of the points. Usually this is "unknown"
for native digitizer space.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
DigMontage
read_dig_captrak
read_dig_egi
read_dig_fif
read_dig_polhemus_isotrak
"""
if ch_pos is None:
ch_names = None
else:
ch_names = list(ch_pos)
dig = _make_dig_points(
nasion=nasion, lpa=lpa, rpa=rpa, hpi=hpi, extra_points=hsp,
dig_ch_pos=ch_pos, coord_frame=coord_frame
)
return DigMontage(dig=dig, ch_names=ch_names)
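# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Minimal example of building a montage from in-memory positions with the
# function defined above.  Channel names and coordinates are invented.
def _make_dig_montage_example():
    import numpy as np
    ch_pos = {
        'EEG001': np.array([0.00, 0.09, 0.04]),   # arbitrary positions in meters
        'EEG002': np.array([0.02, 0.08, 0.05]),
    }
    montage = make_dig_montage(
        ch_pos=ch_pos,
        nasion=np.array([0.0, 0.10, 0.0]),
        lpa=np.array([-0.08, 0.0, 0.0]),
        rpa=np.array([0.08, 0.0, 0.0]),
        coord_frame='unknown',
    )
    return montage  # <DigMontage | 0 extras (headshape), 0 HPIs, 3 fiducials, 2 channels>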
class DigMontage(object):
"""Montage for digitized electrode and headshape position data.
.. warning:: Montages are typically created using one of the helper
functions in the ``See Also`` section below instead of
instantiating this class directly.
Parameters
----------
dev_head_t : array, shape (4, 4)
A Device-to-Head transformation matrix.
dig : list of dict
The object containing all the dig points.
ch_names : list of str
The names of the EEG channels.
See Also
--------
read_dig_captrak
read_dig_dat
read_dig_egi
read_dig_fif
read_dig_hpts
read_dig_polhemus_isotrak
make_dig_montage
Notes
-----
.. versionadded:: 0.9.0
"""
def __init__(self, dev_head_t=None, dig=None, ch_names=None):
# XXX: dev_head_t now is np.array, we should add dev_head_transform
# (being instance of Transformation) and move the parameter to the
# end of the call.
dig = list() if dig is None else dig
_validate_type(item=dig, types=list, item_name='dig')
ch_names = list() if ch_names is None else ch_names
n_eeg = sum([1 for d in dig if d['kind'] == FIFF.FIFFV_POINT_EEG])
if n_eeg != len(ch_names):
raise ValueError(
'The number of EEG channels (%d) does not match the number'
' of channel names provided (%d)' % (n_eeg, len(ch_names))
)
self.dev_head_t = dev_head_t
self.dig = dig
self.ch_names = ch_names
def __repr__(self):
"""Return string representation."""
n_points = _count_points_by_type(self.dig)
return ('<DigMontage | {extra:d} extras (headshape), {hpi:d} HPIs,'
' {fid:d} fiducials, {eeg:d} channels>').format(**n_points)
@copy_function_doc_to_method_doc(plot_montage)
def plot(self, scale_factor=20, show_names=True, kind='topomap', show=True,
sphere=None):
return plot_montage(self, scale_factor=scale_factor,
show_names=show_names, kind=kind, show=show,
sphere=sphere)
def rename_channels(self, mapping):
"""Rename the channels.
Parameters
----------
%(rename_channels_mapping)s
Returns
-------
inst : instance of DigMontage
The instance. Operates in-place.
"""
from .channels import rename_channels
temp_info = create_info(list(self._get_ch_pos()), 1000., 'eeg')
rename_channels(temp_info, mapping)
self.ch_names = temp_info['ch_names']
def save(self, fname):
"""Save digitization points to FIF.
Parameters
----------
fname : str
The filename to use. Should end in .fif or .fif.gz.
"""
if _check_get_coord_frame(self.dig) != 'head':
raise RuntimeError('Can only write out digitization points in '
'head coordinates.')
write_dig(fname, self.dig)
def __iadd__(self, other):
"""Add two DigMontages in place.
Notes
-----
Two DigMontages can only be added if there are no duplicated ch_names
and if fiducials are present they should share the same coordinate
system and location values.
"""
def is_fid_defined(fid):
return not(
fid.nasion is None and fid.lpa is None and fid.rpa is None
)
# Check for none duplicated ch_names
ch_names_intersection = set(self.ch_names).intersection(other.ch_names)
if ch_names_intersection:
raise RuntimeError((
"Cannot add two DigMontage objects if they contain duplicated"
" channel names. Duplicated channel(s) found: {}."
).format(
', '.join(['%r' % v for v in sorted(ch_names_intersection)])
))
# Check for unique matching fiducials
self_fid, self_coord = _get_fid_coords(self.dig)
other_fid, other_coord = _get_fid_coords(other.dig)
if is_fid_defined(self_fid) and is_fid_defined(other_fid):
if self_coord != other_coord:
raise RuntimeError('Cannot add two DigMontage objects if '
'fiducial locations are not in the same '
'coordinate system.')
for kk in self_fid:
if not np.array_equal(self_fid[kk], other_fid[kk]):
raise RuntimeError('Cannot add two DigMontage objects if '
'fiducial locations do not match '
'(%s)' % kk)
# keep self
self.dig = _format_dig_points(
self.dig + [d for d in other.dig
if d['kind'] != FIFF.FIFFV_POINT_CARDINAL]
)
else:
self.dig = _format_dig_points(self.dig + other.dig)
self.ch_names += other.ch_names
return self
def copy(self):
"""Copy the DigMontage object.
Returns
-------
dig : instance of DigMontage
The copied DigMontage instance.
"""
return deepcopy(self)
def __add__(self, other):
"""Add two DigMontages."""
out = self.copy()
out += other
return out
def _get_ch_pos(self):
pos = [d['r'] for d in _get_dig_eeg(self.dig)]
assert len(self.ch_names) == len(pos)
return OrderedDict(zip(self.ch_names, pos))
def _get_dig_names(self):
NAMED_KIND = (FIFF.FIFFV_POINT_EEG,)
is_eeg = np.array([d['kind'] in NAMED_KIND for d in self.dig])
assert len(self.ch_names) == is_eeg.sum()
dig_names = [None] * len(self.dig)
for ch_name_idx, dig_idx in enumerate(np.where(is_eeg)[0]):
dig_names[dig_idx] = self.ch_names[ch_name_idx]
return dig_names
VALID_SCALES = dict(mm=1e-3, cm=1e-2, m=1)
def _check_unit_and_get_scaling(unit):
_check_option('unit', unit, sorted(VALID_SCALES.keys()))
return VALID_SCALES[unit]
def transform_to_head(montage):
"""Transform a DigMontage object into head coordinate.
    It requires that the LPA, RPA and nasion fiducial
    points are available. It requires that all fiducial
    points are in the same coordinate frame (e.g. 'unknown')
    and it will convert all the points in this coordinate
    system to the Neuromag head coordinate system.
Parameters
----------
montage : instance of DigMontage
The montage.
Returns
-------
montage : instance of DigMontage
The montage after transforming the points to head
coordinate system.
"""
# Get fiducial points and their coord_frame
native_head_t = compute_native_head_t(montage)
montage = montage.copy() # to avoid inplace modification
if native_head_t['from'] != FIFF.FIFFV_COORD_HEAD:
for d in montage.dig:
if d['coord_frame'] == native_head_t['from']:
d['r'] = apply_trans(native_head_t, d['r'])
d['coord_frame'] = FIFF.FIFFV_COORD_HEAD
return montage
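# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Converting a montage digitized in an 'unknown' frame to Neuromag head
# coordinates, as described in the docstring above.  Positions are invented.
def _transform_to_head_example():
    import numpy as np
    montage = make_dig_montage(
        ch_pos={'EEG001': np.array([0.01, 0.08, 0.04])},
        nasion=np.array([0.0, 0.10, 0.0]),
        lpa=np.array([-0.08, 0.0, 0.0]),
        rpa=np.array([0.08, 0.0, 0.0]),
        coord_frame='unknown',
    )
    montage_head = transform_to_head(montage)  # all points now in 'head' coordinates
    return montage_head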
def read_dig_dat(fname):
r"""Read electrode positions from a ``*.dat`` file.
.. Warning::
This function was implemented based on ``*.dat`` files available from
`Compumedics <https://compumedicsneuroscan.com/scan-acquire-
configuration-files/>`__ and might not work as expected with novel
files. If it does not read your files correctly please contact the
mne-python developers.
Parameters
----------
fname : path-like
File from which to read electrode locations.
Returns
-------
montage : DigMontage
The montage.
See Also
--------
read_dig_captrak
read_dig_dat
read_dig_egi
read_dig_fif
read_dig_hpts
read_dig_polhemus_isotrak
make_dig_montage
Notes
-----
``*.dat`` files are plain text files and can be inspected and amended with
a plain text editor.
"""
from ._standard_montage_utils import _check_dupes_odict
fname = _check_fname(fname, overwrite='read', must_exist=True)
with open(fname, 'r') as fid:
lines = fid.readlines()
ch_names, poss = list(), list()
nasion = lpa = rpa = None
for i, line in enumerate(lines):
items = line.split()
if not items:
continue
elif len(items) != 5:
raise ValueError(
"Error reading %s, line %s has unexpected number of entries:\n"
"%s" % (fname, i, line.rstrip()))
num = items[1]
if num == '67':
continue # centroid
pos = np.array([float(item) for item in items[2:]])
if num == '78':
nasion = pos
elif num == '76':
lpa = pos
elif num == '82':
rpa = pos
else:
ch_names.append(items[0])
poss.append(pos)
electrodes = _check_dupes_odict(ch_names, poss)
return make_dig_montage(electrodes, nasion, lpa, rpa)
def read_dig_fif(fname):
r"""Read digitized points from a .fif file.
Note that electrode names are not present in the .fif file so
they are here defined with the convention from VectorView
systems (EEG001, EEG002, etc.)
Parameters
----------
fname : path-like
FIF file from which to read digitization locations.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
DigMontage
read_dig_dat
read_dig_egi
read_dig_captrak
read_dig_polhemus_isotrak
read_dig_hpts
make_dig_montage
"""
_check_fname(fname, overwrite='read', must_exist=True)
# Load the dig data
f, tree = fiff_open(fname)[:2]
with f as fid:
dig = _read_dig_fif(fid, tree)
ch_names = []
for d in dig:
if d['kind'] == FIFF.FIFFV_POINT_EEG:
ch_names.append('EEG%03d' % d['ident'])
montage = DigMontage(dig=dig, ch_names=ch_names)
return montage
def read_dig_hpts(fname, unit='mm'):
"""Read historical .hpts mne-c files.
Parameters
----------
fname : str
The filepath of .hpts file.
unit : 'm' | 'cm' | 'mm'
Unit of the positions. Defaults to 'mm'.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
DigMontage
read_dig_captrak
read_dig_dat
read_dig_egi
read_dig_fif
read_dig_polhemus_isotrak
make_dig_montage
Notes
-----
    The hpts format digitizer data file may contain comment lines starting
with the pound sign (#) and data lines of the form::
<*category*> <*identifier*> <*x/mm*> <*y/mm*> <*z/mm*>
where:
``<*category*>``
defines the type of points. Allowed categories are: ``hpi``,
``cardinal`` (fiducial), ``eeg``, and ``extra`` corresponding to
head-position indicator coil locations, cardinal landmarks, EEG
electrode locations, and additional head surface points,
respectively.
``<*identifier*>``
identifies the point. The identifiers are usually sequential
numbers. For cardinal landmarks, 1 = left auricular point,
2 = nasion, and 3 = right auricular point. For EEG electrodes,
identifier = 0 signifies the reference electrode.
``<*x/mm*> , <*y/mm*> , <*z/mm*>``
Location of the point, usually in the head coordinate system
        in millimeters. If your points are in meters, the ``unit``
        parameter can be changed accordingly.
For example::
cardinal 2 -5.6729 -12.3873 -30.3671
cardinal 1 -37.6782 -10.4957 91.5228
cardinal 3 -131.3127 9.3976 -22.2363
hpi 1 -30.4493 -11.8450 83.3601
hpi 2 -122.5353 9.2232 -28.6828
hpi 3 -6.8518 -47.0697 -37.0829
hpi 4 7.3744 -50.6297 -12.1376
hpi 5 -33.4264 -43.7352 -57.7756
eeg FP1 3.8676 -77.0439 -13.0212
eeg FP2 -31.9297 -70.6852 -57.4881
eeg F7 -6.1042 -68.2969 45.4939
...
"""
from ._standard_montage_utils import _str_names, _str
_scale = _check_unit_and_get_scaling(unit)
out = np.genfromtxt(fname, comments='#',
dtype=(_str, _str, 'f8', 'f8', 'f8'))
kind, label = _str_names(out['f0']), _str_names(out['f1'])
kind = [k.lower() for k in kind]
xyz = np.array([out['f%d' % ii] for ii in range(2, 5)]).T
xyz *= _scale
del _scale
fid_idx_to_label = {'1': 'lpa', '2': 'nasion', '3': 'rpa'}
fid = {fid_idx_to_label[label[ii]]: this_xyz
for ii, this_xyz in enumerate(xyz) if kind[ii] == 'cardinal'}
ch_pos = {label[ii]: this_xyz
for ii, this_xyz in enumerate(xyz) if kind[ii] == 'eeg'}
hpi = np.array([this_xyz for ii, this_xyz in enumerate(xyz)
if kind[ii] == 'hpi'])
hpi.shape = (-1, 3) # in case it's empty
hsp = np.array([this_xyz for ii, this_xyz in enumerate(xyz)
if kind[ii] == 'extra'])
hsp.shape = (-1, 3) # in case it's empty
return make_dig_montage(ch_pos=ch_pos, **fid, hpi=hpi, hsp=hsp)
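# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Reading an mne-c ``.hpts`` file laid out like the example in the docstring
# above.  The file name is hypothetical.
def _read_dig_hpts_example():
    montage = read_dig_hpts('sample_dig.hpts', unit='mm')  # hypothetical file
    print(montage)  # summary of fiducials, HPI coils, EEG and extra points
    return montage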
def read_dig_egi(fname):
"""Read electrode locations from EGI system.
Parameters
----------
fname : path-like
EGI MFF XML coordinates file from which to read digitization locations.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
DigMontage
read_dig_captrak
read_dig_dat
read_dig_fif
read_dig_hpts
read_dig_polhemus_isotrak
make_dig_montage
"""
_check_fname(fname, overwrite='read', must_exist=True)
data = _read_dig_montage_egi(
fname=fname,
_scaling=1.,
_all_data_kwargs_are_none=True
)
# XXX: to change to the new naming in v.0.20 (all this block should go)
data.pop('point_names')
data['hpi'] = data.pop('elp')
data['ch_pos'] = data.pop('dig_ch_pos')
return make_dig_montage(**data)
def read_dig_captrak(fname):
"""Read electrode locations from CapTrak Brain Products system.
Parameters
----------
fname : path-like
BrainVision CapTrak coordinates file from which to read EEG electrode
locations. This is typically in XML format with the .bvct extension.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
DigMontage
read_dig_dat
read_dig_egi
read_dig_fif
read_dig_hpts
read_dig_polhemus_isotrak
make_dig_montage
"""
_check_fname(fname, overwrite='read', must_exist=True)
data = _parse_brainvision_dig_montage(fname, scale=1e-3)
return make_dig_montage(**data)
@deprecated('read_dig_captrack is deprecated and will be removed in 0.22; '
'please use read_dig_captrak instead '
'(note the spelling correction: captraCK -> captraK).')
def read_dig_captrack(fname):
"""Read electrode locations from CapTrak Brain Products system.
Parameters
----------
fname : path-like
BrainVision CapTrak coordinates file from which to read EEG electrode
locations. This is typically in XML format with the .bvct extension.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
DigMontage
read_dig_dat
read_dig_egi
read_dig_fif
read_dig_hpts
read_dig_polhemus_isotrak
make_dig_montage
"""
return read_dig_captrak(fname)
def _get_montage_in_head(montage):
coords = set([d['coord_frame'] for d in montage.dig])
if len(coords) == 1 and coords.pop() == FIFF.FIFFV_COORD_HEAD:
return montage
else:
return transform_to_head(montage.copy())
@fill_doc
def _set_montage(info, montage, match_case=True, on_missing='raise'):
"""Apply montage to data.
With a DigMontage, this function will replace the digitizer info with
the values specified for the particular montage.
Usually, a montage is expected to contain the positions of all EEG
electrodes and a warning is raised when this is not the case.
Parameters
----------
info : instance of Info
The measurement info to update.
%(montage)s
%(match_case)s
%(on_missing_montage)s
Notes
-----
This function will change the info variable in place.
"""
_validate_type(montage, types=(DigMontage, type(None), str),
item_name='montage')
if isinstance(montage, str): # load builtin montage
_check_option('montage', montage, _BUILT_IN_MONTAGES)
montage = make_standard_montage(montage)
if isinstance(montage, DigMontage):
mnt_head = _get_montage_in_head(montage)
def _backcompat_value(pos, ref_pos):
if any(np.isnan(pos)):
return np.full(6, np.nan)
else:
return np.concatenate((pos, ref_pos))
ch_pos = mnt_head._get_ch_pos()
refs = set(ch_pos) & {'EEG000', 'REF'}
assert len(refs) <= 1
eeg_ref_pos = np.zeros(3) if not(refs) else ch_pos.pop(refs.pop())
# This raises based on info being subset/superset of montage
_pick_chs = partial(
pick_types, exclude=[], eeg=True, seeg=True, ecog=True, meg=False,
)
info_names = [info['ch_names'][ii] for ii in _pick_chs(info)]
dig_names = mnt_head._get_dig_names()
ref_names = [None, 'EEG000', 'REF']
if match_case:
ch_pos_use = ch_pos
info_names_use = info_names
dig_names_use = dig_names
else:
ch_pos_use = OrderedDict(
(name.lower(), pos) for name, pos in ch_pos.items())
info_names_use = [name.lower() for name in info_names]
dig_names_use = [name.lower() if name is not None else name
for name in dig_names]
ref_names = [name.lower() if name is not None else name
for name in ref_names]
n_dup = len(ch_pos) - len(ch_pos_use)
if n_dup:
raise ValueError('Cannot use match_case=False as %s montage '
'name(s) require case sensitivity' % n_dup)
n_dup = len(info_names_use) - len(set(info_names_use))
if n_dup:
raise ValueError('Cannot use match_case=False as %s channel '
'name(s) require case sensitivity' % n_dup)
# warn user if there is not a full overlap of montage with info_chs
not_in_montage = [name for name, use in zip(info_names, info_names_use)
if use not in ch_pos_use]
if len(not_in_montage): # DigMontage is subset of info
missing_coord_msg = (
'DigMontage is only a subset of info. There are '
f'{len(not_in_montage)} channel position{_pl(not_in_montage)} '
'not present in the DigMontage. The required channels are:\n\n'
f'{not_in_montage}.\n\nConsider using inst.set_channel_types '
'if these are not EEG channels, or use the on_missing '
'parameter if the channel positions are allowed to be unknown '
'in your analyses.'
)
_on_missing(on_missing, missing_coord_msg)
# set ch coordinates and names from digmontage or nan coords
ch_pos_use = dict(
(name, ch_pos_use.get(name, [np.nan] * 3))
for name in info_names) # order does not matter here
for name, use in zip(info_names, info_names_use):
_loc_view = info['chs'][info['ch_names'].index(name)]['loc']
_loc_view[:6] = _backcompat_value(ch_pos_use[use], eeg_ref_pos)
del ch_pos_use
# XXX this is probably wrong as it uses the order from the montage
# rather than the order of our info['ch_names'] ...
info['dig'] = _format_dig_points([
mnt_head.dig[ii] for ii, name in enumerate(dig_names_use)
if name in (info_names_use + ref_names)])
if mnt_head.dev_head_t is not None:
info['dev_head_t'] = Transform('meg', 'head', mnt_head.dev_head_t)
else: # None case
info['dig'] = None
for ch in info['chs']:
ch['loc'] = np.full(12, np.nan)
def _read_isotrak_elp_points(fname):
"""Read Polhemus Isotrak digitizer data from a ``.elp`` file.
Parameters
----------
fname : str
The filepath of .elp Polhemus Isotrak file.
Returns
-------
out : dict of arrays
The dictionary containing locations for 'nasion', 'lpa', 'rpa'
and 'points'.
"""
value_pattern = r"\-?\d+\.?\d*e?\-?\d*"
coord_pattern = r"({0})\s+({0})\s+({0})\s*$".format(value_pattern)
with open(fname) as fid:
file_str = fid.read()
points_str = [m.groups() for m in re.finditer(coord_pattern, file_str,
re.MULTILINE)]
points = np.array(points_str, dtype=float)
return {
'nasion': points[0], 'lpa': points[1], 'rpa': points[2],
'points': points[3:]
}
def _read_isotrak_hsp_points(fname):
"""Read Polhemus Isotrak digitizer data from a ``.hsp`` file.
Parameters
----------
fname : str
The filepath of .hsp Polhemus Isotrak file.
Returns
-------
out : dict of arrays
The dictionary containing locations for 'nasion', 'lpa', 'rpa'
and 'points'.
"""
def get_hsp_fiducial(line):
return np.fromstring(line.replace('%F', ''), dtype=float, sep='\t')
with open(fname) as ff:
for line in ff:
if 'position of fiducials' in line.lower():
break
nasion = get_hsp_fiducial(ff.readline())
lpa = get_hsp_fiducial(ff.readline())
rpa = get_hsp_fiducial(ff.readline())
_ = ff.readline()
line = ff.readline()
if line:
n_points, n_cols = np.fromstring(line, dtype=int, sep='\t')
points = np.fromstring(
string=ff.read(), dtype=float, sep='\t',
).reshape(-1, n_cols)
assert points.shape[0] == n_points
else:
points = np.empty((0, 3))
return {
'nasion': nasion, 'lpa': lpa, 'rpa': rpa, 'points': points
}
def read_dig_polhemus_isotrak(fname, ch_names=None, unit='m'):
"""Read Polhemus digitizer data from a file.
Parameters
----------
fname : str
The filepath of Polhemus ISOTrak formatted file.
File extension is expected to be '.hsp', '.elp' or '.eeg'.
ch_names : None | list of str
The names of the points. This will make the points
considered as EEG channels. If None, channels will be assumed
to be HPI if the extension is ``'.elp'``, and extra headshape
points otherwise.
unit : 'm' | 'cm' | 'mm'
        Unit of the digitizer file. Polhemus ISOTrak system data is usually
        exported in meters. Defaults to 'm'.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
DigMontage
make_dig_montage
read_polhemus_fastscan
read_dig_captrak
read_dig_dat
read_dig_egi
read_dig_fif
"""
VALID_FILE_EXT = ('.hsp', '.elp', '.eeg')
_scale = _check_unit_and_get_scaling(unit)
_, ext = op.splitext(fname)
_check_option('fname', ext, VALID_FILE_EXT)
if ext == '.elp':
data = _read_isotrak_elp_points(fname)
else:
        # Default case: read points as hsp since it is the most likely scenario
data = _read_isotrak_hsp_points(fname)
if _scale != 1:
data = {key: val * _scale for key, val in data.items()}
else:
pass # noqa
if ch_names is None:
keyword = 'hpi' if ext == '.elp' else 'hsp'
data[keyword] = data.pop('points')
else:
points = data.pop('points')
if points.shape[0] == len(ch_names):
data['ch_pos'] = OrderedDict(zip(ch_names, points))
else:
raise ValueError((
"Length of ``ch_names`` does not match the number of points"
" in {fname}. Expected ``ch_names`` length {n_points:d},"
" given {n_chnames:d}"
).format(
fname=fname, n_points=points.shape[0], n_chnames=len(ch_names)
))
return make_dig_montage(**data)
def _is_polhemus_fastscan(fname):
header = ''
with open(fname, 'r') as fid:
for line in fid:
if not line.startswith('%'):
break
header += line
return 'FastSCAN' in header
def read_polhemus_fastscan(fname, unit='mm'):
"""Read Polhemus FastSCAN digitizer data from a ``.txt`` file.
Parameters
----------
fname : str
The filepath of .txt Polhemus FastSCAN file.
unit : 'm' | 'cm' | 'mm'
        Unit of the digitizer file. Polhemus FastSCAN system data is usually
        exported in millimeters. Defaults to 'mm'.
Returns
-------
points : array, shape (n_points, 3)
The digitization points in digitizer coordinates.
See Also
--------
read_dig_polhemus_isotrak
make_dig_montage
"""
VALID_FILE_EXT = ['.txt']
_scale = _check_unit_and_get_scaling(unit)
_, ext = op.splitext(fname)
_check_option('fname', ext, VALID_FILE_EXT)
if not _is_polhemus_fastscan(fname):
raise ValueError(
"%s does not contain Polhemus FastSCAN header" % fname)
points = _scale * np.loadtxt(fname, comments='%', ndmin=2)
return points
def _read_eeglab_locations(fname):
ch_names = np.genfromtxt(fname, dtype=str, usecols=3).tolist()
topo = np.loadtxt(fname, dtype=float, usecols=[1, 2])
sph = _topo_to_sph(topo)
pos = _sph_to_cart(sph)
pos[:, [0, 1]] = pos[:, [1, 0]] * [-1, 1]
return ch_names, pos
def read_custom_montage(fname, head_size=HEAD_SIZE_DEFAULT, coord_frame=None):
"""Read a montage from a file.
Parameters
----------
fname : str
File extension is expected to be:
'.loc' or '.locs' or '.eloc' (for EEGLAB files),
'.sfp' (BESA/EGI files), '.csd',
'.elc', '.txt', '.csd', '.elp' (BESA spherical),
'.bvef' (BrainVision files).
head_size : float | None
The size of the head (radius, in [m]). If ``None``, returns the values
read from the montage file with no modification. Defaults to 0.095m.
coord_frame : str | None
The coordinate frame of the points. Usually this is "unknown"
for native digitizer space. Defaults to None, which is "unknown" for
most readers but "head" for EEGLAB.
.. versionadded:: 0.20
Returns
-------
montage : instance of DigMontage
The montage.
Notes
-----
    The function is a helper to read electrode positions you may have
    in various formats. Most of these formats are weakly specified
    in terms of units and coordinate systems, which implies that setting
    a montage using a DigMontage produced by this function may
    be problematic. If you use a standard/template montage (e.g. 10/20,
    10/10 or 10/05) we recommend you use :func:`make_standard_montage`.
    If you have positions in memory you can also use
    :func:`make_dig_montage`, which takes arrays as input.
See Also
--------
make_dig_montage
make_standard_montage
"""
from ._standard_montage_utils import (
_read_theta_phi_in_degrees, _read_sfp, _read_csd, _read_elc,
_read_elp_besa, _read_brainvision
)
SUPPORTED_FILE_EXT = {
'eeglab': ('.loc', '.locs', '.eloc', ),
'hydrocel': ('.sfp', ),
'matlab': ('.csd', ),
'asa electrode': ('.elc', ),
'generic (Theta-phi in degrees)': ('.txt', ),
'standard BESA spherical': ('.elp', ), # XXX: not same as polhemus elp
'brainvision': ('.bvef', ),
}
_, ext = op.splitext(fname)
_check_option('fname', ext, list(sum(SUPPORTED_FILE_EXT.values(), ())))
if ext in SUPPORTED_FILE_EXT['eeglab']:
if head_size is None:
raise ValueError(
"``head_size`` cannot be None for '{}'".format(ext))
ch_names, pos = _read_eeglab_locations(fname)
scale = head_size / np.median(np.linalg.norm(pos, axis=-1))
pos *= scale
montage = make_dig_montage(
ch_pos=OrderedDict(zip(ch_names, pos)),
coord_frame='head',
)
elif ext in SUPPORTED_FILE_EXT['hydrocel']:
montage = _read_sfp(fname, head_size=head_size)
elif ext in SUPPORTED_FILE_EXT['matlab']:
montage = _read_csd(fname, head_size=head_size)
elif ext in SUPPORTED_FILE_EXT['asa electrode']:
montage = _read_elc(fname, head_size=head_size)
elif ext in SUPPORTED_FILE_EXT['generic (Theta-phi in degrees)']:
if head_size is None:
raise ValueError(
"``head_size`` cannot be None for '{}'".format(ext))
montage = _read_theta_phi_in_degrees(fname, head_size=head_size,
fid_names=('Nz', 'LPA', 'RPA'))
elif ext in SUPPORTED_FILE_EXT['standard BESA spherical']:
montage = _read_elp_besa(fname, head_size)
elif ext in SUPPORTED_FILE_EXT['brainvision']:
montage = _read_brainvision(fname, head_size)
if coord_frame is not None:
coord_frame = _coord_frame_const(coord_frame)
for d in montage.dig:
d['coord_frame'] = coord_frame
return montage
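# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# The extension of the file decides which parser above is used; here a
# hypothetical EEGLAB ``.locs`` file is read and scaled to the default head size.
def _read_custom_montage_example():
    montage = read_custom_montage('my_cap.locs', head_size=HEAD_SIZE_DEFAULT)
    return montage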
def compute_dev_head_t(montage):
"""Compute device to head transform from a DigMontage.
Parameters
----------
montage : instance of DigMontage
The DigMontage must contain the fiducials in head
coordinate system and hpi points in both head and
meg device coordinate system.
Returns
-------
dev_head_t : instance of Transform
A Device-to-Head transformation matrix.
"""
_, coord_frame = _get_fid_coords(montage.dig)
if coord_frame != FIFF.FIFFV_COORD_HEAD:
raise ValueError('montage should have been set to head coordinate '
'system with transform_to_head function.')
hpi_head = np.array(
[d['r'] for d in montage.dig
if (d['kind'] == FIFF.FIFFV_POINT_HPI and
d['coord_frame'] == FIFF.FIFFV_COORD_HEAD)], float)
hpi_dev = np.array(
[d['r'] for d in montage.dig
if (d['kind'] == FIFF.FIFFV_POINT_HPI and
d['coord_frame'] == FIFF.FIFFV_COORD_DEVICE)], float)
if not (len(hpi_head) == len(hpi_dev) and len(hpi_dev) > 0):
raise ValueError((
"To compute Device-to-Head transformation, the same number of HPI"
" points in device and head coordinates is required. (Got {dev}"
" points in device and {head} points in head coordinate systems)"
).format(dev=len(hpi_dev), head=len(hpi_head)))
trans = _quat_to_affine(_fit_matched_points(hpi_dev, hpi_head)[0])
return Transform(fro='meg', to='head', trans=trans)
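# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# The device-to-head transform needs the same HPI coils expressed in both the
# head and MEG device frames, which usually comes from an acquisition-specific
# reader.  The montage argument below is therefore assumed, not constructed here.
def _compute_dev_head_t_example(montage_with_hpi_in_both_frames):
    dev_head_t = compute_dev_head_t(montage_with_hpi_in_both_frames)
    return dev_head_t  # Transform mapping 'meg' (device) coordinates to 'head'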
def compute_native_head_t(montage):
"""Compute the native-to-head transformation for a montage.
    This uses the fiducials in the native space to compute the
    transform to the head coordinate frame.
Parameters
----------
montage : instance of DigMontage
The montage.
Returns
-------
native_head_t : instance of Transform
A native-to-head transformation matrix.
"""
# Get fiducial points and their coord_frame
fid_coords, coord_frame = _get_fid_coords(montage.dig)
if coord_frame is None:
coord_frame = FIFF.FIFFV_COORD_UNKNOWN
if coord_frame == FIFF.FIFFV_COORD_HEAD:
native_head_t = np.eye(4)
else:
fid_keys = ('nasion', 'lpa', 'rpa')
for key in fid_keys:
if fid_coords[key] is None:
warn('Fiducial point %s not found, assuming identity %s to '
'head transformation'
% (key, _verbose_frames[coord_frame],))
native_head_t = np.eye(4)
break
else:
native_head_t = get_ras_to_neuromag_trans(
*[fid_coords[key] for key in fid_keys])
return Transform(coord_frame, 'head', native_head_t)
def make_standard_montage(kind, head_size=HEAD_SIZE_DEFAULT):
"""Read a generic (built-in) montage.
Parameters
----------
kind : str
The name of the montage to use. See notes for valid kinds.
head_size : float
The head size (radius, in meters) to use for spherical montages.
Defaults to 95mm.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
DigMontage
make_dig_montage
read_custom_montage
Notes
-----
Individualized (digitized) electrode positions should be read in using
:func:`read_dig_captrak`, :func:`read_dig_dat`, :func:`read_dig_egi`,
:func:`read_dig_fif`, :func:`read_dig_polhemus_isotrak`,
:func:`read_dig_hpts` or made with :func:`make_dig_montage`.
Valid ``kind`` arguments are:
=================== =====================================================
Kind Description
=================== =====================================================
standard_1005 Electrodes are named and positioned according to the
international 10-05 system (343+3 locations)
standard_1020 Electrodes are named and positioned according to the
international 10-20 system (94+3 locations)
standard_alphabetic Electrodes are named with LETTER-NUMBER combinations
(A1, B2, F4, ...) (65+3 locations)
standard_postfixed Electrodes are named according to the international
10-20 system using postfixes for intermediate
positions (100+3 locations)
standard_prefixed Electrodes are named according to the international
10-20 system using prefixes for intermediate
positions (74+3 locations)
standard_primed Electrodes are named according to the international
10-20 system using prime marks (' and '') for
intermediate positions (100+3 locations)
biosemi16 BioSemi cap with 16 electrodes (16+3 locations)
biosemi32 BioSemi cap with 32 electrodes (32+3 locations)
biosemi64 BioSemi cap with 64 electrodes (64+3 locations)
biosemi128 BioSemi cap with 128 electrodes (128+3 locations)
biosemi160 BioSemi cap with 160 electrodes (160+3 locations)
biosemi256 BioSemi cap with 256 electrodes (256+3 locations)
easycap-M1 EasyCap with 10-05 electrode names (74 locations)
easycap-M10 EasyCap with numbered electrodes (61 locations)
EGI_256 Geodesic Sensor Net (256 locations)
GSN-HydroCel-32 HydroCel Geodesic Sensor Net and Cz (33+3 locations)
GSN-HydroCel-64_1.0 HydroCel Geodesic Sensor Net (64+3 locations)
GSN-HydroCel-65_1.0 HydroCel Geodesic Sensor Net and Cz (65+3 locations)
GSN-HydroCel-128 HydroCel Geodesic Sensor Net (128+3 locations)
GSN-HydroCel-129 HydroCel Geodesic Sensor Net and Cz (129+3 locations)
GSN-HydroCel-256 HydroCel Geodesic Sensor Net (256+3 locations)
GSN-HydroCel-257 HydroCel Geodesic Sensor Net and Cz (257+3 locations)
mgh60 The (older) 60-channel cap used at
MGH (60+3 locations)
mgh70 The (newer) 70-channel BrainVision cap used at
MGH (70+3 locations)
=================== =====================================================
.. versionadded:: 0.19.0
"""
from ._standard_montage_utils import standard_montage_look_up_table
_check_option('kind', kind, _BUILT_IN_MONTAGES)
return standard_montage_look_up_table[kind](head_size=head_size)
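# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Any name returned by get_builtin_montages() is a valid ``kind``; the 10-20
# template is used here.
def _make_standard_montage_example():
    montage = make_standard_montage('standard_1020')
    print(montage)                   # 94 EEG channels plus 3 fiducials
    # montage.plot(kind='topomap')   # uncomment to visualize the layout
    return montage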
| bsd-3-clause |
publicus/pelican-plugins | liquid_tags/mdx_liquid_tags.py | 281 | 3447 | """
Markdown Extension for Liquid-style Tags
----------------------------------------
A markdown extension to allow user-defined tags of the form::
{% tag arg1 arg2 ... argn %}
Where "tag" is associated with some user-defined extension.
These result in a preprocess step within markdown that produces
either markdown or html.
"""
import warnings
import markdown
import itertools
import re
import os
from functools import wraps
# Define some regular expressions
LIQUID_TAG = re.compile(r'\{%.*?%\}', re.MULTILINE | re.DOTALL)
EXTRACT_TAG = re.compile(r'(?:\s*)(\S+)(?:\s*)')
LT_CONFIG = { 'CODE_DIR': 'code',
'NOTEBOOK_DIR': 'notebooks',
'FLICKR_API_KEY': 'flickr',
'GIPHY_API_KEY': 'giphy'
}
LT_HELP = { 'CODE_DIR' : 'Code directory for include_code subplugin',
'NOTEBOOK_DIR' : 'Notebook directory for notebook subplugin',
'FLICKR_API_KEY': 'Flickr key for accessing the API',
'GIPHY_API_KEY': 'Giphy key for accessing the API'
}
class _LiquidTagsPreprocessor(markdown.preprocessors.Preprocessor):
_tags = {}
def __init__(self, configs):
self.configs = configs
def run(self, lines):
page = '\n'.join(lines)
liquid_tags = LIQUID_TAG.findall(page)
for i, markup in enumerate(liquid_tags):
# remove {% %}
markup = markup[2:-2]
tag = EXTRACT_TAG.match(markup).groups()[0]
markup = EXTRACT_TAG.sub('', markup, 1)
if tag in self._tags:
liquid_tags[i] = self._tags[tag](self, tag, markup.strip())
# add an empty string to liquid_tags so that chaining works
liquid_tags.append('')
# reconstruct string
page = ''.join(itertools.chain(*zip(LIQUID_TAG.split(page),
liquid_tags)))
# resplit the lines
return page.split("\n")
class LiquidTags(markdown.Extension):
"""Wrapper for MDPreprocessor"""
def __init__(self, config):
try:
# Needed for markdown versions >= 2.5
for key,value in LT_CONFIG.items():
self.config[key] = [value,LT_HELP[key]]
super(LiquidTags,self).__init__(**config)
except AttributeError:
# Markdown versions < 2.5
for key,value in LT_CONFIG.items():
config[key] = [config[key],LT_HELP[key]]
super(LiquidTags,self).__init__(config)
@classmethod
def register(cls, tag):
"""Decorator to register a new include tag"""
def dec(func):
if tag in _LiquidTagsPreprocessor._tags:
warnings.warn("Enhanced Markdown: overriding tag '%s'" % tag)
_LiquidTagsPreprocessor._tags[tag] = func
return func
return dec
def extendMarkdown(self, md, md_globals):
self.htmlStash = md.htmlStash
md.registerExtension(self)
# for the include_code preprocessor, we need to re-run the
# fenced code block preprocessor after substituting the code.
# Because the fenced code processor is run before, {% %} tags
# within equations will not be parsed as an include.
md.preprocessors.add('mdincludes',
_LiquidTagsPreprocessor(self), ">html_block")
def makeExtension(configs=None):
"""Wrapper for a MarkDown extension"""
return LiquidTags(configs=configs)
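# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Registering a made-up ``{% shout ... %}`` tag and running it through markdown.
# The tag name and behaviour are invented; this assumes a Markdown 2.x release
# (for which this extension was written) that accepts Extension instances.
@LiquidTags.register('shout')
def _shout_tag(preprocessor, tag, markup):
    return '<strong>%s</strong>' % markup.upper()

def _liquid_tags_usage_example():
    md = markdown.Markdown(extensions=[LiquidTags(dict(LT_CONFIG))])
    return md.convert('Hello {% shout liquid tags %}!')
    # -> '<p>Hello <strong>LIQUID TAGS</strong>!</p>'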
| agpl-3.0 |
kytvi2p/Sigil | 3rdparty/python/Lib/test/test_script_helper.py | 8 | 5187 | """Unittests for test.script_helper. Who tests the test helper?"""
import subprocess
import sys
from test import script_helper
import unittest
from unittest import mock
class TestScriptHelper(unittest.TestCase):
def test_assert_python_expect_success(self):
t = script_helper._assert_python(True, '-c', 'import sys; sys.exit(0)')
self.assertEqual(0, t[0], 'return code was not 0')
def test_assert_python_expect_failure(self):
# I didn't import the sys module so this child will fail.
rc, out, err = script_helper._assert_python(False, '-c', 'sys.exit(0)')
self.assertNotEqual(0, rc, 'return code should not be 0')
def test_assert_python_raises_expect_success(self):
# I didn't import the sys module so this child will fail.
with self.assertRaises(AssertionError) as error_context:
script_helper._assert_python(True, '-c', 'sys.exit(0)')
error_msg = str(error_context.exception)
self.assertIn('command line was:', error_msg)
self.assertIn('sys.exit(0)', error_msg, msg='unexpected command line')
def test_assert_python_raises_expect_failure(self):
with self.assertRaises(AssertionError) as error_context:
script_helper._assert_python(False, '-c', 'import sys; sys.exit(0)')
error_msg = str(error_context.exception)
self.assertIn('Process return code is 0,', error_msg)
self.assertIn('import sys; sys.exit(0)', error_msg,
msg='unexpected command line.')
@mock.patch('subprocess.Popen')
def test_assert_python_isolated_when_env_not_required(self, mock_popen):
with mock.patch.object(script_helper,
'_interpreter_requires_environment',
return_value=False) as mock_ire_func:
mock_popen.side_effect = RuntimeError('bail out of unittest')
try:
script_helper._assert_python(True, '-c', 'None')
except RuntimeError as err:
self.assertEqual('bail out of unittest', err.args[0])
self.assertEqual(1, mock_popen.call_count)
self.assertEqual(1, mock_ire_func.call_count)
popen_command = mock_popen.call_args[0][0]
self.assertEqual(sys.executable, popen_command[0])
self.assertIn('None', popen_command)
self.assertIn('-I', popen_command)
self.assertNotIn('-E', popen_command) # -I overrides this
@mock.patch('subprocess.Popen')
def test_assert_python_not_isolated_when_env_is_required(self, mock_popen):
"""Ensure that -I is not passed when the environment is required."""
with mock.patch.object(script_helper,
'_interpreter_requires_environment',
return_value=True) as mock_ire_func:
mock_popen.side_effect = RuntimeError('bail out of unittest')
try:
script_helper._assert_python(True, '-c', 'None')
except RuntimeError as err:
self.assertEqual('bail out of unittest', err.args[0])
popen_command = mock_popen.call_args[0][0]
self.assertNotIn('-I', popen_command)
self.assertNotIn('-E', popen_command)
class TestScriptHelperEnvironment(unittest.TestCase):
"""Code coverage for _interpreter_requires_environment()."""
def setUp(self):
self.assertTrue(
hasattr(script_helper, '__cached_interp_requires_environment'))
# Reset the private cached state.
script_helper.__dict__['__cached_interp_requires_environment'] = None
def tearDown(self):
# Reset the private cached state.
script_helper.__dict__['__cached_interp_requires_environment'] = None
@mock.patch('subprocess.check_call')
def test_interpreter_requires_environment_true(self, mock_check_call):
mock_check_call.side_effect = subprocess.CalledProcessError('', '')
self.assertTrue(script_helper._interpreter_requires_environment())
self.assertTrue(script_helper._interpreter_requires_environment())
self.assertEqual(1, mock_check_call.call_count)
@mock.patch('subprocess.check_call')
def test_interpreter_requires_environment_false(self, mock_check_call):
# The mocked subprocess.check_call fakes a no-error process.
script_helper._interpreter_requires_environment()
self.assertFalse(script_helper._interpreter_requires_environment())
self.assertEqual(1, mock_check_call.call_count)
@mock.patch('subprocess.check_call')
def test_interpreter_requires_environment_details(self, mock_check_call):
script_helper._interpreter_requires_environment()
self.assertFalse(script_helper._interpreter_requires_environment())
self.assertFalse(script_helper._interpreter_requires_environment())
self.assertEqual(1, mock_check_call.call_count)
check_call_command = mock_check_call.call_args[0][0]
self.assertEqual(sys.executable, check_call_command[0])
self.assertIn('-E', check_call_command)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
0asa/scikit-learn | sklearn/feature_selection/univariate_selection.py | 5 | 18520 | """Univariate features selection."""
# Authors: V. Michel, B. Thirion, G. Varoquaux, A. Gramfort, E. Duchesnay.
# L. Buitinck, A. Joly
# License: BSD 3 clause
import numpy as np
import warnings
from scipy import special, stats
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..preprocessing import LabelBinarizer
from ..utils import (as_float_array, check_array, check_X_y, safe_sqr,
safe_mask)
from ..utils.extmath import norm, safe_sparse_dot
from ..utils.validation import check_is_fitted
from .base import SelectorMixin
def _clean_nans(scores):
"""
Fixes Issue #1240: NaNs can't be properly compared, so change them to the
smallest value of scores's dtype. -inf seems to be unreliable.
"""
# XXX where should this function be called? fit? scoring functions
# themselves?
scores = as_float_array(scores, copy=True)
scores[np.isnan(scores)] = np.finfo(scores.dtype).min
return scores
######################################################################
# Scoring functions
# The following function is a rewriting of scipy.stats.f_oneway
# Contrary to the scipy.stats.f_oneway implementation it does not
# copy the data while keeping the inputs unchanged.
def f_oneway(*args):
"""Performs a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that 2 or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Parameters
----------
sample1, sample2, ... : array_like, sparse matrices
The sample measurements should be given as arguments.
Returns
-------
F-value : float
The computed F-value of the test.
p-value : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent
2. Each sample is from a normally distributed population
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although
with some loss of power.
The algorithm is from Heiman[2], pp.394-7.
See ``scipy.stats.f_oneway`` that should give the same results while
being less efficient.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
"""
n_classes = len(args)
args = [as_float_array(a) for a in args]
n_samples_per_class = np.array([a.shape[0] for a in args])
n_samples = np.sum(n_samples_per_class)
ss_alldata = sum(safe_sqr(a).sum(axis=0) for a in args)
sums_args = [np.asarray(a.sum(axis=0)) for a in args]
square_of_sums_alldata = sum(sums_args) ** 2
square_of_sums_args = [s ** 2 for s in sums_args]
sstot = ss_alldata - square_of_sums_alldata / float(n_samples)
ssbn = 0.
for k, _ in enumerate(args):
ssbn += square_of_sums_args[k] / n_samples_per_class[k]
ssbn -= square_of_sums_alldata / float(n_samples)
sswn = sstot - ssbn
dfbn = n_classes - 1
dfwn = n_samples - n_classes
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
constant_features_idx = np.where(msw == 0.)[0]
if (np.nonzero(msb)[0].size != msb.size and constant_features_idx.size):
warnings.warn("Features %s are constant." % constant_features_idx,
UserWarning)
f = msb / msw
# flatten matrix to vector in sparse case
f = np.asarray(f).ravel()
prob = stats.fprob(dfbn, dfwn, f)
return f, prob
def f_classif(X, y):
"""Compute the Anova F-value for the provided sample
Parameters
----------
X : {array-like, sparse matrix} shape = [n_samples, n_features]
        The set of regressors that will be tested sequentially.
    y : array of shape(n_samples)
        The target vector (class labels).
Returns
-------
F : array, shape = [n_features,]
The set of F values.
pval : array, shape = [n_features,]
The set of p-values.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'])
args = [X[safe_mask(X, y == k)] for k in np.unique(y)]
return f_oneway(*args)
def _chisquare(f_obs, f_exp):
"""Fast replacement for scipy.stats.chisquare.
Version from https://github.com/scipy/scipy/pull/2525 with additional
optimizations.
"""
f_obs = np.asarray(f_obs, dtype=np.float64)
k = len(f_obs)
# Reuse f_obs for chi-squared statistics
chisq = f_obs
chisq -= f_exp
chisq **= 2
chisq /= f_exp
chisq = chisq.sum(axis=0)
return chisq, special.chdtrc(k - 1, chisq)
def chi2(X, y):
"""Compute chi-squared statistic for each class/feature combination.
This score can be used to select the n_features features with the
highest values for the test chi-squared statistic from X, which must
contain booleans or frequencies (e.g., term counts in document
classification), relative to the classes.
Recall that the chi-square test measures dependence between stochastic
variables, so using this function "weeds out" the features that are the
most likely to be independent of class and therefore irrelevant for
classification.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features_in)
Sample vectors.
y : array-like, shape = (n_samples,)
Target vector (class labels).
Returns
-------
chi2 : array, shape = (n_features,)
chi2 statistics of each feature.
pval : array, shape = (n_features,)
p-values of each feature.
Notes
-----
Complexity of this algorithm is O(n_classes * n_features).
"""
# XXX: we might want to do some of the following in logspace instead for
# numerical stability.
X = check_array(X, accept_sparse='csr')
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative.")
Y = LabelBinarizer().fit_transform(y)
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
observed = safe_sparse_dot(Y.T, X) # n_classes * n_features
feature_count = check_array(X.sum(axis=0))
class_prob = check_array(Y.mean(axis=0))
expected = np.dot(class_prob.T, feature_count)
return _chisquare(observed, expected)
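# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# chi2 expects non-negative features such as term counts; the toy data below is
# invented for demonstration.
def _chi2_example():
    import numpy as np
    X = np.array([[1, 0, 3],
                  [0, 2, 1],
                  [4, 0, 0],
                  [0, 3, 2]])
    y = np.array([0, 1, 0, 1])
    scores, pvalues = chi2(X, y)
    return scores, pvalues  # one (statistic, p-value) pair per feature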
def f_regression(X, y, center=True):
"""Univariate linear regression tests
Quick linear model for testing the effect of a single regressor,
sequentially for many regressors.
This is done in 3 steps:
1. the regressor of interest and the data are orthogonalized
wrt constant regressors
2. the cross correlation between data and regressors is computed
3. it is converted to an F score then to a p-value
Parameters
----------
X : {array-like, sparse matrix} shape = (n_samples, n_features)
        The set of regressors that will be tested sequentially.
    y : array of shape(n_samples).
        The target vector.
    center : bool, default=True
        If True, X and y will be centered.
Returns
-------
F : array, shape=(n_features,)
F values of features.
pval : array, shape=(n_features,)
p-values of F-scores.
"""
if issparse(X) and center:
raise ValueError("center=True only allowed for dense data")
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float)
if center:
y = y - np.mean(y)
X = X.copy('F') # faster in fortran
X -= X.mean(axis=0)
# compute the correlation
corr = safe_sparse_dot(y, X)
# XXX could use corr /= row_norms(X.T) here, but the test doesn't pass
corr /= np.asarray(np.sqrt(safe_sqr(X).sum(axis=0))).ravel()
corr /= norm(y)
# convert to p-value
degrees_of_freedom = y.size - (2 if center else 1)
F = corr ** 2 / (1 - corr ** 2) * degrees_of_freedom
pv = stats.f.sf(F, 1, degrees_of_freedom)
return F, pv
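# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Only the first feature is informative in this toy regression problem, so its
# F score is large and its p-value close to zero.
def _f_regression_example():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(50, 3)
    y = 2.0 * X[:, 0] + 0.1 * rng.randn(50)
    F, pval = f_regression(X, y)
    return F, pval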
######################################################################
# Base classes
class _BaseFilter(BaseEstimator, SelectorMixin):
"""Initialize the univariate feature selection.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
"""
def __init__(self, score_func):
self.score_func = score_func
def fit(self, X, y):
"""Run score function on (X, y) and get the appropriate features.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'])
if not callable(self.score_func):
raise TypeError("The score function should be a callable, %s (%s) "
"was passed."
% (self.score_func, type(self.score_func)))
self._check_params(X, y)
self.scores_, self.pvalues_ = self.score_func(X, y)
self.scores_ = np.asarray(self.scores_)
self.pvalues_ = np.asarray(self.pvalues_)
return self
def _check_params(self, X, y):
pass
######################################################################
# Specific filters
######################################################################
class SelectPercentile(_BaseFilter):
"""Select features according to a percentile of the highest scores.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
percentile : int, optional, default=10
Percent of features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
Notes
-----
Ties between features with equal scores will be broken in an unspecified
way.
"""
def __init__(self, score_func=f_classif, percentile=10):
super(SelectPercentile, self).__init__(score_func)
self.percentile = percentile
def _check_params(self, X, y):
if not 0 <= self.percentile <= 100:
raise ValueError("percentile should be >=0, <=100; got %r"
% self.percentile)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
# Cater for NaNs
if self.percentile == 100:
return np.ones(len(self.scores_), dtype=np.bool)
elif self.percentile == 0:
return np.zeros(len(self.scores_), dtype=np.bool)
scores = _clean_nans(self.scores_)
        threshold = stats.scoreatpercentile(scores,
                                            100 - self.percentile)
        mask = scores > threshold
        ties = np.where(scores == threshold)[0]
if len(ties):
max_feats = len(scores) * self.percentile // 100
kept_ties = ties[:max_feats - mask.sum()]
mask[kept_ties] = True
return mask
class SelectKBest(_BaseFilter):
"""Select features according to the k highest scores.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
k : int or "all", optional, default=10
Number of top features to select.
The "all" option bypasses selection, for use in a parameter search.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
Notes
-----
Ties between features with equal scores will be broken in an unspecified
way.
"""
def __init__(self, score_func=f_classif, k=10):
super(SelectKBest, self).__init__(score_func)
self.k = k
def _check_params(self, X, y):
if not (self.k == "all" or 0 <= self.k <= X.shape[1]):
raise ValueError("k should be >=0, <= n_features; got %r."
"Use k='all' to return all features."
% self.k)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
if self.k == 'all':
return np.ones(self.scores_.shape, dtype=bool)
elif self.k == 0:
return np.zeros(self.scores_.shape, dtype=bool)
else:
scores = _clean_nans(self.scores_)
mask = np.zeros(scores.shape, dtype=bool)
# Request a stable sort. Mergesort takes more memory (~40MB per
# megafeature on x86-64).
mask[np.argsort(scores, kind="mergesort")[-self.k:]] = 1
return mask
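# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Keeping the two best features according to the ANOVA F-score on invented data.
def _select_k_best_example():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(40, 5)
    y = (X[:, 0] + X[:, 1] > 0).astype(int)  # classes driven by the first two features
    selector = SelectKBest(score_func=f_classif, k=2).fit(X, y)
    X_new = selector.transform(X)            # shape (40, 2)
    return selector.scores_, X_new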
class SelectFpr(_BaseFilter):
"""Filter: Select the pvalues below alpha based on a FPR test.
FPR test stands for False Positive Rate test. It controls the total
amount of false detections.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
alpha : float, optional
The highest p-value for features to be kept.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFpr, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
return self.pvalues_ < self.alpha
class SelectFdr(_BaseFilter):
"""Filter: Select the p-values for an estimated false discovery rate
This uses the Benjamini-Hochberg procedure. ``alpha`` is the target false
discovery rate.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
alpha : float, optional
The highest uncorrected p-value for features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFdr, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
alpha = self.alpha
sv = np.sort(self.pvalues_)
threshold = sv[sv < alpha * np.arange(len(self.pvalues_))].max()
return self.pvalues_ <= threshold
class SelectFwe(_BaseFilter):
"""Filter: Select the p-values corresponding to Family-wise error rate
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
alpha : float, optional
The highest uncorrected p-value for features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFwe, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
return (self.pvalues_ < self.alpha / len(self.pvalues_))
######################################################################
# Generic filter
######################################################################
# TODO this class should fit on either p-values or scores,
# depending on the mode.
class GenericUnivariateSelect(_BaseFilter):
"""Univariate feature selector with configurable strategy.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
mode : {'percentile', 'k_best', 'fpr', 'fdr', 'fwe'}
Feature selection mode.
param : float or int depending on the feature selection mode
Parameter of the corresponding mode.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
"""
_selection_modes = {'percentile': SelectPercentile,
'k_best': SelectKBest,
'fpr': SelectFpr,
'fdr': SelectFdr,
'fwe': SelectFwe}
def __init__(self, score_func=f_classif, mode='percentile', param=1e-5):
super(GenericUnivariateSelect, self).__init__(score_func)
self.mode = mode
self.param = param
def _make_selector(self):
selector = self._selection_modes[self.mode](score_func=self.score_func)
# Now perform some acrobatics to set the right named parameter in
# the selector
possible_params = selector._get_param_names()
possible_params.remove('score_func')
selector.set_params(**{possible_params[0]: self.param})
return selector
def _check_params(self, X, y):
if self.mode not in self._selection_modes:
raise ValueError("The mode passed should be one of %s, %r,"
" (type %s) was passed."
% (self._selection_modes.keys(), self.mode,
type(self.mode)))
self._make_selector()._check_params(X, y)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
selector = self._make_selector()
selector.pvalues_ = self.pvalues_
selector.scores_ = self.scores_
return selector._get_support_mask()
| bsd-3-clause |
ric2b/Vivaldi-browser | chromium/tools/binary_size/libsupersize/concurrent_test.py | 2 | 5418 | #!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import threading
import unittest
import concurrent
def _ForkTestHelper(arg1, arg2, pickle_me_not, test_instance, parent_pid):
_ = pickle_me_not # Suppress lint warning.
test_instance.assertNotEquals(os.getpid(), parent_pid)
return arg1 + arg2
class Unpicklable(object):
"""Ensures that pickle() is not called on parameters."""
def __getstate__(self):
raise AssertionError('Tried to pickle')
class ConcurrentTest(unittest.TestCase):
def testEncodeDictOfLists_Empty(self):
test_dict = {}
encoded = concurrent.EncodeDictOfLists(test_dict)
decoded = concurrent.DecodeDictOfLists(encoded)
self.assertEquals(test_dict, decoded)
def testEncodeDictOfLists_EmptyValue(self):
test_dict = {'foo': []}
encoded = concurrent.EncodeDictOfLists(test_dict)
decoded = concurrent.DecodeDictOfLists(encoded)
self.assertEquals(test_dict, decoded)
def testEncodeDictOfLists_AllStrings(self):
test_dict = {'foo': ['a', 'b', 'c'], 'foo2': ['a', 'b']}
encoded = concurrent.EncodeDictOfLists(test_dict)
decoded = concurrent.DecodeDictOfLists(encoded)
self.assertEquals(test_dict, decoded)
def testEncodeDictOfLists_KeyTransform(self):
test_dict = {0: ['a', 'b', 'c'], 9: ['a', 'b']}
encoded = concurrent.EncodeDictOfLists(test_dict, key_transform=str)
decoded = concurrent.DecodeDictOfLists(encoded, key_transform=int)
self.assertEquals(test_dict, decoded)
def testEncodeDictOfLists_ValueTransform(self):
test_dict = {'a': ['0', '1', '2'], 'b': ['3', '4']}
expected = {'a': [0, 1, 2], 'b': [3, 4]}
encoded = concurrent.EncodeDictOfLists(test_dict)
decoded = concurrent.DecodeDictOfLists(encoded, value_transform=int)
self.assertEquals(expected, decoded)
def testEncodeDictOfLists_Join_Empty(self):
test_dict1 = {}
test_dict2 = {}
expected = {}
encoded1 = concurrent.EncodeDictOfLists(test_dict1)
encoded2 = concurrent.EncodeDictOfLists(test_dict2)
encoded = concurrent.JoinEncodedDictOfLists([encoded1, encoded2])
decoded = concurrent.DecodeDictOfLists(encoded)
self.assertEquals(expected, decoded)
  def testEncodeDictOfLists_Join_Single(self):
test_dict1 = {'key1': ['a']}
encoded1 = concurrent.EncodeDictOfLists(test_dict1)
encoded = concurrent.JoinEncodedDictOfLists([encoded1])
decoded = concurrent.DecodeDictOfLists(encoded)
self.assertEquals(test_dict1, decoded)
def testEncodeDictOfLists_JoinMultiple(self):
test_dict1 = {'key1': ['a']}
test_dict2 = {'key2': ['b']}
expected = {'key1': ['a'], 'key2': ['b']}
encoded1 = concurrent.EncodeDictOfLists(test_dict1)
encoded2 = concurrent.EncodeDictOfLists({})
encoded3 = concurrent.EncodeDictOfLists(test_dict2)
encoded = concurrent.JoinEncodedDictOfLists([encoded1, encoded2, encoded3])
decoded = concurrent.DecodeDictOfLists(encoded)
self.assertEquals(expected, decoded)
def testCallOnThread(self):
main_thread = threading.current_thread()
def callback(arg1, arg2):
self.assertEquals(1, arg1)
self.assertEquals(2, arg2)
my_thread = threading.current_thread()
self.assertNotEquals(my_thread, main_thread)
return 3
result = concurrent.CallOnThread(callback, 1, arg2=2)
self.assertEquals(3, result.get())
def testForkAndCall_normal(self):
parent_pid = os.getpid()
result = concurrent.ForkAndCall(
_ForkTestHelper, (1, 2, Unpicklable(), self, parent_pid))
self.assertEquals(3, result.get())
def testForkAndCall_exception(self):
parent_pid = os.getpid()
result = concurrent.ForkAndCall(
_ForkTestHelper, (1, 'a', None, self, parent_pid))
self.assertRaises(TypeError, result.get)
def testBulkForkAndCall_none(self):
results = concurrent.BulkForkAndCall(_ForkTestHelper, [])
self.assertEquals([], list(results))
def testBulkForkAndCall_few(self):
parent_pid = os.getpid()
results = concurrent.BulkForkAndCall(_ForkTestHelper, [
(1, 2, Unpicklable(), self, parent_pid),
(3, 4, None, self, parent_pid)])
self.assertEquals({3, 7}, set(results))
def testBulkForkAndCall_few_kwargs(self):
parent_pid = os.getpid()
results = concurrent.BulkForkAndCall(_ForkTestHelper,
[(1, 2, Unpicklable()), (3, 4, None)],
test_instance=self, parent_pid=parent_pid)
self.assertEquals({3, 7}, set(results))
def testBulkForkAndCall_many(self):
parent_pid = os.getpid()
args = [(1, 2, Unpicklable(), self, parent_pid) for _ in xrange(100)]
results = concurrent.BulkForkAndCall(_ForkTestHelper, args)
self.assertEquals([3] * 100, list(results))
def testBulkForkAndCall_many_kwargs(self):
parent_pid = os.getpid()
args = [(1, 2) for _ in xrange(100)]
results = concurrent.BulkForkAndCall(
_ForkTestHelper, args, pickle_me_not=Unpicklable(), test_instance=self,
parent_pid=parent_pid)
self.assertEquals([3] * 100, list(results))
def testBulkForkAndCall_exception(self):
parent_pid = os.getpid()
results = concurrent.BulkForkAndCall(_ForkTestHelper, [
(1, 'a', self, parent_pid)])
self.assertRaises(TypeError, results.next)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
quarkslab/irma | probe/tests/probe/controllers/test_ftpctrl.py | 1 | 1780 | from unittest import TestCase
from mock import patch, MagicMock, call
import probe.controllers.ftpctrl as module
from irma.common.base.exceptions import IrmaFtpError
class TestFtpctrl(TestCase):
@patch("probe.controllers.ftpctrl.os.path.isdir")
@patch('probe.controllers.ftpctrl.config.IrmaSFTPv2')
def test_upload_files(self, m_IrmaSFTPv2, m_isdir):
parent_filename = "parent_file"
filelist = ["file1", "file2"]
m_ftp = MagicMock()
m_IrmaSFTPv2().__enter__.return_value = m_ftp
m_isdir.return_value = False
module.upload_files("frontend", "path", filelist, parent_filename)
m_isdir.assert_has_calls([call('path/file1'),
call('path/file2')])
m_ftp.upload_file.assert_has_calls([call('parent_file_0',
'path/file1'),
call('parent_file_1',
'path/file2')])
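        # upload_files() is expected to store each local path under a
        # "<parent_filename>_<index>" remote name (behaviour inferred from the
        # mocked calls asserted above).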
@patch("probe.controllers.ftpctrl.os.path.isdir")
@patch('probe.controllers.ftpctrl.config.IrmaSFTPv2')
def test_upload_files_not_a_file(self, m_IrmaSFTPv2, m_isdir):
m_isdir.return_value = True
m_ftp = MagicMock()
m_IrmaSFTPv2().__enter__.return_value = m_ftp
module.upload_files("frontend", "path", ["dir"], "parent_file")
m_ftp.upload_file.assert_not_called()
@patch('probe.controllers.ftpctrl.config.IrmaSFTPv2')
def test_download_file(self, m_IrmaSFTPv2):
filename = "file4"
m_ftp = MagicMock()
m_IrmaSFTPv2().__enter__.return_value = m_ftp
module.download_file("frontend", "srcname", filename)
m_ftp.download_file.assert_called_once_with(".", "srcname", filename)
| apache-2.0 |
thanatoskira/cansina | core/payload.py | 2 | 3505 | import threading
import Queue
import time
from core.task import Task
def _populate_list_with_file(file_name):
""" Open a file, read its content and strips it. Returns a list with the content
additionally it filter and clean some splinters
"""
with open(file_name, 'r') as f:
tmp_list = f.readlines()
clean_list = []
for e in tmp_list:
# Delete leading and trailing spaces
e = e.strip()
# Skip commented lines in payload files
if e.startswith('#'):
continue
# Remove leading '/' characters
if e.startswith('/'):
e = e[1:]
clean_list.append(e.decode("utf-8", "replace"))
return clean_list
def _has_extension(res):
    # whether the last path segment contains a '.'
if res.rfind("/") == -1:
return "." in res
else:
return "." in res[res.rfind("/"):]
class Payload():
def __init__(self, target, payload_filename):
self.target = target
self.payload_filename = payload_filename
self.payload = _populate_list_with_file(payload_filename)
self.queue = Queue.Queue()
self.dead = False
self.extensions = None
self.length = len(self.payload)
self.banned_response_codes = None
self.unbanned_response_codes = None
self.content = None
self.remove_slash = False
self.uppercase = False
def set_remove_slash(self, remove_slash):
self.remove_slash = remove_slash
def set_banned_response_codes(self, banned_response_codes):
self.banned_response_codes = banned_response_codes
def set_unbanned_response_codes(self, unbanned_response_codes):
self.unbanned_response_codes = unbanned_response_codes
def set_extensions(self, extensions):
self.extensions = extensions
def set_content(self, content):
self.content = content
def get_length(self):
return self.length
def get_total_requests(self):
return self.length * len(self.extensions)
def kill(self):
self.dead = True
def is_finished(self):
return self.dead
def set_uppercase(self, uppercase):
self.uppercase = uppercase
def get_queue(self):
task_id = 0
for resource in self.payload:
if self.uppercase:
resource = resource.upper()
task_id += 1
# Useful when looking for files without extension instead of directories
if self.remove_slash and resource.endswith("/"):
resource = resource[:-1]
for extension in self.extensions:
                # If the resource is a whole word and the user didn't provide an
                # extension, append a trailing '/'
if not extension and not _has_extension(resource) and not self.remove_slash:
resource += '/'
                # Put a '.' before the extension if the user didn't include one
if extension and not '.' in extension:
extension = '.' + extension
task = Task(task_id, self.target, resource, extension)
task.set_payload_filename(self.payload_filename)
task.set_payload_length(self.length)
task.set_banned_response_codes(self.banned_response_codes)
task.set_unbanned_response_codes(self.unbanned_response_codes)
task.set_content(self.content)
self.queue.put(task)
return self.queue
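# A minimal usage sketch (target URL and wordlist path are illustrative):
#   payload = Payload("http://target/", "wordlists/common.txt")
#   payload.set_extensions(["", "php"])
#   queue = payload.get_queue()   # Queue of Task objects, one per word/extension pair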
| gpl-3.0 |
willseward/cattle | tests/integration/cattletest/core/test_user_preferences.py | 8 | 2401 | from common_fixtures import * # NOQA
from gdapi import ApiError
@pytest.fixture(scope='module')
def user_client(context):
return context.user_client
def _user_preference(client, name=None):
if name is None:
name = random_str()
preference = client.wait_success(client.create_user_preference(
name=name, value=random_str()))
got_preference = client.by_id('userPreference', preference.id)
assert preference.id == got_preference.id
assert name == got_preference.name
assert preference.value == got_preference.value
return got_preference
def test_create_user_preference(user_client):
_user_preference(user_client)
def test_delete_user_preference(user_client):
preference = _user_preference(user_client)
preference = user_client.wait_success(preference.deactivate())
preference = user_client.wait_success(preference.remove())
preference = user_client.wait_success(preference.purge())
preference = user_client.by_id('userPreference', preference.id)
assert preference.state == 'purged'
preference = _user_preference(user_client)
preference = user_client.wait_success(preference.remove())
assert preference.state == 'removed'
preference = user_client.wait_success(preference.purge())
assert preference.state == 'purged'
def test_update_user_preference(user_client):
preference = _user_preference(user_client)
new_value = random_str()
user_client.update(preference, value=new_value)
got_preference = user_client.by_id('userPreference', preference.id)
assert got_preference.value == new_value
def test_update_user_preference_pass_name(user_client):
preference = _user_preference(user_client)
new_value = random_str()
user_client.update(preference, name=preference.name, value=new_value)
got_preference = user_client.by_id('userPreference', preference.id)
assert got_preference.value == new_value
def test_unique_user_preference(user_client, admin_user_client):
rand_str = random_str()
_user_preference(user_client, name=rand_str)
with pytest.raises(ApiError) as e:
_user_preference(user_client, name=rand_str)
assert e.value.error.status == 422
_user_preference(admin_user_client, name=rand_str)
with pytest.raises(ApiError) as e:
_user_preference(admin_user_client, name=rand_str)
assert e.value.error.status == 422
| apache-2.0 |
mdublin/Brightcove-Dynamic-Ingest-App | ENV/lib/python2.7/site-packages/pip/_vendor/packaging/__about__.py | 441 | 1073 | # Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
__all__ = [
"__title__", "__summary__", "__uri__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
]
__title__ = "packaging"
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"
__version__ = "15.3"
__author__ = "Donald Stufft"
__email__ = "[email protected]"
__license__ = "Apache License, Version 2.0"
__copyright__ = "Copyright 2014 %s" % __author__
| mit |
Enucatl/pilatus-experiments | scripts/time_series2csv.py | 1 | 1133 | """Read the time series and output a csv"""
import argparse
import h5py
import csv
import sys
import numpy as np
parser = argparse.ArgumentParser(
__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"file",
nargs=1,
help="hdf5 file"
)
if __name__ == '__main__':
args = parser.parse_args()
file_name = args.file[0]
hdf5_file = h5py.File(file_name, "r")
hdf5_group = hdf5_file["raw_images"]
writer = csv.writer(sys.stdout)
exposures = [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1]
n_files = [1000, 1000, 1000, 500, 500, 500, 200, 200, 200, 100]
writer.writerow(["exposure", "signal", "noise", "snr"])
datasets = np.array([dataset for dataset in hdf5_group.values()])
print(datasets.shape)
i = 0
for exposure, n in zip(exposures, n_files):
dataset = datasets[i:(i + n), 0, ...]
print(dataset.shape)
i += n
signal = np.mean(dataset, axis=0)
noise = np.std(dataset, axis=0)
snr = signal / noise
writer.writerow([exposure, signal, noise, snr])
hdf5_file.close()
| gpl-3.0 |
redhat-performance/tuned | tuned/plugins/plugin_irqbalance.py | 1 | 3151 | from . import base
from .decorators import command_custom
from tuned import consts
import tuned.logs
import errno
import perf
import re
log = tuned.logs.get()
class IrqbalancePlugin(base.Plugin):
"""
Plugin for irqbalance settings management.
"""
def __init__(self, *args, **kwargs):
super(IrqbalancePlugin, self).__init__(*args, **kwargs)
self._cpus = perf.cpu_map()
def _instance_init(self, instance):
instance._has_dynamic_tuning = False
instance._has_static_tuning = True
def _instance_cleanup(self, instance):
pass
@classmethod
def _get_config_options(cls):
return {
"banned_cpus": None,
}
def _read_irqbalance_sysconfig(self):
try:
with open(consts.IRQBALANCE_SYSCONFIG_FILE, "r") as f:
return f.read()
except IOError as e:
if e.errno == errno.ENOENT:
log.warn("irqbalance sysconfig file is missing. Is irqbalance installed?")
else:
log.error("Failed to read irqbalance sysconfig file: %s" % e)
return None
def _write_irqbalance_sysconfig(self, content):
try:
with open(consts.IRQBALANCE_SYSCONFIG_FILE, "w") as f:
f.write(content)
return True
except IOError as e:
log.error("Failed to write irqbalance sysconfig file: %s" % e)
return False
def _write_banned_cpus(self, sysconfig, banned_cpumask):
return sysconfig + "IRQBALANCE_BANNED_CPUS=%s\n" % banned_cpumask
def _clear_banned_cpus(self, sysconfig):
lines = []
for line in sysconfig.split("\n"):
if not re.match(r"\s*IRQBALANCE_BANNED_CPUS=", line):
lines.append(line)
return "\n".join(lines)
def _restart_irqbalance(self):
# Exit code 5 means unit not found (see 'EXIT_NOTINSTALLED' in
# systemd.exec(5))
retcode, out = self._cmd.execute(
["systemctl", "try-restart", "irqbalance"],
no_errors=[5])
if retcode != 0:
log.warn("Failed to restart irqbalance. Is it installed?")
def _set_banned_cpus(self, banned_cpumask):
content = self._read_irqbalance_sysconfig()
if content is None:
return
content = self._clear_banned_cpus(content)
content = self._write_banned_cpus(content, banned_cpumask)
if self._write_irqbalance_sysconfig(content):
self._restart_irqbalance()
def _restore_banned_cpus(self):
content = self._read_irqbalance_sysconfig()
if content is None:
return
content = self._clear_banned_cpus(content)
if self._write_irqbalance_sysconfig(content):
self._restart_irqbalance()
@command_custom("banned_cpus", per_device=False)
def _banned_cpus(self, enabling, value, verify, ignore_missing):
banned_cpumask = None
if value is not None:
banned = set(self._cmd.cpulist_unpack(value))
present = set(self._cpus)
if banned.issubset(present):
banned_cpumask = self._cmd.cpulist2hex(list(banned))
else:
str_cpus = ",".join([str(x) for x in self._cpus])
log.error("Invalid banned_cpus specified, '%s' does not match available cores '%s'"
% (value, str_cpus))
if (enabling or verify) and banned_cpumask is None:
return None
if verify:
# Verification is currently not supported
return None
elif enabling:
self._set_banned_cpus(banned_cpumask)
else:
self._restore_banned_cpus()
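# A hedged profile example (option name as declared in _get_config_options above;
# the CPU list is illustrative):
#   [irqbalance]
#   banned_cpus=0,2-3
# The plugin unpacks the list, turns it into a hex mask via cpulist2hex() and writes
# IRQBALANCE_BANNED_CPUS=<mask> into the irqbalance sysconfig file
# (consts.IRQBALANCE_SYSCONFIG_FILE), then restarts the irqbalance service.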
| gpl-2.0 |
cellus-sas/heavy-encypted-chat | http_side/runkey.py | 1 | 8371 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# requires python-crypto (pycrypto) and python-keyczar
#
from Crypto.Cipher import DES3
from Crypto.Cipher import AES
from Crypto import Random
from hashlib import md5
from hashlib import sha256
import os
import time
import pickle
local=False
def des3_encrypt_file(in_filename, out_filename, chunk_size, key, iv):
des3 = DES3.new(key, DES3.MODE_CFB, iv)
with open(in_filename, 'r') as in_file:
with open(out_filename, 'w') as out_file:
while True:
chunk = in_file.read(chunk_size)
if len(chunk) == 0:
break
elif len(chunk) % 16 != 0:
chunk += ' ' * (16 - len(chunk) % 16)
out_file.write(des3.encrypt(chunk))
def des3_decrypt_file(in_filename, out_filename, chunk_size, key, iv):
des3 = DES3.new(key, DES3.MODE_CFB, iv)
with open(in_filename, 'r') as in_file:
with open(out_filename, 'w') as out_file:
while True:
chunk = in_file.read(chunk_size)
if len(chunk) == 0:
break
out_file.write(des3.decrypt(chunk))
def des3_encrypt_str(in_str, chunk_size, key, iv):
in_file="temp_node"
out_file="temp_node"+"__encrypted"
with open(in_file,'w') as f:
f.write(in_str)
des3_encrypt_file(in_file, out_file, chunk_size, key, iv)
out_str=""
with open(out_file,'r') as f:
out_str=f.read()
return out_str
def des3_decrypt_str(in_str, chunk_size, key, iv):
in_file="temp_node"
out_file="temp_node"+"__decrypted"
with open(in_file,'w') as f:
f.write(in_str)
des3_decrypt_file(in_file, out_file, chunk_size, key, iv)
out_str=""
with open(out_file,'r') as f:
out_str=f.read()
return out_str
def aes_encrypt_str(in_str, key, iv):
    initial_in_str=in_str
    add_len = ''  # stays empty when len(in_str) is already a multiple of 16 (avoids a NameError below)
mode = AES.MODE_CBC
encryptor = AES.new(key, mode, iv)
if len(in_str) == 0:
pass
elif len(in_str) % 16 != 0:
add_len=' ' * (16 - len(in_str) % 16)
#print len(add_len)
in_str += ' ' * (16 - len(in_str) % 16)
#print "'"+in_str+'"'
#print "'"+in_str.encode('utf-8')+'"'
#for each in in_str:
# print "'"+each+"'"
#print in_str.encode('latin-1')
out_str = encryptor.encrypt(in_str.encode('latin-1'))
return [out_str,iv,len(add_len),sha256(initial_in_str.encode('UTF-8')).hexdigest()]
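# Return value: [ciphertext, iv, number_of_padding_spaces, sha256_hex_of_original_plaintext].
# The hash is what lets aes_decrypt_str() strip the space padding when the pad length is unknown.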
def aes_decrypt_str(in_str, key="",iv=0,add_len="",original_hexdigest=""):
mode = AES.MODE_CBC
encryptor = AES.new(key, mode,iv)
out_str = encryptor.decrypt(in_str)
#print "out>" + out_str.decode('latin-1')
if len(out_str) == 0:
pass
elif len(in_str) % 16 == 0:
if original_hexdigest!="":
#print "hexdigest verification"
hex_correlation=False
cp=0
exiting=False
out_str=unicode(out_str.decode('latin-1'))
print out_str
while exiting==False:
#print "'"+out_str+"'"
#print out_str
#print "original ",original_hexdigest
#print sha256(out_str.encode('UTF-8')).hexdigest()
#print sha256(out_str.decode('UTF-8')).hexdigest()
#print sha256(out_str.decode('latin-1').encode('UTF-8')).hexdigest()
#out_str=unicode(out_str.decode('latin-1'))
try:
current_hex=sha256(out_str.encode('UTF-8')).hexdigest()
except:
print "error a l encodage car il y a des accent"
current_hex=""
# current_hex=sha256(out_str.encode('UTF-8')).hexdigest()
#print current_hex+"*>"+original_hexdigest
if current_hex!=original_hexdigest:
#print "remove last char"
try:
out_str= out_str[:len(out_str)-1]
except:
print "error no hexdigest verification for this one"
else:
print "hexdigest verification : OK !"
hex_correlation=True
cp=cp+1
if hex_correlation==True or cp>16:
exiting=True
#time.sleep(1)
else:
#print add_len
out_str= out_str[:len(out_str)-add_len]
#out_str=out_str[len_add:]
pass
return out_str
global key
def get_ck_from_obj():  # TODO: guard against a missing key file and other read errors
global local,key
key=['','','']
if local==True: #<<<<<<<<<<<<<<<-change when not local
path_cu_key=os.getcwd()+os.sep+'..'+os.sep+'current_key.pkl'
else:
path_cu_key=os.getcwd()+os.sep+'current_key.pkl'
try:
pkl_file = open(path_cu_key, 'rb')
key = pickle.load(pkl_file)
pkl_file.close()
except:
pass
return key
def get_ck_from_timestamp(ts="",day="130709"):  # TODO: guard against a missing key file and other read errors
key=['','','']
keypathdir="/home/noname/Desktop/Integrate_chat/crypto_client/key"
try:
pkl_file = open(keypathdir+os.sep+day+".pkl", 'rb')
key_db = pickle.load(pkl_file)
for each in key_db:
if each[1]==ts:
key= each
break
except:
pass
return key
def get_encrypted(in_str="",ck=""):
if ck=="":
ck_obj=get_ck_from_obj()
ts=ck_obj[1]
ck=ck_obj[-1]
else:
ts=""
iv = Random.get_random_bytes(16)
key = md5(ck).hexdigest()
print "encryption md5 key "+key
enctext,iv,d,h = aes_encrypt_str(in_str, key, iv)
return enctext,iv,d,h,ts
def get_decrypted(encrypted_content="",ck="",iv=0,difference="",h=""):
if ck=="":
ck=get_ck_from_obj()[-1]
pass
key = md5(ck).hexdigest()
print "decrypt md5 key "+key
clear= aes_decrypt_str(encrypted_content,key,iv,difference,h)
print "aes decrypt> '"+clear+"'"
return clear
#c="abcdefg"
#print c[2:]
#print c[len(c)-2:]
#c="abcdefg"
#print c[:2]
#print c[:len(c)-2]
def demo():
in_str=u'adég mai.com'
ck="" #if ck=="" use currentkey
enc=get_encrypted(in_str,ck)
print enc
encrypted_content,iv,difference,h,ts=enc
print iv
print get_decrypted(encrypted_content,ck,iv,difference,h)
#demo()
def get_ck_debug(ck="",day="130708"):  # TODO: guard against a missing key file and other read errors
key=['','','']
keypathdir="/home/noname/Desktop/Integrate_chat/crypto_client/key"
try:
pkl_file = open(keypathdir+os.sep+day+".pkl", 'rb')
key_db = pickle.load(pkl_file)
for each in key_db:
#print each
if each[-1]==ck:
key= each
break
except:
pass
return key
#ck="""MIIEowIBAAKCAQEAs2A1Q0DZx7WxhrVMnD9DOFhVpfa1N7mE3lVPdAo6f4ME4LXfIANI6DknJ81Udp8AbySOT6/l5m5TyM5v6KQvSRtC0KD0/d2ew3SyTvrnUklXb+c2x71Hg6llClleROsNAg3A55Kxh2CE/BrY7JcLXQHfBaNHQxJbtHi7YYi1DbcscxXn7SZ5bTHXmx/tLuLuwarTkeCgdu3mnNNnT+8+Naxfi/l/gfKAHCJHGR+8UjqSx9ztmE5l+kb5gl74O/Q+a7QaraT12j1EVpEaxRMpToAeHTEtFyrmXkvqWGkzXaQu4KYhsReHz1byOiQVAZU18T6ARf7fo93Lzz5GYH1ZmQIDAQABAoIBAFIytDWtch7iVAe27PRsyRD46caz1zdB/HEmBtLWHRhxobqXnTe+SZqhFiBXJ210T26fAdficye1Rw+uCfpBwqltpKCWIa5z8F0BDPTEZVx/32GYQrIlOrBK39JuQZSzYbKfOtbjkhbHVCly5BUG4l4sjVa4C9/gecWpzbRUQSOXF541XAOmUwpZ6m/H5Nm06tBrAwDpgkcFMEUP4s+9grTGPOmEhi8ZVauirC/3q/wn1vHdci4jZVbL75VukBcZCq2t6XAZcsAPnvgWAex0JMqaCySOYu06R4Fm2n8XRerLE182eEcaPsz9CHfO/eivfrNII5eBOnDxcQxogE9q+AECgYEA7HA54pWyqOhmrFYpms74+W9R8v5NwGDakqE9rsm8wUiaXftH0v9NQcDbUy+E6G+RTa8ceYdiCQ/JYFFtK/tGvpQISjGcJun/ld68iY40qWOvhonwtaCPqEmo4UViWVksQqrHyLEYFVhbo0jQOjMkENJ4SRD7Q1fd4fQ8w5J9i3ECgYEAwjdlAOaA2cmPHhfvMKxYHwLIjLuQqns2LmOC1QhO+OIVQ9oO9zhRdfXer49Ie+HyWO005OWBIejX1TCwAwhFjoPbFI8app96p7gpuW2CSnbCOO8Yk96OynXlN/DqKuC58zcDG8hUgLlNOyALhTnNJNLiY6wtDkxxpNyPp7LDDKkCgYEA2mg04Hz+I49CwPF6zzlfvjK51ahaNFqDra1qqFpMlZM2nZgwWdViDVpUf7xGntvosoUO3ahUxCAkGmg9W8JrMELYgYjgQQYpBc1SBhMpzEt8aeBkTbL64S7h5O5OElEQVKkjkd0dbSJIzEXHq+tv5mY1nPEl2aiCG2ac9uAMPtECgYALDcAP5w6aVqBwpAgXCxgQ28WyTNKVAWI6DavamBh2jdeL/xMu+uOYBSBheZQ0iM2URhvmkzFgTrJKDfVWltfuno6Pgv2PUjBW72JgjV0HA+9V8jXB5L7XwxICtxF800GCGDVzFVnJ4cIFhXNiZ8HHQMFlztzZnXwyV+NNNh1n0QKBgEBmIWBAIk0/025b9W5RJtETjgkYBIYmfUVUmfxWZGbAHqS6IVONuhu+9Y8++X/ybr+60o3oLbtyGuI8Q0vdtQ3mdB91ds5UWuT2PSZMYXyRZZI+3KM4BJi/CJ7lO/iIJeHHQpYY9Is2vAyS+CTvLQFDWEBnuudyPgtilevsAiuD"""
#print get_ck_debug(ck)
#print get_ck_from_obj()
#print get_ck_from_timestamp(ts="81260",day="130709")
#~ iv = Random.get_random_bytes(8)
#~ key=md5(ck).hexdigest()
#~ key="123456781234567812345678"
#~ original_str='[email protected]'
#~ encrypted_str= des3_encrypt_str(original_str,9192,key,iv)
#~ decrypted_str= des3_decrypt_str(encrypted_str,9192,key,iv)
#~ print "Original \n'" + original_str +"'"
#~ print "des3_Encrypted \n'" + encrypted_str +"'"
#~ print "des3_Decrypted from encrypted (mean Original) \n'" + decrypted_str +"'"
#~ print ord(decrypted_str[-1])
#~ print ord(decrypted_str[-2])
#iv = Random.get_random_bytes(8)
#key="1234567812345678"
#encrypt_file('runkey.py','runkey.py___encrypted',9192,key,iv)
#key="1234567812345678"
#decrypt_file('runkey.py___encrypted','runkey.py___decrypted',9192,key,iv)
| mit |
rggjan/gegl-global-matting | bindings/pygegl/Gegl/__init__.py | 7 | 1702 | # PyGEGL - Python bindings for the GEGL image processing library
# Copyright (C) 2007 Manish Singh
#
# __init__.py: initialization file for the Gegl package
#
# PyGEGL is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# PyGEGL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with PyGEGL; if not, see <http://www.gnu.org/licenses/>.
# dl tricks from GST python's __init__.py
import sys
def setdlopenflags():
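    # Presumably (as in the GST python code this is copied from) the flags are widened
    # to RTLD_LAZY|RTLD_GLOBAL so that symbols from the C extension remain visible to
    # other shared objects; the hard-coded constants cover platforms without DLFCN.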
oldflags = sys.getdlopenflags()
try:
from DLFCN import RTLD_GLOBAL, RTLD_LAZY
except ImportError:
RTLD_GLOBAL = -1
RTLD_LAZY = -1
import os
osname = os.uname()[0]
if osname == 'Linux' or osname == 'SunOS' or osname == 'FreeBSD':
RTLD_GLOBAL = 0x100
RTLD_LAZY = 0x1
elif osname == 'Darwin':
RTLD_GLOBAL = 0x8
RTLD_LAZY = 0x1
del os
except:
RTLD_GLOBAL = -1
RTLD_LAZY = -1
if RTLD_GLOBAL != -1 and RTLD_LAZY != -1:
sys.setdlopenflags(RTLD_LAZY | RTLD_GLOBAL)
return oldflags
oldflags = setdlopenflags()
from _gegl import *
sys.setdlopenflags(oldflags)
del sys, setdlopenflags
from fifthleg import *
import atexit
atexit.register(exit)
del exit, atexit
del _gegl
| gpl-3.0 |
Natim/sentry | tests/sentry/api/endpoints/test_project_group_index.py | 8 | 19043 | from __future__ import absolute_import
from datetime import timedelta
from django.core.urlresolvers import reverse
from django.utils import timezone
from mock import patch
from sentry.models import (
EventMapping, Group, GroupBookmark, GroupSeen, GroupStatus
)
from sentry.testutils import APITestCase
from sentry.testutils.helpers import parse_link_header
class GroupListTest(APITestCase):
def _parse_links(self, header):
# links come in {url: {...attrs}}, but we need {rel: {...attrs}}
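        # e.g. '<http://testserver/?cursor=abc>; rel="next"; results="true"' parses to
        #   {'next': {'rel': 'next', 'results': 'true', 'href': 'http://testserver/?cursor=abc'}}
        # (illustrative values; the attribute names come from parse_link_header, plus the
        # href added below)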
links = {}
for url, attrs in parse_link_header(header).iteritems():
links[attrs['rel']] = attrs
attrs['href'] = url
return links
def test_simple_pagination(self):
project = self.project
now = timezone.now()
group1 = self.create_group(
checksum='a' * 32,
last_seen=now - timedelta(seconds=1),
)
group2 = self.create_group(
checksum='b' * 32,
last_seen=now,
)
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
})
response = self.client.get(url + '?sort_by=date&limit=1', format='json')
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]['id'] == str(group2.id)
links = self._parse_links(response['Link'])
assert links['previous']['results'] == 'false'
assert links['next']['results'] == 'true'
print(links['next']['cursor'])
response = self.client.get(links['next']['href'], format='json')
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]['id'] == str(group1.id)
links = self._parse_links(response['Link'])
assert links['previous']['results'] == 'true'
assert links['next']['results'] == 'false'
print(links['previous']['cursor'])
response = self.client.get(links['previous']['href'], format='json')
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]['id'] == str(group2.id)
links = self._parse_links(response['Link'])
assert links['previous']['results'] == 'false'
assert links['next']['results'] == 'true'
print(links['previous']['cursor'])
response = self.client.get(links['previous']['href'], format='json')
assert response.status_code == 200
assert len(response.data) == 0
group3 = self.create_group(
checksum='c' * 32,
last_seen=now + timedelta(seconds=1),
)
links = self._parse_links(response['Link'])
assert links['previous']['results'] == 'false'
assert links['next']['results'] == 'true'
print(links['previous']['cursor'])
response = self.client.get(links['previous']['href'], format='json')
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]['id'] == str(group3.id)
def test_stats_period(self):
# TODO(dcramer): this test really only checks if validation happens
# on statsPeriod
project = self.project
now = timezone.now()
group1 = self.create_group(
checksum='a' * 32,
last_seen=now - timedelta(seconds=1),
)
group2 = self.create_group(
checksum='b' * 32,
last_seen=now,
)
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
})
response = self.client.get(url + '?statsPeriod=24h', format='json')
assert response.status_code == 200
response = self.client.get(url + '?statsPeriod=14d', format='json')
assert response.status_code == 200
response = self.client.get(url + '?statsPeriod=', format='json')
assert response.status_code == 200
response = self.client.get(url + '?statsPeriod=48h', format='json')
assert response.status_code == 400
def test_auto_resolved(self):
project = self.project
project.update_option('sentry:resolve_age', 1)
now = timezone.now()
group1 = self.create_group(
checksum='a' * 32,
last_seen=now - timedelta(days=1),
)
group2 = self.create_group(
checksum='b' * 32,
last_seen=now,
)
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
})
response = self.client.get(url, format='json')
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]['id'] == str(group2.id)
def test_lookup_by_event_id(self):
project = self.project
project.update_option('sentry:resolve_age', 1)
now = timezone.now()
group = self.create_group(checksum='a' * 32)
self.create_group(checksum='b' * 32)
EventMapping.objects.create(
event_id='c' * 32,
project=group.project,
group=group,
)
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
})
response = self.client.get(url + '?query=' + ('c' * 32), format='json')
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]['id'] == str(group.id)
def test_lookup_by_unknown_event_id(self):
project = self.project
project.update_option('sentry:resolve_age', 1)
now = timezone.now()
group = self.create_group(checksum='a' * 32)
self.create_group(checksum='b' * 32)
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
})
response = self.client.get(url + '?query=' + ('c' * 32), format='json')
assert response.status_code == 200
assert len(response.data) == 0
class GroupUpdateTest(APITestCase):
def test_global_resolve(self):
group1 = self.create_group(checksum='a' * 32, status=GroupStatus.RESOLVED)
group2 = self.create_group(checksum='b' * 32, status=GroupStatus.UNRESOLVED)
group3 = self.create_group(checksum='c' * 32, status=GroupStatus.MUTED)
group4 = self.create_group(
project=self.create_project(slug='foo'),
checksum='b' * 32, status=GroupStatus.UNRESOLVED)
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
})
response = self.client.put(url + '?status=unresolved', data={
'status': 'resolved',
}, format='json')
assert response.status_code == 200, response.data
assert response.data == {
'status': 'resolved',
}
# the previously resolved entry should not be included
new_group1 = Group.objects.get(id=group1.id)
assert new_group1.status == GroupStatus.RESOLVED
assert new_group1.resolved_at is None
new_group2 = Group.objects.get(id=group2.id)
assert new_group2.status == GroupStatus.RESOLVED
assert new_group2.resolved_at is not None
# the muted entry should not be included
new_group3 = Group.objects.get(id=group3.id)
assert new_group3.status == GroupStatus.MUTED
assert new_group3.resolved_at is None
new_group4 = Group.objects.get(id=group4.id)
assert new_group4.status == GroupStatus.UNRESOLVED
assert new_group4.resolved_at is None
def test_selective_status_update(self):
project = self.project
group1 = self.create_group(checksum='a' * 32, status=GroupStatus.RESOLVED)
group2 = self.create_group(checksum='b' * 32, status=GroupStatus.UNRESOLVED)
group3 = self.create_group(checksum='c' * 32, status=GroupStatus.MUTED)
group4 = self.create_group(
project=self.create_project(slug='foo'),
checksum='b' * 32, status=GroupStatus.UNRESOLVED)
self.login_as(user=self.user)
url = '{url}?id={group1.id}&id={group2.id}&group4={group4.id}'.format(
url=reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
}),
group1=group1,
group2=group2,
group4=group4,
)
response = self.client.put(url, data={
'status': 'resolved',
}, format='json')
assert response.status_code == 200
assert response.data == {
'status': 'resolved',
}
new_group1 = Group.objects.get(id=group1.id)
assert new_group1.resolved_at is None
new_group2 = Group.objects.get(id=group2.id)
assert new_group2.resolved_at is not None
assert new_group2.status == GroupStatus.RESOLVED
new_group3 = Group.objects.get(id=group3.id)
assert new_group3.resolved_at is None
assert new_group3.status == GroupStatus.MUTED
new_group4 = Group.objects.get(id=group4.id)
assert new_group4.resolved_at is None
assert new_group4.status == GroupStatus.UNRESOLVED
def test_set_unresolved(self):
project = self.project
group = self.create_group(checksum='a' * 32, status=GroupStatus.RESOLVED)
self.login_as(user=self.user)
url = '{url}?id={group.id}'.format(
url=reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
}),
group=group,
)
response = self.client.put(url, data={
'status': 'unresolved',
}, format='json')
assert response.status_code == 200
assert response.data == {
'status': 'unresolved',
}
group = Group.objects.get(id=group.id)
assert group.status == GroupStatus.UNRESOLVED
def test_set_bookmarked(self):
project = self.project
group1 = self.create_group(checksum='a' * 32, status=GroupStatus.RESOLVED)
group2 = self.create_group(checksum='b' * 32, status=GroupStatus.UNRESOLVED)
group3 = self.create_group(checksum='c' * 32, status=GroupStatus.MUTED)
group4 = self.create_group(
project=self.create_project(slug='foo'),
checksum='b' * 32, status=GroupStatus.UNRESOLVED)
self.login_as(user=self.user)
url = '{url}?id={group1.id}&id={group2.id}&group4={group4.id}'.format(
url=reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
}),
group1=group1,
group2=group2,
group4=group4,
)
response = self.client.put(url, data={
'isBookmarked': 'true',
}, format='json')
assert response.status_code == 200
assert response.data == {
'isBookmarked': True,
}
bookmark1 = GroupBookmark.objects.filter(group=group1, user=self.user)
assert bookmark1.exists()
bookmark2 = GroupBookmark.objects.filter(group=group2, user=self.user)
assert bookmark2.exists()
bookmark3 = GroupBookmark.objects.filter(group=group3, user=self.user)
assert not bookmark3.exists()
bookmark4 = GroupBookmark.objects.filter(group=group4, user=self.user)
assert not bookmark4.exists()
def test_set_public(self):
group1 = self.create_group(checksum='a' * 32, is_public=False)
group2 = self.create_group(checksum='b' * 32, is_public=False)
self.login_as(user=self.user)
url = '{url}?id={group1.id}&id={group2.id}'.format(
url=reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
}),
group1=group1,
group2=group2,
)
response = self.client.put(url, data={
'isPublic': 'true',
}, format='json')
assert response.status_code == 200
assert response.data == {
'isPublic': True,
}
new_group1 = Group.objects.get(id=group1.id)
assert new_group1.is_public
new_group2 = Group.objects.get(id=group2.id)
assert new_group2.is_public
def test_set_private(self):
group1 = self.create_group(checksum='a' * 32, is_public=True)
group2 = self.create_group(checksum='b' * 32, is_public=True)
self.login_as(user=self.user)
url = '{url}?id={group1.id}&id={group2.id}'.format(
url=reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
}),
group1=group1,
group2=group2,
)
response = self.client.put(url, data={
'isPublic': 'false',
}, format='json')
assert response.status_code == 200
assert response.data == {
'isPublic': False,
}
new_group1 = Group.objects.get(id=group1.id)
assert not new_group1.is_public
new_group2 = Group.objects.get(id=group2.id)
assert not new_group2.is_public
def test_set_has_seen(self):
project = self.project
group1 = self.create_group(checksum='a' * 32, status=GroupStatus.RESOLVED)
group2 = self.create_group(checksum='b' * 32, status=GroupStatus.UNRESOLVED)
group3 = self.create_group(checksum='c' * 32, status=GroupStatus.MUTED)
group4 = self.create_group(
project=self.create_project(slug='foo'),
checksum='b' * 32, status=GroupStatus.UNRESOLVED)
self.login_as(user=self.user)
url = '{url}?id={group1.id}&id={group2.id}&group4={group4.id}'.format(
url=reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
}),
group1=group1,
group2=group2,
group4=group4,
)
response = self.client.put(url, data={
'hasSeen': 'true',
}, format='json')
assert response.status_code == 200
assert response.data == {
'hasSeen': True,
}
r1 = GroupSeen.objects.filter(group=group1, user=self.user)
assert r1.exists()
r2 = GroupSeen.objects.filter(group=group2, user=self.user)
assert r2.exists()
r3 = GroupSeen.objects.filter(group=group3, user=self.user)
assert not r3.exists()
r4 = GroupSeen.objects.filter(group=group4, user=self.user)
assert not r4.exists()
@patch('sentry.api.endpoints.project_group_index.merge_group')
def test_merge(self, merge_group):
project = self.project
group1 = self.create_group(checksum='a' * 32, times_seen=1)
group2 = self.create_group(checksum='b' * 32, times_seen=50)
group3 = self.create_group(checksum='c' * 32, times_seen=2)
group4 = self.create_group(checksum='d' * 32)
self.login_as(user=self.user)
url = '{url}?id={group1.id}&id={group2.id}&id={group3.id}'.format(
url=reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
}),
group1=group1,
group2=group2,
group3=group3,
)
response = self.client.put(url, data={
'merge': '1',
}, format='json')
assert response.status_code == 200
assert response.data['merge']['parent'] == str(group2.id)
assert sorted(response.data['merge']['children']) == [
str(group1.id),
str(group3.id),
]
assert len(merge_group.mock_calls) == 2
merge_group.delay.assert_any_call(from_object_id=group1.id, to_object_id=group2.id)
merge_group.delay.assert_any_call(from_object_id=group3.id, to_object_id=group2.id)
class GroupDeleteTest(APITestCase):
def test_global_is_forbidden(self):
project = self.project
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
})
response = self.client.delete(url, data={
'status': 'resolved',
}, format='json')
assert response.status_code == 400
def test_delete_by_id(self):
project = self.project
group1 = self.create_group(checksum='a' * 32, status=GroupStatus.RESOLVED)
group2 = self.create_group(checksum='b' * 32, status=GroupStatus.UNRESOLVED)
group3 = self.create_group(checksum='c' * 32, status=GroupStatus.MUTED)
group4 = self.create_group(
project=self.create_project(slug='foo'),
checksum='b' * 32, status=GroupStatus.UNRESOLVED)
self.login_as(user=self.user)
url = '{url}?id={group1.id}&id={group2.id}&group4={group4.id}'.format(
url=reverse('sentry-api-0-project-group-index', kwargs={
'organization_slug': self.project.organization.slug,
'project_slug': self.project.slug,
}),
group1=group1,
group2=group2,
group4=group4,
)
with self.tasks():
response = self.client.delete(url, format='json')
assert response.status_code == 204
new_group1 = Group.objects.filter(id=group1.id)
assert not new_group1.exists()
new_group2 = Group.objects.filter(id=group2.id)
assert not new_group2.exists()
new_group3 = Group.objects.filter(id=group3.id)
assert new_group3.exists()
new_group4 = Group.objects.filter(id=group4.id)
assert new_group4.exists()
| bsd-3-clause |
gigglesninja/senior-design | MAVProxy/MAVProxy/modules/lib/mp_menu.py | 4 | 10663 | #!/usr/bin/env python
'''
menu handling widgets for wx
Andrew Tridgell
November 2013
'''
import wx
from MAVProxy.modules.lib import mp_util
class MPMenuGeneric(object):
'''a MP menu separator'''
def __init__(self):
pass
def find_selected(self, event):
return None
def _append(self, menu):
'''append this menu item to a menu'''
pass
def __str__(self):
return "MPMenuGeneric()"
def __repr__(self):
return str(self.__str__())
class MPMenuSeparator(MPMenuGeneric):
'''a MP menu separator'''
def __init__(self):
MPMenuGeneric.__init__(self)
def _append(self, menu):
'''append this menu item to a menu'''
menu.AppendSeparator()
def __str__(self):
return "MPMenuSeparator()"
class MPMenuItem(MPMenuGeneric):
'''a MP menu item'''
def __init__(self, name, description='', returnkey=None, handler=None):
MPMenuGeneric.__init__(self)
self.name = name
self.description = description
self.returnkey = returnkey
self.handler = handler
self.handler_result = None
def find_selected(self, event):
'''find the selected menu item'''
if event.GetId() == self.id():
return self
return None
def call_handler(self):
'''optionally call a handler function'''
if self.handler is None:
return
call = getattr(self.handler, 'call', None)
if call is not None:
self.handler_result = call()
def id(self):
        '''ID used to identify the returned menu items;
        uses a 16-bit signed integer'''
        # 0x7FFF is used because Windows only allows 16-bit IDs
return int(hash((self.name, self.returnkey)) & 0x7FFF)
def _append(self, menu):
'''append this menu item to a menu'''
menu.Append(self.id(), self.name, self.description)
def __str__(self):
return "MPMenuItem(%s,%s,%s)" % (self.name, self.description, self.returnkey)
class MPMenuCheckbox(MPMenuItem):
'''a MP menu item as a checkbox'''
def __init__(self, name, description='', returnkey=None, checked=False, handler=None):
MPMenuItem.__init__(self, name, description=description, returnkey=returnkey, handler=handler)
self.checked = checked
def find_selected(self, event):
'''find the selected menu item'''
if event.GetId() == self.id():
self.checked = event.IsChecked()
return self
return None
def IsChecked(self):
'''return true if item is checked'''
return self.checked
def _append(self, menu):
'''append this menu item to a menu'''
menu.AppendCheckItem(self.id(), self.name, self.description)
menu.Check(self.id(), self.checked)
def __str__(self):
return "MPMenuCheckbox(%s,%s,%s,%s)" % (self.name, self.description, self.returnkey, str(self.checked))
class MPMenuRadio(MPMenuItem):
'''a MP menu item as a radio item'''
def __init__(self, name, description='', returnkey=None, selected=None, items=[], handler=None):
MPMenuItem.__init__(self, name, description=description, returnkey=returnkey, handler=handler)
self.items = items
self.choice = 0
self.initial = selected
def set_choices(self, items):
'''set radio item choices'''
self.items = items
def get_choice(self):
'''return the chosen item'''
return self.items[self.choice]
def find_selected(self, event):
'''find the selected menu item'''
first = self.id()
last = first + len(self.items) - 1
evid = event.GetId()
if evid >= first and evid <= last:
self.choice = evid - first
return self
return None
def _append(self, menu):
'''append this menu item to a menu'''
submenu = wx.Menu()
for i in range(len(self.items)):
submenu.AppendRadioItem(self.id()+i, self.items[i], self.description)
if self.items[i] == self.initial:
submenu.Check(self.id()+i, True)
menu.AppendMenu(-1, self.name, submenu)
def __str__(self):
return "MPMenuRadio(%s,%s,%s,%s)" % (self.name, self.description, self.returnkey, self.get_choice())
class MPMenuSubMenu(MPMenuGeneric):
'''a MP menu item'''
def __init__(self, name, items):
MPMenuGeneric.__init__(self)
self.name = name
self.items = items
def add(self, items, addto=None):
'''add more items to a sub-menu'''
if not isinstance(items, list):
items = [items]
for m in items:
updated = False
for i in range(len(self.items)):
try:
if self.items[i].name == m.name:
self.items[i] = m
updated = True
except Exception:
pass
if not updated:
self.items.append(m)
def combine(self, submenu):
'''combine a new menu with an existing one'''
self.items.extend(submenu.items)
def wx_menu(self):
'''return a wx.Menu() for this menu'''
menu = wx.Menu()
for i in range(len(self.items)):
m = self.items[i]
m._append(menu)
return menu
def find_selected(self, event):
'''find the selected menu item'''
for m in self.items:
ret = m.find_selected(event)
if ret is not None:
return ret
return None
def _append(self, menu):
'''append this menu item to a menu'''
menu.AppendMenu(-1, self.name, self.wx_menu())
def __str__(self):
return "MPMenuSubMenu(%s)" % (self.name)
class MPMenuTop(object):
'''a MP top level menu'''
def __init__(self, items):
self.items = items
def add(self, items):
'''add a submenu'''
if not isinstance(items, list):
items = [items]
for m in items:
updated = False
for i in range(len(self.items)):
if self.items[i].name == m.name:
self.items[i] = m
updated = True
if not updated:
self.items.append(m)
def wx_menu(self):
'''return a wx.MenuBar() for the menu'''
menubar = wx.MenuBar()
for i in range(len(self.items)):
m = self.items[i]
menubar.Append(m.wx_menu(), m.name)
return menubar
def find_selected(self, event):
'''find the selected menu item'''
for i in range(len(self.items)):
m = self.items[i]
ret = m.find_selected(event)
if ret is not None:
return ret
return None
class MPMenuCallFileDialog(object):
'''used to create a file dialog callback'''
def __init__(self, flags=wx.FD_OPEN, title='Filename', wildcard='*.*'):
self.flags = flags
self.title = title
self.wildcard = wildcard
def call(self):
'''show a file dialog'''
dlg = wx.FileDialog(None, self.title, '', "", self.wildcard, self.flags)
if dlg.ShowModal() != wx.ID_OK:
return None
return dlg.GetPath()
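# Typical use (illustrative): pass an instance as an MPMenuItem handler, e.g.
#   MPMenuItem('&Open', returnkey='open', handler=MPMenuCallFileDialog(title='Open file'))
# call_handler() then stores the chosen path in the item's handler_result attribute.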
class MPMenuCallTextDialog(object):
'''used to create a value dialog callback'''
def __init__(self, title='Enter Value', default=''):
self.title = title
self.default = default
def call(self):
'''show a value dialog'''
dlg = wx.TextEntryDialog(None, self.title, self.title, defaultValue=str(self.default))
if dlg.ShowModal() != wx.ID_OK:
return None
return dlg.GetValue()
class MPMenuChildMessageDialog(object):
'''used to create a message dialog in a child process'''
def __init__(self, title='Information', message='', font_size=18):
self.title = title
self.message = message
self.font_size = font_size
import multiprocessing
t = multiprocessing.Process(target=self.show)
t.start()
def show(self):
'''show the dialog as a child process'''
mp_util.child_close_fds()
from wx.lib.agw.genericmessagedialog import GenericMessageDialog
app = wx.PySimpleApp()
        # NOTE: the font size change does not take effect yet; the cause is unknown
font = wx.Font(self.font_size, wx.MODERN, wx.NORMAL, wx.NORMAL)
dlg = GenericMessageDialog(None, self.message, self.title, wx.ICON_INFORMATION|wx.OK)
dlg.SetFont(font)
dlg.ShowModal()
app.MainLoop()
if __name__ == '__main__':
from MAVProxy.modules.lib.mp_image import MPImage
import time
im = MPImage(mouse_events=True,
key_events=True,
can_drag = False,
can_zoom = False,
auto_size = True)
menu = MPMenuTop([MPMenuSubMenu('&File',
items=[MPMenuItem('&Open\tCtrl+O'),
MPMenuItem('&Save\tCtrl+S'),
MPMenuItem('Close', 'Close'),
MPMenuItem('&Quit\tCtrl+Q', 'Quit')]),
MPMenuSubMenu('Edit',
items=[MPMenuSubMenu('Option',
items=[MPMenuItem('Foo'),
MPMenuItem('Bar'),
MPMenuSeparator(),
MPMenuCheckbox('&Grid\tCtrl+G')]),
MPMenuItem('Image', 'EditImage'),
MPMenuRadio('Colours',
items=['Red','Green','Blue']),
MPMenuRadio('Shapes',
items=['Circle','Square','Triangle'])])])
im.set_menu(menu)
popup = MPMenuSubMenu('A Popup',
items=[MPMenuItem('Sub1'),
MPMenuItem('Sub2'),
MPMenuItem('Sub3')])
im.set_popup_menu(popup)
while im.is_alive():
for event in im.events():
if isinstance(event, MPMenuItem):
print(event, getattr(event, 'popup_pos', None))
continue
else:
print(event)
time.sleep(0.1)
| gpl-2.0 |
eusi/MissionPlanerHM | Lib/site-packages/scipy/ndimage/interpolation.py | 55 | 25609 | # Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy
import _ni_support
import _nd_image
def _extend_mode_to_code(mode):
mode = _ni_support._extend_mode_to_code(mode)
return mode
def spline_filter1d(input, order=3, axis=-1, output=numpy.float64):
"""
Calculates a one-dimensional spline filter along the given axis.
The lines of the array along the given axis are filtered by a
spline filter. The order of the spline must be >= 2 and <= 5.
Parameters
----------
input : array_like
The input array.
order : int, optional
The order of the spline, default is 3.
axis : int, optional
The axis along which the spline filter is applied. Default is the last
axis.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array. Default is `numpy.float64`.
Returns
-------
return_value : ndarray or None
The filtered input. If `output` is given as a parameter, None is
returned.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
if order in [0, 1]:
output[...] = numpy.array(input)
else:
axis = _ni_support._check_axis(axis, input.ndim)
_nd_image.spline_filter1d(input, order, axis, output)
return return_value
def spline_filter(input, order=3, output = numpy.float64):
"""
Multi-dimensional spline filter.
For more details, see `spline_filter1d`.
See Also
--------
spline_filter1d
Notes
-----
The multi-dimensional filter is implemented as a sequence of
one-dimensional spline filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
"""
if order < 2 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
if order not in [0, 1] and input.ndim > 0:
for axis in range(input.ndim):
spline_filter1d(input, order, axis, output = output)
input = output
else:
output[...] = input[...]
return return_value
def geometric_transform(input, mapping, output_shape=None,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True,
extra_arguments=(), extra_keywords={}):
"""
    Apply an arbitrary geometric transform.
The given mapping function is used to find, for each point in the
output, the corresponding coordinates in the input. The value of the
input at those coordinates is determined by spline interpolation of
the requested order.
Parameters
----------
input : array_like
The input array.
mapping : callable
A callable object that accepts a tuple of length equal to the output
array rank, and returns the corresponding input coordinates as a tuple
of length equal to the input array rank.
output_shape : tuple of ints
Shape tuple.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
extra_arguments : tuple, optional
Extra arguments passed to `mapping`.
extra_keywords : dict, optional
Extra keywords passed to `mapping`.
Returns
-------
return_value : ndarray or None
The filtered input. If `output` is given as a parameter, None is
returned.
See Also
--------
map_coordinates, affine_transform, spline_filter1d
Examples
--------
>>> a = np.arange(12.).reshape((4, 3))
>>> def shift_func(output_coords):
... return (output_coords[0] - 0.5, output_coords[1] - 0.5)
...
>>> sp.ndimage.geometric_transform(a, shift_func)
array([[ 0. , 0. , 0. ],
[ 0. , 1.362, 2.738],
[ 0. , 4.812, 6.187],
[ 0. , 8.263, 9.637]])
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
_nd_image.geometric_transform(filtered, mapping, None, None, None,
output, order, mode, cval, extra_arguments, extra_keywords)
return return_value
def map_coordinates(input, coordinates, output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Map the input array to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input. The value of the input at
those coordinates is determined by spline interpolation of the
requested order.
The shape of the output is derived from that of the coordinate
array by dropping the first axis. The values of the array along
the first axis are the coordinates in the input array at which the
output value is found.
Parameters
----------
input : ndarray
The input array.
coordinates : array_like
The coordinates at which `input` is evaluated.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
return_value : ndarray
The result of transforming the input. The shape of the output is
derived from that of `coordinates` by dropping the first axis.
See Also
--------
spline_filter, geometric_transform, scipy.interpolate
Examples
--------
>>> import scipy.ndimage
>>> a = np.arange(12.).reshape((4, 3))
>>> a
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.],
[ 9., 10., 11.]])
>>> sp.ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
[ 2. 7.]
Above, the interpolated value of a[0.5, 0.5] gives output[0], while
a[2, 1] is output[1].
>>> inds = np.array([[0.5, 2], [0.5, 4]])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
array([ 2. , -33.3])
>>> sp.ndimage.map_coordinates(a, inds, order=1, mode='nearest')
array([ 2., 8.])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
    array([ True, False], dtype=bool)
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
coordinates = numpy.asarray(coordinates)
if numpy.iscomplexobj(coordinates):
raise TypeError('Complex type not supported')
output_shape = coordinates.shape[1:]
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
if coordinates.shape[0] != input.ndim:
raise RuntimeError('invalid shape for coordinate array')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
_nd_image.geometric_transform(filtered, None, coordinates, None, None,
output, order, mode, cval, None, None)
return return_value
def affine_transform(input, matrix, offset=0.0, output_shape=None,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Apply an affine transformation.
The given matrix and offset are used to find for each point in the
output the corresponding coordinates in the input by an affine
transformation. The value of the input at those coordinates is
determined by spline interpolation of the requested order. Points
outside the boundaries of the input are filled according to the given
mode.
Parameters
----------
input : ndarray
The input array.
matrix : ndarray
The matrix must be two-dimensional or can also be given as a
one-dimensional sequence or array. In the latter case, it is assumed
        that the matrix is diagonal. A more efficient algorithm is then
applied that exploits the separability of the problem.
offset : float or sequence, optional
The offset into the array where the transform is applied. If a float,
`offset` is the same for each axis. If a sequence, `offset` should
contain one value for each axis.
output_shape : tuple of ints, optional
Shape tuple.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
return_value : ndarray or None
The transformed input. If `output` is given as a parameter, None is
returned.
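    Examples
    --------
    A minimal sketch (assumes ``np`` is numpy and ``sp`` is scipy; an
    identity matrix with a one-row offset resamples the input shifted by
    one row, and the default output shape equals the input shape):
    >>> a = np.arange(12.).reshape((4, 3))
    >>> sp.ndimage.affine_transform(a, [[1, 0], [0, 1]], offset=(1, 0)).shape
    (4, 3)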
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
matrix = numpy.asarray(matrix, dtype = numpy.float64)
if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
raise RuntimeError('no proper affine matrix provided')
if matrix.shape[0] != input.ndim:
raise RuntimeError('affine matrix has wrong number of rows')
if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
raise RuntimeError('affine matrix has wrong number of columns')
if not matrix.flags.contiguous:
matrix = matrix.copy()
offset = _ni_support._normalize_sequence(offset, input.ndim)
offset = numpy.asarray(offset, dtype = numpy.float64)
if offset.ndim != 1 or offset.shape[0] < 1:
raise RuntimeError('no proper offset provided')
if not offset.flags.contiguous:
offset = offset.copy()
if matrix.ndim == 1:
_nd_image.zoom_shift(filtered, matrix, offset, output, order,
mode, cval)
else:
_nd_image.geometric_transform(filtered, None, None, matrix, offset,
output, order, mode, cval, None, None)
return return_value
def shift(input, shift, output=None, order=3, mode='constant', cval=0.0,
prefilter=True):
"""
Shift an array.
The array is shifted using spline interpolation of the requested order.
Points outside the boundaries of the input are filled according to the
given mode.
Parameters
----------
input : ndarray
The input array.
    shift : float or sequence
The shift along the axes. If a float, `shift` is the same for each
axis. If a sequence, `shift` should contain one value for each axis.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
return_value : ndarray or None
The shifted input. If `output` is given as a parameter, None is
returned.
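    Examples
    --------
    A minimal sketch (assumes ``np`` is numpy and ``sp`` is scipy; with an
    integer shift of one row and the default ``mode='constant'``, the data
    moves one row towards higher indices and the vacated row is filled
    with `cval`):
    >>> a = np.arange(12.).reshape((4, 3))
    >>> shifted = sp.ndimage.shift(a, (1, 0), order=1)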
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input)
shift = _ni_support._normalize_sequence(shift, input.ndim)
shift = [-ii for ii in shift]
shift = numpy.asarray(shift, dtype = numpy.float64)
if not shift.flags.contiguous:
shift = shift.copy()
_nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval)
return return_value
def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0,
prefilter=True):
"""
Zoom an array.
The array is zoomed using spline interpolation of the requested order.
Parameters
----------
input : ndarray
The input array.
    zoom : float or sequence
The zoom factor along the axes. If a float, `zoom` is the same for each
axis. If a sequence, `zoom` should contain one value for each axis.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
return_value : ndarray or None
The zoomed input. If `output` is given as a parameter, None is
returned.
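    Examples
    --------
    A minimal sketch (assumes ``np`` is numpy and ``sp`` is scipy; a zoom
    factor of 2 doubles the length of each axis, as reflected in the
    output shape):
    >>> a = np.arange(12.).reshape((4, 3))
    >>> sp.ndimage.zoom(a, 2, order=1).shape
    (8, 6)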
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
zoom = _ni_support._normalize_sequence(zoom, input.ndim)
output_shape = tuple([int(ii * jj) for ii, jj in zip(input.shape, zoom)])
zoom = (numpy.array(input.shape)-1)/(numpy.array(output_shape,float)-1)
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
zoom = numpy.asarray(zoom, dtype = numpy.float64)
zoom = numpy.ascontiguousarray(zoom)
_nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval)
return return_value
def _minmax(coor, minc, maxc):
if coor[0] < minc[0]:
minc[0] = coor[0]
if coor[0] > maxc[0]:
maxc[0] = coor[0]
if coor[1] < minc[1]:
minc[1] = coor[1]
if coor[1] > maxc[1]:
maxc[1] = coor[1]
return minc, maxc
def rotate(input, angle, axes=(1, 0), reshape=True,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Rotate an array.
The array is rotated in the plane defined by the two axes given by the
`axes` parameter using spline interpolation of the requested order.
Parameters
----------
input : ndarray
The input array.
angle : float
The rotation angle in degrees.
axes : tuple of 2 ints, optional
The two axes that define the plane of rotation. Default is the first
two axes.
reshape : bool, optional
If `reshape` is true, the output shape is adapted so that the input
array is contained completely in the output. Default is True.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
return_value : ndarray or None
The rotated input. If `output` is given as a parameter, None is
returned.
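    Examples
    --------
    A minimal sketch (assumes ``np`` is numpy and ``sp`` is scipy; with the
    default ``reshape=True`` a 90 degree rotation of a 4x3 array produces
    a 3x4 array):
    >>> a = np.arange(12.).reshape((4, 3))
    >>> sp.ndimage.rotate(a, 90).shape
    (3, 4)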
"""
input = numpy.asarray(input)
axes = list(axes)
rank = input.ndim
if axes[0] < 0:
axes[0] += rank
if axes[1] < 0:
axes[1] += rank
if axes[0] < 0 or axes[1] < 0 or axes[0] > rank or axes[1] > rank:
raise RuntimeError('invalid rotation plane specified')
if axes[0] > axes[1]:
axes = axes[1], axes[0]
angle = numpy.pi / 180 * angle
m11 = math.cos(angle)
m12 = math.sin(angle)
m21 = -math.sin(angle)
m22 = math.cos(angle)
matrix = numpy.array([[m11, m12],
[m21, m22]], dtype = numpy.float64)
iy = input.shape[axes[0]]
ix = input.shape[axes[1]]
if reshape:
mtrx = numpy.array([[ m11, -m21],
[-m12, m22]], dtype = numpy.float64)
minc = [0, 0]
maxc = [0, 0]
coor = numpy.dot(mtrx, [0, ix])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, 0])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, ix])
minc, maxc = _minmax(coor, minc, maxc)
oy = int(maxc[0] - minc[0] + 0.5)
ox = int(maxc[1] - minc[1] + 0.5)
else:
oy = input.shape[axes[0]]
ox = input.shape[axes[1]]
offset = numpy.zeros((2,), dtype = numpy.float64)
offset[0] = float(oy) / 2.0 - 0.5
offset[1] = float(ox) / 2.0 - 0.5
offset = numpy.dot(matrix, offset)
tmp = numpy.zeros((2,), dtype = numpy.float64)
tmp[0] = float(iy) / 2.0 - 0.5
tmp[1] = float(ix) / 2.0 - 0.5
offset = tmp - offset
output_shape = list(input.shape)
output_shape[axes[0]] = oy
output_shape[axes[1]] = ox
output_shape = tuple(output_shape)
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
if input.ndim <= 2:
affine_transform(input, matrix, offset, output_shape, output,
order, mode, cval, prefilter)
else:
coordinates = []
size = numpy.product(input.shape,axis=0)
size //= input.shape[axes[0]]
size //= input.shape[axes[1]]
for ii in range(input.ndim):
if ii not in axes:
coordinates.append(0)
else:
coordinates.append(slice(None, None, None))
iter_axes = range(input.ndim)
iter_axes.reverse()
iter_axes.remove(axes[0])
iter_axes.remove(axes[1])
os = (output_shape[axes[0]], output_shape[axes[1]])
for ii in range(size):
ia = input[tuple(coordinates)]
oa = output[tuple(coordinates)]
affine_transform(ia, matrix, offset, os, oa, order, mode,
cval, prefilter)
for jj in iter_axes:
if coordinates[jj] < input.shape[jj] - 1:
coordinates[jj] += 1
break
else:
coordinates[jj] = 0
return return_value
| gpl-3.0 |
ToonTownInfiniteRepo/ToontownInfinite | toontown/minigame/DistributedIceGameAI.py | 2 | 15666 | from pandac.PandaModules import Point3
from direct.distributed.ClockDelta import globalClockDelta
from direct.fsm import ClassicFSM, State
from direct.task import Task
from toontown.minigame import DistributedMinigameAI
from toontown.minigame import MinigameGlobals
from toontown.minigame import IceGameGlobals
from toontown.ai.ToonBarrier import ToonBarrier
class DistributedIceGameAI(DistributedMinigameAI.DistributedMinigameAI):
notify = directNotify.newCategory('DistributedIceGameAI')
def __init__(self, air, minigameId):
try:
self.DistributedIceGameAI_initialized
except:
self.DistributedIceGameAI_initialized = 1
DistributedMinigameAI.DistributedMinigameAI.__init__(self, air, minigameId)
self.gameFSM = ClassicFSM.ClassicFSM('DistributedIceGameAI', [State.State('off', self.enterOff, self.exitOff, ['waitClientsChoices']),
State.State('waitClientsChoices', self.enterWaitClientsChoices, self.exitWaitClientsChoices, ['cleanup', 'processChoices']),
State.State('processChoices', self.enterProcessChoices, self.exitProcessChoices, ['waitEndingPositions', 'cleanup']),
State.State('waitEndingPositions', self.enterWaitEndingPositions, self.exitWaitEndingPositions, ['processEndingPositions', 'cleanup']),
State.State('processEndingPositions', self.enterProcessEndingPositions, self.exitProcessEndingPositions, ['waitClientsChoices', 'scoreMatch', 'cleanup']),
State.State('scoreMatch', self.enterScoreMatch, self.exitScoreMatch, ['waitClientsChoices', 'finalResults', 'cleanup']),
State.State('finalResults', self.enterFinalResults, self.exitFinalResults, ['cleanup']),
State.State('cleanup', self.enterCleanup, self.exitCleanup, ['off'])], 'off', 'off')
self.addChildGameFSM(self.gameFSM)
self.avatarChoices = {}
self.avatarEndingPositions = {}
self.curRound = 0
self.curMatch = 0
self.finalEndingPositions = [Point3(IceGameGlobals.StartingPositions[0]),
Point3(IceGameGlobals.StartingPositions[1]),
Point3(IceGameGlobals.StartingPositions[2]),
Point3(IceGameGlobals.StartingPositions[3])]
def generate(self):
self.notify.debug('generate')
DistributedMinigameAI.DistributedMinigameAI.generate(self)
def delete(self):
self.notify.debug('delete')
taskMgr.remove(self.taskName('wait-choices-timeout'))
taskMgr.remove(self.taskName('endingPositionsTimeout'))
del self.gameFSM
DistributedMinigameAI.DistributedMinigameAI.delete(self)
def setGameReady(self):
self.notify.debug('setGameReady')
DistributedMinigameAI.DistributedMinigameAI.setGameReady(self)
self.numTreasures = IceGameGlobals.NumTreasures[self.getSafezoneId()]
self.numTreasuresTaken = 0
self.takenTreasuresTable = [0] * self.numTreasures
self.numPenalties = IceGameGlobals.NumPenalties[self.getSafezoneId()]
self.numPenaltiesTaken = 0
self.takenPenaltiesTable = [0] * self.numPenalties
def setGameStart(self, timestamp):
self.notify.debug('setGameStart')
DistributedMinigameAI.DistributedMinigameAI.setGameStart(self, timestamp)
self.gameFSM.request('waitClientsChoices')
def setGameAbort(self):
self.notify.debug('setGameAbort')
if self.gameFSM.getCurrentState():
self.gameFSM.request('cleanup')
DistributedMinigameAI.DistributedMinigameAI.setGameAbort(self)
def gameOver(self):
self.notify.debug('gameOver')
self.gameFSM.request('cleanup')
DistributedMinigameAI.DistributedMinigameAI.gameOver(self)
def enterOff(self):
self.notify.debug('enterOff')
def exitOff(self):
pass
def enterCleanup(self):
self.notify.debug('enterCleanup')
self.gameFSM.request('off')
def exitCleanup(self):
pass
def enterWaitClientsChoices(self):
self.notify.debug('enterWaitClientsChoices')
self.resetChoices()
self.sendUpdate('setMatchAndRound', [self.curMatch, self.curRound])
self.sendUpdate('setNewState', ['inputChoice'])
taskMgr.doMethodLater(IceGameGlobals.InputTimeout, self.waitClientsChoicesTimeout, self.taskName('wait-choices-timeout'))
self.sendUpdate('setTimerStartTime', [globalClockDelta.getFrameNetworkTime()])
def exitWaitClientsChoices(self):
self.notify.debug('exitWaitClientsChoices')
taskMgr.remove(self.taskName('wait-choices-timeout'))
def enterProcessChoices(self):
forceAndHeading = []
for avId in self.avIdList:
force = self.avatarChoices[avId][0]
heading = self.avatarChoices[avId][1]
forceAndHeading.append([force, heading])
self.notify.debug('tireInputs = %s' % forceAndHeading)
self.sendUpdate('setTireInputs', [forceAndHeading])
self.gameFSM.request('waitEndingPositions')
def exitProcessChoices(self):
pass
def enterWaitEndingPositions(self):
if self.curRound == 0:
self.takenTreasuresTable = [0] * self.numTreasures
self.takenPenaltiesTable = [0] * self.numPenalties
        taskMgr.doMethodLater(IceGameGlobals.InputTimeout, self.endingPositionsTimeout, self.taskName('endingPositionsTimeout'))
self.avatarEndingPositions = {}
def exitWaitEndingPositions(self):
taskMgr.remove(self.taskName('endingPositionsTimeout'))
def enterProcessEndingPositions(self):
averagePos = [Point3(0, 0, 0),
Point3(0, 0, 0),
Point3(0, 0, 0),
Point3(0, 0, 0)]
divisor = 0
for avId in self.avatarEndingPositions.keys():
divisor += 1
oneClientEndingPositions = self.avatarEndingPositions[avId]
avIndex = self.avIdList.index(avId)
for index in xrange(len(oneClientEndingPositions)):
pos = oneClientEndingPositions[index]
averagePos[index] += Point3(pos[0], pos[1], pos[2])
self.notify.debug('index = %d averagePos = %s' % (index, averagePos))
sentPos = []
if divisor:
for newPos in averagePos:
newPos /= divisor
newPos.setZ(IceGameGlobals.TireRadius)
sentPos.append([newPos[0], newPos[1], newPos[2]])
else:
sentPos = self.finalEndingPositions
self.sendUpdate('setFinalPositions', [sentPos])
self.finalEndingPositions = sentPos
if self.curMatch == IceGameGlobals.NumMatches - 1 and self.curRound == IceGameGlobals.NumRounds - 1:
self.gameFSM.request('scoreMatch')
elif self.curRound == IceGameGlobals.NumRounds - 1:
self.gameFSM.request('scoreMatch')
else:
self.curRound += 1
self.sendUpdate('setMatchAndRound', [self.curMatch, self.curRound])
self.gameFSM.request('waitClientsChoices')
def exitProcessEndingPositions(self):
pass
def enterScoreMatch(self):
sortedByDistance = []
for avId in self.avIdList:
index = self.avIdList.index(avId)
pos = Point3(*self.finalEndingPositions[index])
pos.setZ(0)
sortedByDistance.append((avId, pos.length()))
def compareDistance(x, y):
if x[1] - y[1] > 0:
return 1
elif x[1] - y[1] < 0:
return -1
else:
return 0
sortedByDistance.sort(cmp=compareDistance)
self.scoresAsList = []
totalPointsAdded = 0
for index in xrange(len(self.avIdList)):
pos = Point3(*self.finalEndingPositions[index])
pos.setZ(0)
length = pos.length()
points = length / IceGameGlobals.FarthestLength * (IceGameGlobals.PointsInCorner - IceGameGlobals.PointsDeadCenter[self.numPlayers])
points += IceGameGlobals.PointsDeadCenter[self.numPlayers]
            avId = self.avIdList[index]
            self.notify.debug('length = %s points=%s avId=%d' % (length, points, avId))
bonusIndex = 0
for sortIndex in xrange(len(sortedByDistance)):
if sortedByDistance[sortIndex][0] == avId:
bonusIndex = sortIndex
bonusIndex += 4 - len(self.avIdList)
pointsToAdd = int(points + 0.5) + IceGameGlobals.BonusPointsForPlace[bonusIndex]
totalPointsAdded += pointsToAdd
self.scoreDict[avId] += pointsToAdd
self.scoresAsList.append(self.scoreDict[avId])
self.curMatch += 1
self.curRound = 0
self.sendUpdate('setScores', [self.curMatch, self.curRound, self.scoresAsList])
self.sendUpdate('setNewState', ['scoring'])
def allToonsScoringMovieDone(self = self):
self.notify.debug('allToonsScoringMovieDone')
if self.curMatch == IceGameGlobals.NumMatches:
self.gameFSM.request('finalResults')
else:
self.gameFSM.request('waitClientsChoices')
def handleTimeout(avIds, self = self):
self.notify.debug('handleTimeout: avatars %s did not report "done"' % avIds)
if self.curMatch == IceGameGlobals.NumMatches:
self.gameFSM.request('finalResults')
else:
self.gameFSM.request('waitClientsChoices')
scoreMovieDuration = IceGameGlobals.FarthestLength * IceGameGlobals.ExpandFeetPerSec
scoreMovieDuration += totalPointsAdded * IceGameGlobals.ScoreCountUpRate
self.scoringMovieDoneBarrier = ToonBarrier('waitScoringMovieDone', self.uniqueName('waitScoringMovieDone'), self.avIdList, scoreMovieDuration + MinigameGlobals.latencyTolerance, allToonsScoringMovieDone, handleTimeout)
def exitScoreMatch(self):
self.scoringMovieDoneBarrier.cleanup()
self.scoringMovieDoneBarrier = None
return
def enterFinalResults(self):
self.checkScores()
self.sendUpdate('setNewState', ['finalResults'])
taskMgr.doMethodLater(IceGameGlobals.ShowScoresDuration, self.__doneShowingScores, self.taskName('waitShowScores'))
def exitFinalResults(self):
taskMgr.remove(self.taskName('waitShowScores'))
def __doneShowingScores(self, task):
self.notify.debug('doneShowingScores')
self.gameOver()
return Task.done
def waitClientsChoicesTimeout(self, task):
self.notify.debug('waitClientsChoicesTimeout: did not hear from all clients')
for avId in self.avatarChoices.keys():
if self.avatarChoices[avId] == (-1, 0):
self.avatarChoices[avId] = (0, 0)
self.gameFSM.request('processChoices')
return Task.done
def resetChoices(self):
for avId in self.avIdList:
self.avatarChoices[avId] = (-1, 0)
def setAvatarChoice(self, force, direction):
avatarId = self.air.getAvatarIdFromSender()
self.notify.debug('setAvatarChoice: avatar: ' + str(avatarId) + ' votes: ' + str(force) + ' direction: ' + str(direction))
self.avatarChoices[avatarId] = self.checkChoice(avatarId, force, direction)
if self.allAvatarsChosen():
self.notify.debug('setAvatarChoice: all avatars have chosen')
self.gameFSM.request('processChoices')
else:
self.notify.debug('setAvatarChoice: still waiting for more choices')
def checkChoice(self, avId, force, direction):
retForce = force
retDir = direction
if retForce < 0:
retForce = 0
if retForce > 100:
retForce = 100
return (retForce, retDir)
def allAvatarsChosen(self):
for avId in self.avatarChoices.keys():
choice = self.avatarChoices[avId]
if choice[0] == -1 and not self.stateDict[avId] == DistributedMinigameAI.EXITED:
return False
return True
def endingPositions(self, positions):
if not self.gameFSM or not self.gameFSM.getCurrentState() or self.gameFSM.getCurrentState().getName() != 'waitEndingPositions':
return
self.notify.debug('got endingPositions from client %s' % positions)
avId = self.air.getAvatarIdFromSender()
self.avatarEndingPositions[avId] = positions
if self.allAvatarsSentEndingPositions():
self.gameFSM.request('processEndingPositions')
def allAvatarsSentEndingPositions(self):
if len(self.avatarEndingPositions) == len(self.avIdList):
return True
return False
def endingPositionsTimeout(self, task):
self.notify.debug('endingPositionsTimeout : did not hear from all clients')
self.gameFSM.request('processEndingPositions')
return Task.done
def reportScoringMovieDone(self):
if not self.gameFSM or not self.gameFSM.getCurrentState() or self.gameFSM.getCurrentState().getName() != 'scoreMatch':
return
avId = self.air.getAvatarIdFromSender()
self.notify.debug('reportScoringMovieDone: avatar %s is done' % avId)
self.scoringMovieDoneBarrier.clear(avId)
def claimTreasure(self, treasureNum):
if not self.gameFSM or not self.gameFSM.getCurrentState() or self.gameFSM.getCurrentState().getName() != 'waitEndingPositions':
return
avId = self.air.getAvatarIdFromSender()
if not self.scoreDict.has_key(avId):
self.notify.warning('PROBLEM: avatar %s called claimTreasure(%s) but he is not in the scoreDict: %s. avIdList is: %s' % (avId,
treasureNum,
self.scoreDict,
self.avIdList))
return
if treasureNum < 0 or treasureNum >= self.numTreasures:
self.air.writeServerEvent('warning', treasureNum, 'MazeGameAI.claimTreasure treasureNum out of range')
return
if self.takenTreasuresTable[treasureNum]:
return
self.takenTreasuresTable[treasureNum] = 1
avId = self.air.getAvatarIdFromSender()
self.sendUpdate('setTreasureGrabbed', [avId, treasureNum])
self.scoreDict[avId] += 1
self.numTreasuresTaken += 1
def claimPenalty(self, penaltyNum):
if not self.gameFSM or not self.gameFSM.getCurrentState() or self.gameFSM.getCurrentState().getName() != 'waitEndingPositions':
return
avId = self.air.getAvatarIdFromSender()
if not self.scoreDict.has_key(avId):
self.notify.warning('PROBLEM: avatar %s called claimPenalty(%s) but he is not in the scoreDict: %s. avIdList is: %s' % (avId,
penaltyNum,
self.scoreDict,
self.avIdList))
return
if penaltyNum < 0 or penaltyNum >= self.numPenalties:
self.air.writeServerEvent('warning', penaltyNum, 'IceGameAI.claimPenalty penaltyNum out of range')
return
if self.takenPenaltiesTable[penaltyNum]:
return
self.takenPenaltiesTable[penaltyNum] = 1
avId = self.air.getAvatarIdFromSender()
self.sendUpdate('setPenaltyGrabbed', [avId, penaltyNum])
self.scoreDict[avId] -= 1
self.numPenaltiesTaken += 1
def checkScores(self):
self.scoresAsList = []
for index in xrange(len(self.avIdList)):
avId = self.avIdList[index]
if self.scoreDict[avId] < 0:
self.scoreDict[avId] = 1
self.scoresAsList.append(self.scoreDict[avId])
| mit |
msbeta/apollo | modules/tools/prediction/data_pipelines/common/rotation2d.py | 3 | 1121 | #!/usr/bin/env python
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from math import cos, sin
from vector2d import Vector2
def rotate(v, theta):
cos_theta = cos(theta)
sin_theta = sin(theta)
return rotate_fast(v, cos_theta, sin_theta)
def rotate_fast(v, cos_theta, sin_theta):
x = cos_theta * v.x - sin_theta * v.y
y = sin_theta * v.x + cos_theta * v.y
return Vector2(x, y)
| apache-2.0 |
ikrauchanka/flask-jsonrpc | examples/flask-httpauth/auth.py | 2 | 2733 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2015, Cenobit Technologies, Inc. http://cenobit.es/
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the Cenobit Technologies nor the names of
# its contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
from flask import Flask
from flask.ext.httpauth import HTTPBasicAuth
PROJECT_DIR, PROJECT_MODULE_NAME = os.path.split(
os.path.dirname(os.path.realpath(__file__))
)
FLASK_JSONRPC_PROJECT_DIR = os.path.join(PROJECT_DIR, os.pardir)
if os.path.exists(FLASK_JSONRPC_PROJECT_DIR) \
and not FLASK_JSONRPC_PROJECT_DIR in sys.path:
sys.path.append(FLASK_JSONRPC_PROJECT_DIR)
from flask_jsonrpc import JSONRPC
app = Flask(__name__)
auth = HTTPBasicAuth()
jsonrpc = JSONRPC(app, '/api', enable_web_browsable_api=True)
users = {
'john': 'hello',
'susan': 'bye'
}
@auth.get_password
def get_pw(username):
if username in users:
return users.get(username)
return None
@auth.verify_password
def verify_pwd(username, password):
return users.get(username) == password
@jsonrpc.method('App.index')
@auth.login_required
def index():
return u'Welcome to Flask JSON-RPC'
@jsonrpc.method('App.hello')
@auth.login_required
def hello(name):
return u'Hello {0}'.format(name)
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
| bsd-3-clause |
KontorConsulting/odoo | addons/account_accountant/__init__.py | 892 | 1046 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
willzhang05/postgrestesting1 | postgrestesting1/lib/python3.5/site-packages/django/conf/locale/it/formats.py | 115 | 2079 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y' # 25 Ottobre 2006
TIME_FORMAT = 'H:i' # 14:30
DATETIME_FORMAT = 'l d F Y H:i' # Mercoledì 25 Ottobre 2006 14:30
YEAR_MONTH_FORMAT = 'F Y' # Ottobre 2006
MONTH_DAY_FORMAT = 'j/F' # 10/2006
SHORT_DATE_FORMAT = 'd/m/Y' # 25/12/2009
SHORT_DATETIME_FORMAT = 'd/m/Y H:i' # 25/10/2009 14:30
FIRST_DAY_OF_WEEK = 1 # Lunedì
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d/%m/%Y', '%Y/%m/%d', # '25/10/2006', '2008/10/25'
'%d-%m-%Y', '%Y-%m-%d', # '25-10-2006', '2008-10-25'
'%d-%m-%y', '%d/%m/%y', # '25-10-06', '25/10/06'
)
DATETIME_INPUT_FORMATS = (
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d-%m-%Y %H:%M:%S', # '25-10-2006 14:30:59'
'%d-%m-%Y %H:%M:%S.%f', # '25-10-2006 14:30:59.000200'
'%d-%m-%Y %H:%M', # '25-10-2006 14:30'
'%d-%m-%Y', # '25-10-2006'
'%d-%m-%y %H:%M:%S', # '25-10-06 14:30:59'
'%d-%m-%y %H:%M:%S.%f', # '25-10-06 14:30:59.000200'
'%d-%m-%y %H:%M', # '25-10-06 14:30'
'%d-%m-%y', # '25-10-06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| mit |
hifly/OpenUpgrade | addons/account/wizard/account_report_general_journal.py | 378 | 1697 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_general_journal(osv.osv_memory):
_inherit = "account.common.journal.report"
_name = 'account.general.journal'
_description = 'Account General Journal'
_columns = {
'journal_ids': fields.many2many('account.journal', 'account_general_journal_journal_rel', 'account_id', 'journal_id', 'Journals', required=True),
}
def _print_report(self, cr, uid, ids, data, context=None):
data = self.pre_print_report(cr, uid, ids, data, context=context)
return self.pool['report'].get_action(cr, uid, [], 'account.report_generaljournal', data=data, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kidaa/aurora | src/test/python/apache/thermos/monitoring/test_resource.py | 8 | 3705 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from time import time
from unittest import TestCase
import mock
from apache.thermos.monitoring.monitor import TaskMonitor
from apache.thermos.monitoring.process import ProcessSample
from apache.thermos.monitoring.resource import (
ResourceHistory,
ResourceMonitorBase,
TaskResourceMonitor
)
from gen.apache.thermos.ttypes import ProcessStatus
class TestResourceHistory(TestCase):
def setUp(self):
self.max_len = 4
self.resource_history = ResourceHistory(self.max_len)
def test_add(self):
next_resource_stamp = time() + 100
value = ResourceMonitorBase.ResourceResult(1, 1, 0)
assert (next_resource_stamp, value) not in self.resource_history._values
self.resource_history.add(next_resource_stamp, value)
assert (next_resource_stamp, value) == self.resource_history._values[1]
def test_add_prevents_old_entries(self):
with self.assertRaises(ValueError):
self.resource_history.add(-1, 10)
def test_get(self):
resource_stamp = time() + 100
value = ResourceMonitorBase.ResourceResult(1, 1, 0)
value_wrong = ResourceMonitorBase.ResourceResult(1, 1, 50)
self.resource_history.add(resource_stamp, value)
self.resource_history.add(resource_stamp + 1000, value_wrong)
self.resource_history.add(resource_stamp + 10000, value_wrong)
assert resource_stamp, value == self.resource_history.get(resource_stamp)
class TestTaskResouceMonitor(TestCase):
@mock.patch('apache.thermos.monitoring.process_collector_psutil.ProcessTreeCollector.sample',
autospec=True, spec_set=True)
@mock.patch('apache.thermos.monitoring.monitor.TaskMonitor.get_active_processes',
autospec=True, spec_set=True)
def test_sample_by_process(self, mock_get_active_processes, mock_sample):
fake_process_name = 'fake-process-name'
task_path = '.'
task_monitor = TaskMonitor(task_path, 'fake-task-id')
fake_process_status = ProcessStatus(process=fake_process_name)
mock_get_active_processes.return_value = [(fake_process_status, 1)]
fake_process_sample = ProcessSample.empty()
mock_sample.return_value = fake_process_sample
task_resource_monitor = TaskResourceMonitor('fake-task-id', task_monitor)
assert task_resource_monitor.name == 'TaskResourceMonitor[fake-task-id]'
assert fake_process_sample == task_resource_monitor.sample_by_process(fake_process_name)
assert mock_get_active_processes.mock_calls == [mock.call(task_monitor)]
assert mock_sample.mock_calls == [mock.call(
task_resource_monitor._process_collectors[fake_process_status])]
@mock.patch('apache.thermos.monitoring.monitor.TaskMonitor.get_active_processes',
autospec=True, spec_set=True)
def test_sample_by_process_no_process(self, mock_get_active_processes):
task_path = '.'
task_monitor = TaskMonitor(task_path, 'fake-task-id')
mock_get_active_processes.return_value = []
task_resource_monitor = TaskResourceMonitor('fake-task-id', task_monitor)
with self.assertRaises(ValueError):
task_resource_monitor.sample_by_process('fake-process-name')
assert mock_get_active_processes.mock_calls == [mock.call(task_monitor)]
| apache-2.0 |
sanderator/lab-o-matic | test/test_lab_o_matic/test_compiler.py | 1 | 1208 | '''
Created on May 8, 2011
@author: sander
'''
import os.path
import shutil
import sys
'''
nose.tools has to be imported into the Eclipse project, eg, from
/usr/local/lib/python2.6/dist-packages/nose-1.0.0-py2.6.egg/nose/tools.py
'''
from tools import with_setup, raises, nottest
import lab_o_matic.compiler
paths = {}
def setup_func():
'''Creates fixtures.
Note that nose doesn't work properly if this function is just called setup.
'''
paths['projects'] = os.path.join(os.path.dirname(__file__), '../data')
paths['student'] = 'stoodent_src'
paths['bytecode'] = os.path.join(paths['projects'], '%s/build/classes' % paths['student'])
def teardown_func():
'''Removes fixtures.
Note that nose doesn't work properly if this function is just called teardown.
'''
# shutil.rmtree(os.path.join(paths['projects'], paths['student'] + '/src'))
@with_setup(setup_func, teardown_func)
def test_compile():
'''
Compiles any .java files it finds.
The paths argument determines where to look for source files and
where to put generated bytecode .class files.
'''
assert lab_o_matic.compiler.compile(paths)
assert os.path.exists(paths['bytecode'])
| gpl-2.0 |
anair13/where-am-i | flaskapp/flask/bin/activate_this.py | 669 | 1129 | """By using execfile(this_file, dict(__file__=this_file)) you will
activate this virtualenv environment.
This can be used when you must use an existing Python interpreter, not
the virtualenv bin/python
"""
try:
__file__
except NameError:
raise AssertionError(
"You must run this like execfile('path/to/activate_this.py', dict(__file__='path/to/activate_this.py'))")
import sys
import os
old_os_path = os.environ['PATH']
os.environ['PATH'] = os.path.dirname(os.path.abspath(__file__)) + os.pathsep + old_os_path
base = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if sys.platform == 'win32':
site_packages = os.path.join(base, 'Lib', 'site-packages')
else:
site_packages = os.path.join(base, 'lib', 'python%s' % sys.version[:3], 'site-packages')
prev_sys_path = list(sys.path)
import site
site.addsitedir(site_packages)
sys.real_prefix = sys.prefix
sys.prefix = base
# Move the added items to the front of the path:
new_sys_path = []
for item in list(sys.path):
if item not in prev_sys_path:
new_sys_path.append(item)
sys.path.remove(item)
sys.path[:0] = new_sys_path
| mit |
cs207-project/TimeSeries | tsdb/tsdb_constants.py | 1 | 1455 | ####################################################
#
# This file records all the constant variables used
# in tsdb module
#
####################################################
OPMAP = {
'<': 0,
'>': 1,
'==': 2,
'!=': 3,
'<=': 4,
'>=': 5
}
FILES_DIR = 'persistent_files'
MAX_CARD = 8
INDEXES = {
1: None, #Binary Tree
2: None #bitmask
}
TYPES = {
'float': 'd',
'bool': '?',
'int': 'i',
}
TYPE_DEFAULT = {
'float': 0.0,
'bool': False,
'int': 0
}
TS_FIELD_LENGTH = 4
BYTES_PER_NUM = 8
REFRESH_RATE = 50
TS_LENGTH = 100
NUMVPS = 5
schema_type = {
'pk': {'type': 'string', 'index': None},
'ts': {'type': None, 'index': None},
'order': {'type': 'int', 'index': 2, 'values': [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]},
'blarg': {'type': 'int', 'index': 2, 'values': [1, 2]},
'mean': {'type': 'float', 'index': 1},
'std': {'type': 'float', 'index': 1},
'vp': {'type': 'bool', 'index': 2, 'values': [0,1]},
'd-vp1': {'type': 'float', 'index': 1}
}
identity = lambda x: x
schema_convert = {
'pk': {'convert': identity, 'index': None},
'ts': {'convert': identity, 'index': None},
'order': {'convert': int, 'index': 1},
'blarg': {'convert': int, 'index': 1},
'useless': {'convert': identity, 'index': None},
'mean': {'convert': float, 'index': 1},
'std': {'convert': float, 'index': 1},
'vp': {'convert': bool, 'index': 1}
}
| mit |
esthermm/odoomrp-wip | mrp_subcontracting/models/procurement_order.py | 25 | 1258 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import models, fields, api
class ProcurementOrder(models.Model):
_inherit = 'procurement.order'
mrp_operation = fields.Many2one(
'mrp.production.workcenter.line', 'MRP Operation')
@api.multi
def make_po(self):
purchase_line_obj = self.env['purchase.order.line']
res = super(ProcurementOrder, self).make_po()
for procurement in self:
if res[procurement.id]:
purchase_line = purchase_line_obj.browse(res[procurement.id])
if (procurement.mrp_operation and
(not purchase_line.order_id.mrp_operation or
procurement.mrp_operation.id !=
purchase_line.order_id.mrp_operation.id)):
purchase_line.order_id.mrp_operation = (
procurement.mrp_operation.id)
procurement.mrp_operation.purchase_order = (
purchase_line.order_id.id)
return res
| agpl-3.0 |
sirinath/root | interpreter/llvm/src/tools/clang/bindings/python/tests/cindex/test_token_kind.py | 97 | 1064 | from clang.cindex import TokenKind
from nose.tools import eq_
from nose.tools import ok_
from nose.tools import raises
def test_constructor():
"""Ensure TokenKind constructor works as expected."""
t = TokenKind(5, 'foo')
eq_(t.value, 5)
eq_(t.name, 'foo')
@raises(ValueError)
def test_bad_register():
"""Ensure a duplicate value is rejected for registration."""
TokenKind.register(2, 'foo')
@raises(ValueError)
def test_unknown_value():
"""Ensure trying to fetch an unknown value raises."""
TokenKind.from_value(-1)
def test_registration():
"""Ensure that items registered appear as class attributes."""
ok_(hasattr(TokenKind, 'LITERAL'))
literal = TokenKind.LITERAL
ok_(isinstance(literal, TokenKind))
def test_from_value():
"""Ensure registered values can be obtained from from_value()."""
t = TokenKind.from_value(3)
ok_(isinstance(t, TokenKind))
eq_(t, TokenKind.LITERAL)
def test_repr():
"""Ensure repr() works."""
r = repr(TokenKind.LITERAL)
eq_(r, 'TokenKind.LITERAL')
| lgpl-2.1 |
glaubitz/fs-uae-debian | launcher/launcher/setup/setupwizarddialog.py | 2 | 1290 | import fsui
from launcher.i18n import gettext
from launcher.setup.setupwelcomepage import SetupWelcomePage
from launcher.ui.skin import LauncherTheme
from launcher.ui.widgets import PrevButton, NextButton, CloseButton
class SetupWizardDialog(fsui.Window):
@classmethod
def open(cls, parent=None):
return fsui.open_window_instance(cls, parent)
def __init__(self, parent):
super().__init__(
parent,
gettext("Setup Wizard"),
minimizable=False,
maximizable=False,
)
self.theme = LauncherTheme.get()
self.layout = fsui.VerticalLayout()
page = SetupWelcomePage(self)
self.layout.add(page, expand=True, fill=True)
button_layout = fsui.HorizontalLayout()
self.layout.add(button_layout, fill=True, margin=20)
button_layout.add_spacer(0, expand=True)
self.prev_button = PrevButton(self)
button_layout.add(self.prev_button, fill=True, margin_left=10)
self.next_button = NextButton(self)
button_layout.add(self.next_button, fill=True, margin_left=10)
if self.window.theme.has_close_buttons:
self.close_button = CloseButton(self)
button_layout.add(self.close_button, fill=True, margin_left=10)
| gpl-2.0 |
roderickmackenzie/gpvdm | gpvdm_gui/gui/materials_main.py | 1 | 7747 | #
# General-purpose Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for 1st, 2nd and 3rd generation solar cells.
# Copyright (C) 2012-2017 Roderick C. I. MacKenzie r.c.i.mackenzie at googlemail.com
#
# https://www.gpvdm.com
# Room B86 Coates, University Park, Nottingham, NG7 2RD, UK
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#
## @package materials_main
# Dialog to show information about a material.
#
import os
from tab import tab_class
from icon_lib import icon_get
#qt
from PyQt5.QtCore import QSize, Qt
from PyQt5.QtWidgets import QWidget,QVBoxLayout,QToolBar,QSizePolicy,QAction,QTabWidget,QDialog
from PyQt5.QtGui import QPainter,QIcon
#python modules
import webbrowser
from help import help_window
from win_lin import desktop_open
from ref import ref_window
from bibtex import bibtex
from gpvdm_open import gpvdm_open
from QWidgetSavePos import QWidgetSavePos
from plot_widget import plot_widget
from ribbon_materials import ribbon_materials
from import_data import import_data
from equation_editor import equation_editor
articles = []
mesh_articles = []
class materials_main(QWidgetSavePos):
def changed_click(self):
if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Electrical parameters"):
help_window().help_set_help(["tab.png",_("<big><b>Electrical parameters</b></big><br>Use this tab to configure the electrical parameters for the material.")])
self.ribbon.tb_save.setEnabled(False)
self.ribbon.import_data.setEnabled(False)
if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Luminescence"):
help_window().help_set_help(["tab.png",_("<big><b>Luminescence</b></big><br>Use this tab to edit the materials Luminescence.")])
self.ribbon.tb_save.setEnabled(False)
self.ribbon.import_data.setEnabled(False)
if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Absorption"):
b=bibtex()
if b.load(os.path.join(self.path,"mat.bib"))!=False:
text=b.get_text_of_token("alpha",html=True)
if text!=False:
help_window().help_set_help(["alpha.png",_("<big><b>Absorption</b></big><br>"+text)])
self.ribbon.tb_save.setEnabled(True)
self.ribbon.import_data.setEnabled(True)
if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Refractive index"):
b=bibtex()
if b.load(os.path.join(self.path,"mat.bib"))!=False:
text=b.get_text_of_token("n",html=True)
if text!=False:
help_window().help_set_help(["n.png",_("<big><b>Refractive index</b></big><br>"+text)])
self.ribbon.tb_save.setEnabled(True)
self.ribbon.import_data.setEnabled(True)
def callback_cost(self):
desktop_open(os.path.join(self.path,"cost.xlsx"))
def callback_help(self):
webbrowser.open("https://www.gpvdm.com/man/index.html")
def __init__(self,path):
QWidgetSavePos.__init__(self,"materials_main")
self.path=path
self.setFixedSize(900, 600)
self.setWindowIcon(icon_get("organic_material"))
self.setWindowTitle(_("Material editor")+" (https://www.gpvdm.com)"+" "+os.path.basename(self.path))
self.main_vbox = QVBoxLayout()
self.ribbon=ribbon_materials()
self.ribbon.cost.triggered.connect(self.callback_cost)
self.ribbon.folder_open.triggered.connect(self.callback_dir_open)
self.ribbon.import_data.clicked.connect(self.import_data)
self.ribbon.equation.clicked.connect(self.callback_equation_editor)
self.ribbon.tb_ref.triggered.connect(self.callback_ref)
self.ribbon.help.triggered.connect(self.callback_help)
self.main_vbox.addWidget(self.ribbon)
self.notebook = QTabWidget()
self.notebook.setMovable(True)
self.main_vbox.addWidget(self.notebook)
fname=os.path.join(self.path,"alpha.gmat")
self.alpha=plot_widget(enable_toolbar=False)
self.alpha.set_labels([_("Absorption")])
self.alpha.load_data([fname])
self.alpha.do_plot()
self.notebook.addTab(self.alpha,_("Absorption"))
fname=os.path.join(self.path,"n.gmat")
self.n=plot_widget(enable_toolbar=False)
self.n.set_labels([_("Refractive index")])
self.n.load_data([fname])
self.n.do_plot()
self.notebook.addTab(self.n,_("Refractive index"))
files=["dos.inp","pl.inp","mat.inp"]
description=[_("Electrical parameters"),_("Luminescence"),_("Basic")]
for i in range(0,len(files)):
full_path=os.path.join(self.path,files[i])
if os.path.isfile(full_path)==True:
tab=tab_class(os.path.join(self.path,files[i]))
self.notebook.addTab(tab,description[i])
self.setLayout(self.main_vbox)
self.notebook.currentChanged.connect(self.changed_click)
def callback_equation_editor(self):
equation_file=None
file_name=None
data_label=""
data_units=""
if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Absorption"):
file_name="alpha.gmat"
equation_file="alpha_eq.inp"
data_label="Absorption"
data_units="m^{-1}"
if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Refractive index"):
file_name="n.gmat"
equation_file="n_eq.inp"
data_label="n"
data_units="au"
if file_name!=None:
output_file=os.path.join(self.path,file_name)
config_file=os.path.join(self.path,file_name+"import.inp")
self.equation_editor=equation_editor(self.path,equation_file,file_name)
self.equation_editor.data_written.connect(self.update)
self.equation_editor.data.y_label="Wavelength"
self.equation_editor.data.data_label=data_label
self.equation_editor.data.y_units="nm"
self.equation_editor.data.data_units=data_units
self.equation_editor.load()
self.equation_editor.show()
def import_data(self):
file_name=None
if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Absorption"):
file_name="alpha.gmat"
if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Refractive index"):
file_name="n.gmat"
if file_name!=None:
output_file=os.path.join(self.path,file_name)
config_file=os.path.join(self.path,file_name+"import.inp")
self.im=import_data(output_file,config_file)
self.im.run()
self.update()
def import_ref(self):
file_name=None
if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Absorption"):
file_name="alpha.gmat"
if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Refractive index"):
file_name="n.gmat"
if file_name!=None:
output_file=os.path.join(self.path,file_name)
config_file=os.path.join(self.path,file_name+"import.inp")
self.im=import_data(output_file,config_file)
self.im.run()
self.update()
def update(self):
self.n.update()
self.alpha.update()
def callback_ref(self):
token=None
if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Absorption"):
token="alpha"
if self.notebook.tabText(self.notebook.currentIndex()).strip()==_("Refractive index"):
token="n"
if token!=None:
self.ref_window=ref_window(os.path.join(self.path,"mat.bib"),token)
self.ref_window.show()
def callback_dir_open(self):
dialog=gpvdm_open(self.path)
dialog.show_inp_files=False
ret=dialog.exec_()
if ret==QDialog.Accepted:
desktop_open(dialog.get_filename())
| gpl-2.0 |
rickerc/cinder_audit | cinder/tests/db/test_name_id.py | 5 | 2344 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for volume name_id."""
from oslo.config import cfg
from cinder import context
from cinder import db
from cinder import test
from cinder.tests import utils as testutils
CONF = cfg.CONF
class NameIDsTestCase(test.TestCase):
"""Test cases for naming volumes with name_id."""
def setUp(self):
super(NameIDsTestCase, self).setUp()
self.ctxt = context.RequestContext(user_id='user_id',
project_id='project_id')
def tearDown(self):
super(NameIDsTestCase, self).tearDown()
def test_name_id_same(self):
"""New volume should have same 'id' and 'name_id'."""
vol_ref = testutils.create_volume(self.ctxt, size=1)
self.assertEqual(vol_ref['name_id'], vol_ref['id'])
expected_name = CONF.volume_name_template % vol_ref['id']
self.assertEqual(vol_ref['name'], expected_name)
def test_name_id_diff(self):
"""Change name ID to mimic volume after migration."""
vol_ref = testutils.create_volume(self.ctxt, size=1)
db.volume_update(self.ctxt, vol_ref['id'], {'name_id': 'fake'})
vol_ref = db.volume_get(self.ctxt, vol_ref['id'])
expected_name = CONF.volume_name_template % 'fake'
self.assertEqual(vol_ref['name'], expected_name)
def test_name_id_snapshot_volume_name(self):
"""Make sure snapshot['volume_name'] is updated."""
vol_ref = testutils.create_volume(self.ctxt, size=1)
db.volume_update(self.ctxt, vol_ref['id'], {'name_id': 'fake'})
snap_ref = testutils.create_snapshot(self.ctxt, vol_ref['id'])
expected_name = CONF.volume_name_template % 'fake'
self.assertEqual(snap_ref['volume_name'], expected_name)
| apache-2.0 |
jpshort/odoo | marcos_addons/marcos_l10n_do/__init__.py | 3 | 1085 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013-2015 Marcos Organizador de Negocios SRL http://marcos.do
# Write by Eneldo Serrata ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
widelands/widelands | utils/fix_formatting.py | 1 | 4107 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The code base had inconsistent usage of tabs/spaces for indenting in Lua.
files. Spaces were more prominent - and I prefer them over tabs. So I wrote
this small script to fix leading tabs in Lua files to spaces.
It also saves files in unix file endings ("\n") and strips empty lines at the
end of files and whitespace characters at the end of lines.
After fixing the Lua tabs, this script also executes clang-format over the src
directory and pyformat over the utils directory.
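Example invocation (assuming the script is run from the root of the
repository, which it checks for):
    ./utils/fix_formatting.py --lua --dir ./data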
"""
import argparse
import os
import re
import sys
from subprocess import call
from file_utils import read_text_file, write_text_file, find_files
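# Leading whitespace run containing at least one tab; only this prefix is expanded to spaces below.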
LEADING_TABS = re.compile(r'^\s*\t+\s*')
SPACES_PER_TAB = 3
def parse_args():
p = argparse.ArgumentParser(
description='Fix common whitespace errors in Lua files, run clang-format'
' over the code base and pyformat over the utils directory.'
' Recurses over all relevant files.')
p.add_argument('-c', '--c++', action='store_true',
help='Format C++ files only')
p.add_argument('-l', '--lua', action='store_true',
help='Format Lua files only')
p.add_argument('-p', '--python', action='store_true',
help='Format Python files only')
p.add_argument('-d', '--dir', action='store',
help='Format the given directory and its subdirectories only')
return vars(p.parse_args())
def main():
args = parse_args()
format_cplusplus = args['c++'] or not (args['lua'] or args['python'])
format_lua = args['lua'] or not (args['c++'] or args['python'])
format_python = args['python'] or not (args['c++'] or args['lua'])
if not os.path.isdir('src') or not os.path.isdir('utils'):
print('CWD is not the root of the repository.')
return 1
if format_cplusplus:
directory = args['dir']
if not directory:
directory = './src'
sys.stdout.write('\nFormatting C++ in directory: ' + directory + ' ')
for filename in find_files(directory, ['.cc', '.h']):
if 'third_party' in filename:
continue
sys.stdout.write('.')
sys.stdout.flush()
call(['clang-format', '-i', filename])
call(['git', 'add', '--renormalize', filename])
print(' done.')
if format_lua:
directories = set()
if args['dir']:
directories.add(args['dir'])
else:
directories = {'./data', './test'}
for directory in directories:
sys.stdout.write(
'\nFixing Lua tabs in directory: ' + directory + ' ')
for filename in find_files(directory, ['.lua']):
sys.stdout.write('.')
sys.stdout.flush()
lines = read_text_file(filename).strip().split('\n')
new_lines = []
for line in lines:
m = LEADING_TABS.match(line)
if m is not None:
line = line[m.start():m.end()].expandtabs(
SPACES_PER_TAB) + line[m.end():]
new_lines.append(line.rstrip() + '\n')
write_text_file(filename, ''.join(new_lines))
call(['git', 'add', '--renormalize', filename])
print(' done.')
if format_python:
directories = set()
if args['dir']:
directories.add(args['dir'])
else:
directories = {'./utils', './cmake/codecheck'}
for directory in directories:
sys.stdout.write(
'\nFormatting Python scripts in directory: ' + directory + ' ')
for filename in find_files(directory, ['.py']):
sys.stdout.write('.')
sys.stdout.flush()
call(['pyformat', '-i', filename])
call(['git', 'add', '--renormalize', filename])
print(' done.')
print('Formatting finished.')
return 0
if __name__ == '__main__':
sys.exit(main())
| gpl-2.0 |
louyihua/edx-platform | lms/djangoapps/badges/migrations/0001_initial.py | 17 | 3499 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import jsonfield.fields
import badges.models
from django.conf import settings
import django.utils.timezone
from model_utils import fields
import xmodule_django.models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='BadgeAssertion',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('data', jsonfield.fields.JSONField()),
('backend', models.CharField(max_length=50)),
('image_url', models.URLField()),
('assertion_url', models.URLField()),
('modified', fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('created', fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False, db_index=True)),
],
),
migrations.CreateModel(
name='BadgeClass',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('slug', models.SlugField(max_length=255, validators=[badges.models.validate_lowercase])),
('issuing_component', models.SlugField(default=b'', blank=True, validators=[badges.models.validate_lowercase])),
('display_name', models.CharField(max_length=255)),
('course_id', xmodule_django.models.CourseKeyField(default=None, max_length=255, blank=True)),
('description', models.TextField()),
('criteria', models.TextField()),
('mode', models.CharField(default=b'', max_length=100, blank=True)),
('image', models.ImageField(upload_to=b'badge_classes', validators=[badges.models.validate_badge_image])),
],
),
migrations.CreateModel(
name='CourseCompleteImageConfiguration',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('mode', models.CharField(help_text='The course mode for this badge image. For example, "verified" or "honor".', unique=True, max_length=125)),
('icon', models.ImageField(help_text='Badge images must be square PNG files. The file size should be under 250KB.', upload_to=b'course_complete_badges', validators=[badges.models.validate_badge_image])),
('default', models.BooleanField(default=False, help_text='Set this value to True if you want this image to be the default image for any course modes that do not have a specified badge image. You can have only one default image.')),
],
),
migrations.AlterUniqueTogether(
name='badgeclass',
unique_together=set([('slug', 'issuing_component', 'course_id')]),
),
migrations.AddField(
model_name='badgeassertion',
name='badge_class',
field=models.ForeignKey(to='badges.BadgeClass'),
),
migrations.AddField(
model_name='badgeassertion',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
]
| agpl-3.0 |
drglove/SickRage | sickbeard/clients/rtorrent.py | 2 | 5258 | # Author: jkaberg <[email protected]>, based on fuzemans work (https://github.com/RuudBurger/CouchPotatoServer/blob/develop/couchpotato/core/downloaders/rtorrent/main.py)
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from base64 import b64encode
import traceback
import sickbeard
from sickbeard import logger
from sickbeard.clients.generic import GenericClient
from lib.rtorrent import RTorrent
from lib.rtorrent.err import MethodError
class rTorrentAPI(GenericClient):
def __init__(self, host=None, username=None, password=None):
super(rTorrentAPI, self).__init__('rTorrent', host, username, password)
def _get_auth(self):
self.auth = None
if self.auth is not None:
return self.auth
if not self.host:
return
tp_kwargs = {}
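# The auth type setting uses the string 'none' when HTTP auth is disabled, so only pass it through when set.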
if sickbeard.TORRENT_AUTH_TYPE != 'none':
tp_kwargs['authtype'] = sickbeard.TORRENT_AUTH_TYPE
if not sickbeard.TORRENT_VERIFY_CERT:
tp_kwargs['check_ssl_cert'] = False
if self.username and self.password:
self.auth = RTorrent(self.host, self.username, self.password, True, tp_kwargs=tp_kwargs)
else:
self.auth = RTorrent(self.host, None, None, True)
return self.auth
def _add_torrent_uri(self, result):
filedata = None
if not self.auth:
return False
if not result:
return False
try:
# Send magnet to rTorrent
torrent = self.auth.load_magnet(result.url, result.hash)
if not torrent:
return False
# Set label
label = sickbeard.TORRENT_LABEL
if result.show.is_anime:
label = sickbeard.TORRENT_LABEL_ANIME
if label:
torrent.set_custom(1, label.lower())
if sickbeard.TORRENT_PATH:
torrent.set_directory(sickbeard.TORRENT_PATH)
# Start torrent
torrent.start()
return True
except Exception as e:
logger.log(traceback.format_exc(), logger.DEBUG)
return False
def _add_torrent_file(self, result):
filedata = None
if not self.auth:
return False
if not result:
return False
# group_name = 'sb_test'.lower() ##### Use provider instead of _test
# if not self._set_torrent_ratio(group_name):
# return False
# Send request to rTorrent
try:
# Send torrent to rTorrent
torrent = self.auth.load_torrent(result.content)
if not torrent:
return False
# Set label
label = sickbeard.TORRENT_LABEL
if result.show.is_anime:
label = sickbeard.TORRENT_LABEL_ANIME
if label:
torrent.set_custom(1, label.lower())
if sickbeard.TORRENT_PATH:
torrent.set_directory(sickbeard.TORRENT_PATH)
# Set Ratio Group
# torrent.set_visible(group_name)
# Start torrent
torrent.start()
return True
except Exception as e:
logger.log(traceback.format_exc(), logger.DEBUG)
return False
def _set_torrent_ratio(self, name):
# if not name:
# return False
#
# if not self.auth:
# return False
#
# views = self.auth.get_views()
#
# if name not in views:
# self.auth.create_group(name)
# group = self.auth.get_group(name)
# ratio = int(float(sickbeard.TORRENT_RATIO) * 100)
#
# try:
# if ratio > 0:
#
# # Explicitly set all group options to ensure it is setup correctly
# group.set_upload('1M')
# group.set_min(ratio)
# group.set_max(ratio)
# group.set_command('d.stop')
# group.enable()
# else:
# # Reset group action and disable it
# group.set_command()
# group.disable()
#
# except:
# return False
return True
def testAuthentication(self):
try:
self._get_auth()
if self.auth is not None:
return True, 'Success: Connected and Authenticated'
else:
return False, 'Error: Unable to get ' + self.name + ' Authentication, check your config!'
except Exception:
return False, 'Error: Unable to connect to ' + self.name
api = rTorrentAPI()
| gpl-3.0 |
zoggn/kernel_tcl_msm8610 | tools/perf/python/twatch.py | 7370 | 1334 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <[email protected]>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
| gpl-2.0 |
mavenlin/tensorflow | tensorflow/python/debug/cli/cli_shared_test.py | 45 | 15872 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for the shared functions and classes for tfdbg CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
from tensorflow.python.debug.cli import cli_shared
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class BytesToReadableStrTest(test_util.TensorFlowTestCase):
def testNoneSizeWorks(self):
self.assertEqual(str(None), cli_shared.bytes_to_readable_str(None))
def testSizesBelowOneKiloByteWorks(self):
self.assertEqual("0", cli_shared.bytes_to_readable_str(0))
self.assertEqual("500", cli_shared.bytes_to_readable_str(500))
self.assertEqual("1023", cli_shared.bytes_to_readable_str(1023))
def testSizesBetweenOneKiloByteandOneMegaByteWorks(self):
self.assertEqual("1.00k", cli_shared.bytes_to_readable_str(1024))
self.assertEqual("2.40k", cli_shared.bytes_to_readable_str(int(1024 * 2.4)))
self.assertEqual("1023.00k", cli_shared.bytes_to_readable_str(1024 * 1023))
def testSizesBetweenOneMegaByteandOneGigaByteWorks(self):
self.assertEqual("1.00M", cli_shared.bytes_to_readable_str(1024**2))
self.assertEqual("2.40M",
cli_shared.bytes_to_readable_str(int(1024**2 * 2.4)))
self.assertEqual("1023.00M",
cli_shared.bytes_to_readable_str(1024**2 * 1023))
def testSizeAboveOneGigaByteWorks(self):
self.assertEqual("1.00G", cli_shared.bytes_to_readable_str(1024**3))
self.assertEqual("2000.00G",
cli_shared.bytes_to_readable_str(1024**3 * 2000))
def testReadableStrIncludesBAtTheEndOnRequest(self):
self.assertEqual("0B", cli_shared.bytes_to_readable_str(0, include_b=True))
self.assertEqual(
"1.00kB", cli_shared.bytes_to_readable_str(
1024, include_b=True))
self.assertEqual(
"1.00MB", cli_shared.bytes_to_readable_str(
1024**2, include_b=True))
self.assertEqual(
"1.00GB", cli_shared.bytes_to_readable_str(
1024**3, include_b=True))
class TimeToReadableStrTest(test_util.TensorFlowTestCase):
def testNoneTimeWorks(self):
self.assertEqual("0", cli_shared.time_to_readable_str(None))
def testMicrosecondsTime(self):
self.assertEqual("40us", cli_shared.time_to_readable_str(40))
def testMillisecondTime(self):
self.assertEqual("40ms", cli_shared.time_to_readable_str(40e3))
def testSecondTime(self):
self.assertEqual("40s", cli_shared.time_to_readable_str(40e6))
def testForceTimeUnit(self):
self.assertEqual("40s",
cli_shared.time_to_readable_str(
40e6, force_time_unit=cli_shared.TIME_UNIT_S))
self.assertEqual("40000ms",
cli_shared.time_to_readable_str(
40e6, force_time_unit=cli_shared.TIME_UNIT_MS))
self.assertEqual("40000000us",
cli_shared.time_to_readable_str(
40e6, force_time_unit=cli_shared.TIME_UNIT_US))
self.assertEqual("4e-05s",
cli_shared.time_to_readable_str(
40, force_time_unit=cli_shared.TIME_UNIT_S))
self.assertEqual("0",
cli_shared.time_to_readable_str(
0, force_time_unit=cli_shared.TIME_UNIT_S))
with self.assertRaisesRegexp(ValueError, r"Invalid time unit: ks"):
cli_shared.time_to_readable_str(100, force_time_unit="ks")
class GetRunStartIntroAndDescriptionTest(test_util.TensorFlowTestCase):
def setUp(self):
self.const_a = constant_op.constant(11.0, name="a")
self.const_b = constant_op.constant(22.0, name="b")
self.const_c = constant_op.constant(33.0, name="c")
self.sparse_d = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 1]], values=[1.0, 2.0], dense_shape=[3, 3])
def tearDown(self):
ops.reset_default_graph()
def testSingleFetchNoFeeds(self):
run_start_intro = cli_shared.get_run_start_intro(12, self.const_a, None, {})
# Verify line about run() call number.
self.assertTrue(run_start_intro.lines[1].endswith("run() call #12:"))
# Verify line about fetch.
const_a_name_line = run_start_intro.lines[4]
self.assertEqual(self.const_a.name, const_a_name_line.strip())
# Verify line about feeds.
feeds_line = run_start_intro.lines[7]
self.assertEqual("(Empty)", feeds_line.strip())
# Verify lines about possible commands and their font attributes.
self.assertEqual("run:", run_start_intro.lines[11][2:])
annot = run_start_intro.font_attr_segs[11][0]
self.assertEqual(2, annot[0])
self.assertEqual(5, annot[1])
self.assertEqual("run", annot[2][0].content)
self.assertEqual("bold", annot[2][1])
annot = run_start_intro.font_attr_segs[13][0]
self.assertEqual(2, annot[0])
self.assertEqual(8, annot[1])
self.assertEqual("run -n", annot[2][0].content)
self.assertEqual("bold", annot[2][1])
self.assertEqual("run -t <T>:", run_start_intro.lines[15][2:])
self.assertEqual([(2, 12, "bold")], run_start_intro.font_attr_segs[15])
self.assertEqual("run -f <filter_name>:", run_start_intro.lines[17][2:])
self.assertEqual([(2, 22, "bold")], run_start_intro.font_attr_segs[17])
annot = run_start_intro.font_attr_segs[21][0]
self.assertEqual(2, annot[0])
self.assertEqual(16, annot[1])
self.assertEqual("invoke_stepper", annot[2][0].content)
# Verify short description.
description = cli_shared.get_run_short_description(12, self.const_a, None)
self.assertEqual("run #12: 1 fetch (a:0); 0 feeds", description)
# Verify the main menu associated with the run_start_intro.
self.assertIn(debugger_cli_common.MAIN_MENU_KEY,
run_start_intro.annotations)
menu = run_start_intro.annotations[debugger_cli_common.MAIN_MENU_KEY]
self.assertEqual("run", menu.caption_to_item("run").content)
self.assertEqual("invoke_stepper",
menu.caption_to_item("invoke_stepper").content)
self.assertEqual("exit", menu.caption_to_item("exit").content)
def testSparseTensorAsFeedShouldHandleNoNameAttribute(self):
sparse_feed_val = ([[0, 0], [1, 1]], [10.0, 20.0])
run_start_intro = cli_shared.get_run_start_intro(
1, self.sparse_d, {self.sparse_d: sparse_feed_val}, {})
self.assertEqual(str(self.sparse_d), run_start_intro.lines[7].strip())
short_description = cli_shared.get_run_short_description(
1, self.sparse_d, {self.sparse_d: sparse_feed_val})
self.assertEqual(
"run #1: 1 fetch; 1 feed (%s)" % self.sparse_d, short_description)
def testSparseTensorAsFetchShouldHandleNoNameAttribute(self):
run_start_intro = cli_shared.get_run_start_intro(1, self.sparse_d, None, {})
self.assertEqual(str(self.sparse_d), run_start_intro.lines[4].strip())
def testTwoFetchesListNoFeeds(self):
fetches = [self.const_a, self.const_b]
run_start_intro = cli_shared.get_run_start_intro(1, fetches, None, {})
const_a_name_line = run_start_intro.lines[4]
const_b_name_line = run_start_intro.lines[5]
self.assertEqual(self.const_a.name, const_a_name_line.strip())
self.assertEqual(self.const_b.name, const_b_name_line.strip())
feeds_line = run_start_intro.lines[8]
self.assertEqual("(Empty)", feeds_line.strip())
# Verify short description.
description = cli_shared.get_run_short_description(1, fetches, None)
self.assertEqual("run #1: 2 fetches; 0 feeds", description)
def testNestedListAsFetches(self):
fetches = [self.const_c, [self.const_a, self.const_b]]
run_start_intro = cli_shared.get_run_start_intro(1, fetches, None, {})
# Verify lines about the fetches.
self.assertEqual(self.const_c.name, run_start_intro.lines[4].strip())
self.assertEqual(self.const_a.name, run_start_intro.lines[5].strip())
self.assertEqual(self.const_b.name, run_start_intro.lines[6].strip())
# Verify short description.
description = cli_shared.get_run_short_description(1, fetches, None)
self.assertEqual("run #1: 3 fetches; 0 feeds", description)
def testNestedDictAsFetches(self):
fetches = {"c": self.const_c, "ab": {"a": self.const_a, "b": self.const_b}}
run_start_intro = cli_shared.get_run_start_intro(1, fetches, None, {})
# Verify lines about the fetches. The ordering of the dict keys is
# indeterminate.
fetch_names = set()
fetch_names.add(run_start_intro.lines[4].strip())
fetch_names.add(run_start_intro.lines[5].strip())
fetch_names.add(run_start_intro.lines[6].strip())
self.assertEqual({"a:0", "b:0", "c:0"}, fetch_names)
# Verify short description.
description = cli_shared.get_run_short_description(1, fetches, None)
self.assertEqual("run #1: 3 fetches; 0 feeds", description)
def testTwoFetchesAsTupleNoFeeds(self):
fetches = (self.const_a, self.const_b)
run_start_intro = cli_shared.get_run_start_intro(1, fetches, None, {})
const_a_name_line = run_start_intro.lines[4]
const_b_name_line = run_start_intro.lines[5]
self.assertEqual(self.const_a.name, const_a_name_line.strip())
self.assertEqual(self.const_b.name, const_b_name_line.strip())
feeds_line = run_start_intro.lines[8]
self.assertEqual("(Empty)", feeds_line.strip())
# Verify short description.
description = cli_shared.get_run_short_description(1, fetches, None)
self.assertEqual("run #1: 2 fetches; 0 feeds", description)
def testTwoFetchesAsNamedTupleNoFeeds(self):
fetches_namedtuple = namedtuple("fetches", "x y")
fetches = fetches_namedtuple(self.const_b, self.const_c)
run_start_intro = cli_shared.get_run_start_intro(1, fetches, None, {})
const_b_name_line = run_start_intro.lines[4]
const_c_name_line = run_start_intro.lines[5]
self.assertEqual(self.const_b.name, const_b_name_line.strip())
self.assertEqual(self.const_c.name, const_c_name_line.strip())
feeds_line = run_start_intro.lines[8]
self.assertEqual("(Empty)", feeds_line.strip())
# Verify short description.
description = cli_shared.get_run_short_description(1, fetches, None)
self.assertEqual("run #1: 2 fetches; 0 feeds", description)
def testWithFeedDict(self):
feed_dict = {
self.const_a: 10.0,
self.const_b: 20.0,
}
run_start_intro = cli_shared.get_run_start_intro(1, self.const_c, feed_dict,
{})
const_c_name_line = run_start_intro.lines[4]
self.assertEqual(self.const_c.name, const_c_name_line.strip())
# Verify lines about the feed dict.
feed_a_line = run_start_intro.lines[7]
feed_b_line = run_start_intro.lines[8]
self.assertEqual(self.const_a.name, feed_a_line.strip())
self.assertEqual(self.const_b.name, feed_b_line.strip())
# Verify short description.
description = cli_shared.get_run_short_description(1, self.const_c,
feed_dict)
self.assertEqual("run #1: 1 fetch (c:0); 2 feeds", description)
def testTensorFilters(self):
feed_dict = {self.const_a: 10.0}
tensor_filters = {
"filter_a": lambda x: True,
"filter_b": lambda x: False,
}
run_start_intro = cli_shared.get_run_start_intro(1, self.const_c, feed_dict,
tensor_filters)
# Verify the listed names of the tensor filters.
filter_names = set()
filter_names.add(run_start_intro.lines[20].split(" ")[-1])
filter_names.add(run_start_intro.lines[21].split(" ")[-1])
self.assertEqual({"filter_a", "filter_b"}, filter_names)
# Verify short description.
description = cli_shared.get_run_short_description(1, self.const_c,
feed_dict)
self.assertEqual("run #1: 1 fetch (c:0); 1 feed (a:0)", description)
# Verify the command links for the two filters.
command_set = set()
annot = run_start_intro.font_attr_segs[20][0]
command_set.add(annot[2].content)
annot = run_start_intro.font_attr_segs[21][0]
command_set.add(annot[2].content)
self.assertEqual({"run -f filter_a", "run -f filter_b"}, command_set)
def testGetRunShortDescriptionWorksForTensorFeedKey(self):
short_description = cli_shared.get_run_short_description(
1, self.const_a, {self.const_a: 42.0})
self.assertEqual("run #1: 1 fetch (a:0); 1 feed (a:0)", short_description)
def testGetRunShortDescriptionWorksForUnicodeFeedKey(self):
short_description = cli_shared.get_run_short_description(
1, self.const_a, {u"foo": 42.0})
self.assertEqual("run #1: 1 fetch (a:0); 1 feed (foo)", short_description)
class GetErrorIntroTest(test_util.TensorFlowTestCase):
def setUp(self):
self.var_a = variables.Variable(42.0, name="a")
def tearDown(self):
ops.reset_default_graph()
def testShapeError(self):
tf_error = errors.OpError(None, self.var_a.initializer, "foo description",
None)
error_intro = cli_shared.get_error_intro(tf_error)
self.assertEqual("!!! An error occurred during the run !!!",
error_intro.lines[1])
self.assertEqual([(0, len(error_intro.lines[1]), "blink")],
error_intro.font_attr_segs[1])
self.assertEqual(2, error_intro.lines[4].index("ni -a -d -t a/Assign"))
self.assertEqual(2, error_intro.font_attr_segs[4][0][0])
self.assertEqual(22, error_intro.font_attr_segs[4][0][1])
self.assertEqual("ni -a -d -t a/Assign",
error_intro.font_attr_segs[4][0][2][0].content)
self.assertEqual("bold", error_intro.font_attr_segs[4][0][2][1])
self.assertEqual(2, error_intro.lines[6].index("li -r a/Assign"))
self.assertEqual(2, error_intro.font_attr_segs[6][0][0])
self.assertEqual(16, error_intro.font_attr_segs[6][0][1])
self.assertEqual("li -r a/Assign",
error_intro.font_attr_segs[6][0][2][0].content)
self.assertEqual("bold", error_intro.font_attr_segs[6][0][2][1])
self.assertEqual(2, error_intro.lines[8].index("lt"))
self.assertEqual(2, error_intro.font_attr_segs[8][0][0])
self.assertEqual(4, error_intro.font_attr_segs[8][0][1])
self.assertEqual("lt", error_intro.font_attr_segs[8][0][2][0].content)
self.assertEqual("bold", error_intro.font_attr_segs[8][0][2][1])
self.assertStartsWith(error_intro.lines[11], "Op name:")
self.assertTrue(error_intro.lines[11].endswith("a/Assign"))
self.assertStartsWith(error_intro.lines[12], "Error type:")
self.assertTrue(error_intro.lines[12].endswith(str(type(tf_error))))
self.assertEqual("Details:", error_intro.lines[14])
self.assertStartsWith(error_intro.lines[15], "foo description")
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
aforalee/RRally | rally/plugins/openstack/scenarios/murano/utils.py | 2 | 10549 | # Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import tempfile
import uuid
import zipfile
from oslo_config import cfg
import yaml
from rally.common import fileutils
from rally.common import utils as common_utils
from rally.plugins.openstack import scenario
from rally.task import atomic
from rally.task import utils
CONF = cfg.CONF
MURANO_BENCHMARK_OPTS = [
cfg.IntOpt("murano_delete_environment_timeout", default=180,
deprecated_name="delete_environment_timeout",
help="A timeout in seconds for an environment delete"),
cfg.IntOpt("murano_deploy_environment_timeout", default=1200,
deprecated_name="deploy_environment_timeout",
help="A timeout in seconds for an environment deploy"),
cfg.IntOpt("murano_delete_environment_check_interval", default=2,
deprecated_name="delete_environment_check_interval",
help="Delete environment check interval in seconds"),
cfg.IntOpt("murano_deploy_environment_check_interval", default=5,
deprecated_name="deploy_environment_check_interval",
help="Deploy environment check interval in seconds"),
]
benchmark_group = cfg.OptGroup(name="benchmark", title="benchmark options")
CONF.register_opts(MURANO_BENCHMARK_OPTS, group=benchmark_group)
class MuranoScenario(scenario.OpenStackScenario):
"""Base class for Murano scenarios with basic atomic actions."""
@atomic.action_timer("murano.list_environments")
def _list_environments(self):
"""Return environments list."""
return self.clients("murano").environments.list()
@atomic.action_timer("murano.create_environment")
def _create_environment(self, env_name=None):
"""Create environment.
:param env_name: String used to name environment
:returns: Environment instance
"""
env_name = env_name or self._generate_random_name()
return self.clients("murano").environments.create({"name": env_name})
@atomic.action_timer("murano.delete_environment")
def _delete_environment(self, environment):
"""Delete given environment.
Return when the environment is actually deleted.
:param environment: Environment instance
"""
self.clients("murano").environments.delete(environment.id)
config = CONF.benchmark
utils.wait_for_status(
environment,
ready_statuses=["deleted"],
check_deletion=True,
update_resource=utils.get_from_manager(),
timeout=config.murano_delete_environment_timeout,
check_interval=config.murano_delete_environment_check_interval
)
@atomic.action_timer("murano.create_session")
def _create_session(self, environment_id):
"""Create session for environment with specific id
:param environment_id: Environment id
:returns: Session instance
"""
return self.clients("murano").sessions.configure(environment_id)
@atomic.optional_action_timer("murano.create_service")
def _create_service(self, environment, session, full_package_name,
image_name=None, flavor_name=None):
"""Create Murano service.
:param environment: Environment instance
:param session: Session instance
:param full_package_name: full name of the Murano package
:param image_name: Image name
:param flavor_name: Flavor name
:param atomic_action: True if this is atomic action. added and
handled by the optional_action_timer()
decorator
:returns: Service instance
"""
app_id = str(uuid.uuid4())
data = {"?": {"id": app_id,
"type": full_package_name},
"name": self._generate_random_name("rally_")}
return self.clients("murano").services.post(
environment_id=environment.id, path="/", data=data,
session_id=session.id)
@atomic.action_timer("murano.deploy_environment")
def _deploy_environment(self, environment, session):
"""Deploy environment.
:param environment: Environment instance
:param session: Session instance
"""
self.clients("murano").sessions.deploy(environment.id,
session.id)
config = CONF.benchmark
utils.wait_for(
environment, is_ready=utils.resource_is("READY"),
update_resource=utils.get_from_manager(["DEPLOY FAILURE"]),
timeout=config.murano_deploy_environment_timeout,
check_interval=config.murano_deploy_environment_check_interval
)
@atomic.action_timer("murano.list_packages")
def _list_packages(self, include_disabled=False):
"""Returns packages list.
:param include_disabled: if "True" then disabled packages will be
included in a the result.
Default value is False.
:returns: list of imported packages
"""
return self.clients("murano").packages.list(
include_disabled=include_disabled)
@atomic.action_timer("murano.import_package")
def _import_package(self, package):
"""Import package to the Murano.
:param package: path to zip archive with Murano application
:returns: imported package
"""
package = self.clients("murano").packages.create(
{}, {"file": open(package)}
)
return package
@atomic.action_timer("murano.delete_package")
def _delete_package(self, package):
"""Delete specified package.
:param package: package that will be deleted
"""
self.clients("murano").packages.delete(package.id)
@atomic.action_timer("murano.update_package")
def _update_package(self, package, body, operation="replace"):
"""Update specified package.
:param package: package that will be updated
:param body: dict object that defines what package property will be
updated, e.g {"tags": ["tag"]} or {"enabled": "true"}
:param operation: string object that defines the way of how package
property will be updated, allowed operations are
"add", "replace" or "delete".
Default value is "replace".
:returns: updated package
"""
return self.clients("murano").packages.update(
package.id, body, operation)
@atomic.action_timer("murano.filter_applications")
def _filter_applications(self, filter_query):
"""Filter list of uploaded application by specified criteria.
:param filter_query: dict that contains filter criteria, it
will be passed as **kwargs to filter method
e.g. {"category": "Web"}
:returns: filtered list of packages
"""
return self.clients("murano").packages.filter(**filter_query)
def _zip_package(self, package_path):
"""Call _prepare_package method that returns path to zip archive."""
return MuranoPackageManager()._prepare_package(package_path)
class MuranoPackageManager(object):
@staticmethod
def _read_from_file(filename):
with open(filename, "r") as f:
read_data = f.read()
return yaml.safe_load(read_data)
@staticmethod
def _write_to_file(data, filename):
with open(filename, "w") as f:
yaml.safe_dump(data, f)
def _change_app_fullname(self, app_dir):
"""Change application full name.
To avoid name conflict error during package import (when user
tries to import a few packages into the same tenant) need to change the
application name. For doing this need to replace following parts
in manifest.yaml
from
...
FullName: app.name
...
Classes:
app.name: app_class.yaml
to:
...
FullName: <new_name>
...
Classes:
<new_name>: app_class.yaml
:param app_dir: path to directory with Murano application context
"""
new_fullname = common_utils.generate_random_name("app.")
manifest_file = os.path.join(app_dir, "manifest.yaml")
manifest = self._read_from_file(manifest_file)
class_file_name = manifest["Classes"][manifest["FullName"]]
# update manifest.yaml file
del manifest["Classes"][manifest["FullName"]]
manifest["FullName"] = new_fullname
manifest["Classes"][new_fullname] = class_file_name
self._write_to_file(manifest, manifest_file)
def _prepare_package(self, package_path):
"""Check whether the package path is path to zip archive or not.
If package_path is not a path to zip archive but path to Murano
application folder, than method prepares zip archive with Murano
application. It copies directory with Murano app files to temporary
folder, changes manifest.yaml and class file (to avoid '409 Conflict'
errors in Murano) and prepares zip package.
:param package_path: path to zip archive or directory with package
components
:returns: path to zip archive with Murano application
"""
if not zipfile.is_zipfile(package_path):
tmp_dir = tempfile.mkdtemp()
pkg_dir = os.path.join(tmp_dir, "package/")
try:
shutil.copytree(package_path, pkg_dir)
self._change_app_fullname(pkg_dir)
package_path = fileutils.pack_dir(pkg_dir)
finally:
shutil.rmtree(tmp_dir)
return package_path
| apache-2.0 |
elelsee/pycfn-elasticsearch | pycfn_elasticsearch/vendored/docutils/utils/error_reporting.py | 104 | 7765 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# :Id: $Id: error_reporting.py 7668 2013-06-04 12:46:30Z milde $
# :Copyright: © 2011 Günter Milde.
# :License: Released under the terms of the `2-Clause BSD license`_, in short:
#
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
# notice and this notice are preserved.
# This file is offered as-is, without any warranty.
#
# .. _2-Clause BSD license: http://www.spdx.org/licenses/BSD-2-Clause
"""
Error reporting should be safe from encoding/decoding errors.
However, implicit conversions of strings and exceptions like
>>> u'%s world: %s' % ('H\xe4llo', Exception(u'H\xe4llo'))
fail in some Python versions:
* In Python <= 2.6, ``unicode(<exception instance>)`` uses
`__str__` and fails with non-ASCII chars in`unicode` arguments.
(work around http://bugs.python.org/issue2517):
* In Python 2, unicode(<exception instance>) fails, with non-ASCII
chars in arguments. (Use case: in some locales, the errstr
argument of IOError contains non-ASCII chars.)
* In Python 2, str(<exception instance>) fails, with non-ASCII chars
in `unicode` arguments.
The `SafeString`, `ErrorString` and `ErrorOutput` classes handle
common exceptions.
"""
import sys, codecs
# Guess the locale's encoding.
# If no valid guess can be made, locale_encoding is set to `None`:
try:
import locale # module missing in Jython
except ImportError:
locale_encoding = None
else:
locale_encoding = locale.getlocale()[1] or locale.getdefaultlocale()[1]
# locale.getpreferredencoding([do_setlocale=True|False])
# has side-effects | might return a wrong guess.
# (cf. Update 1 in http://stackoverflow.com/questions/4082645/using-python-2-xs-locale-module-to-format-numbers-and-currency)
try:
codecs.lookup(locale_encoding or '') # None -> ''
except LookupError:
locale_encoding = None
class SafeString(object):
"""
A wrapper providing robust conversion to `str` and `unicode`.
"""
def __init__(self, data, encoding=None, encoding_errors='backslashreplace',
decoding_errors='replace'):
self.data = data
self.encoding = (encoding or getattr(data, 'encoding', None) or
locale_encoding or 'ascii')
self.encoding_errors = encoding_errors
self.decoding_errors = decoding_errors
def __str__(self):
try:
return str(self.data)
except UnicodeEncodeError, err:
if isinstance(self.data, Exception):
args = [str(SafeString(arg, self.encoding,
self.encoding_errors))
for arg in self.data.args]
return ', '.join(args)
if isinstance(self.data, unicode):
if sys.version_info > (3,0):
return self.data
else:
return self.data.encode(self.encoding,
self.encoding_errors)
raise
def __unicode__(self):
"""
Return unicode representation of `self.data`.
Try ``unicode(self.data)``, catch `UnicodeError` and
* if `self.data` is an Exception instance, work around
http://bugs.python.org/issue2517 with an emulation of
Exception.__unicode__,
* else decode with `self.encoding` and `self.decoding_errors`.
"""
try:
u = unicode(self.data)
if isinstance(self.data, EnvironmentError):
u = u.replace(": u'", ": '") # normalize filename quoting
return u
except UnicodeError, error: # catch ..Encode.. and ..Decode.. errors
if isinstance(self.data, EnvironmentError):
return u"[Errno %s] %s: '%s'" % (self.data.errno,
SafeString(self.data.strerror, self.encoding,
self.decoding_errors),
SafeString(self.data.filename, self.encoding,
self.decoding_errors))
if isinstance(self.data, Exception):
args = [unicode(SafeString(arg, self.encoding,
decoding_errors=self.decoding_errors))
for arg in self.data.args]
return u', '.join(args)
if isinstance(error, UnicodeDecodeError):
return unicode(self.data, self.encoding, self.decoding_errors)
raise
class ErrorString(SafeString):
"""
Safely report exception type and message.
"""
def __str__(self):
return '%s: %s' % (self.data.__class__.__name__,
super(ErrorString, self).__str__())
def __unicode__(self):
return u'%s: %s' % (self.data.__class__.__name__,
super(ErrorString, self).__unicode__())
class ErrorOutput(object):
"""
Wrapper class for file-like error streams with
failsafe de- and encoding of `str`, `bytes`, `unicode` and
`Exception` instances.
"""
def __init__(self, stream=None, encoding=None,
encoding_errors='backslashreplace',
decoding_errors='replace'):
"""
:Parameters:
- `stream`: a file-like object,
a string (path to a file),
`None` (write to `sys.stderr`, default), or
evaluating to `False` (write() requests are ignored).
- `encoding`: `stream` text encoding. Guessed if None.
- `encoding_errors`: how to treat encoding errors.
"""
if stream is None:
stream = sys.stderr
elif not(stream):
stream = False
# if `stream` is a file name, open it
elif isinstance(stream, str):
stream = open(stream, 'w')
elif isinstance(stream, unicode):
stream = open(stream.encode(sys.getfilesystemencoding()), 'w')
self.stream = stream
"""Where warning output is sent."""
self.encoding = (encoding or getattr(stream, 'encoding', None) or
locale_encoding or 'ascii')
"""The output character encoding."""
self.encoding_errors = encoding_errors
"""Encoding error handler."""
self.decoding_errors = decoding_errors
"""Decoding error handler."""
def write(self, data):
"""
Write `data` to self.stream. Ignore, if self.stream is False.
`data` can be a `string`, `unicode`, or `Exception` instance.
"""
if self.stream is False:
return
if isinstance(data, Exception):
data = unicode(SafeString(data, self.encoding,
self.encoding_errors, self.decoding_errors))
try:
self.stream.write(data)
except UnicodeEncodeError:
self.stream.write(data.encode(self.encoding, self.encoding_errors))
except TypeError: # in Python 3, stderr expects unicode
if self.stream in (sys.stderr, sys.stdout):
self.stream.buffer.write(data) # write bytes to raw stream
else:
self.stream.write(unicode(data, self.encoding,
self.decoding_errors))
def close(self):
"""
Close the error-output stream.
Ignored if the stream is` sys.stderr` or `sys.stdout` or has no
close() method.
"""
if self.stream in (sys.stdout, sys.stderr):
return
try:
self.stream.close()
except AttributeError:
pass
| apache-2.0 |
xiaoyaozi5566/DynamicCache | src/mem/ruby/network/simple/SimpleLink.py | 18 | 1846 | # Copyright (c) 2011 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
# Brad Beckmann
from m5.params import *
from m5.proxy import *
from m5.SimObject import SimObject
from BasicLink import BasicIntLink, BasicExtLink
class SimpleExtLink(BasicExtLink):
type = 'SimpleExtLink'
class SimpleIntLink(BasicIntLink):
type = 'SimpleIntLink'
| bsd-3-clause |
earshel/PokeyPyManager | POGOProtos/Enums/IapItemCategory_pb2.py | 16 | 2408 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Enums/IapItemCategory.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Enums/IapItemCategory.proto',
package='POGOProtos.Enums',
syntax='proto3',
serialized_pb=_b('\n&POGOProtos/Enums/IapItemCategory.proto\x12\x10POGOProtos.Enums*\x94\x01\n\x13HoloIapItemCategory\x12\x15\n\x11IAP_CATEGORY_NONE\x10\x00\x12\x17\n\x13IAP_CATEGORY_BUNDLE\x10\x01\x12\x16\n\x12IAP_CATEGORY_ITEMS\x10\x02\x12\x19\n\x15IAP_CATEGORY_UPGRADES\x10\x03\x12\x1a\n\x16IAP_CATEGORY_POKECOINS\x10\x04\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_HOLOIAPITEMCATEGORY = _descriptor.EnumDescriptor(
name='HoloIapItemCategory',
full_name='POGOProtos.Enums.HoloIapItemCategory',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='IAP_CATEGORY_NONE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IAP_CATEGORY_BUNDLE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IAP_CATEGORY_ITEMS', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IAP_CATEGORY_UPGRADES', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IAP_CATEGORY_POKECOINS', index=4, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=61,
serialized_end=209,
)
_sym_db.RegisterEnumDescriptor(_HOLOIAPITEMCATEGORY)
HoloIapItemCategory = enum_type_wrapper.EnumTypeWrapper(_HOLOIAPITEMCATEGORY)
IAP_CATEGORY_NONE = 0
IAP_CATEGORY_BUNDLE = 1
IAP_CATEGORY_ITEMS = 2
IAP_CATEGORY_UPGRADES = 3
IAP_CATEGORY_POKECOINS = 4
DESCRIPTOR.enum_types_by_name['HoloIapItemCategory'] = _HOLOIAPITEMCATEGORY
# @@protoc_insertion_point(module_scope)
| mit |