repo_name (string, 5-100 chars) | path (string, 4-299 chars) | copies (990 classes) | size (string, 4-7 chars) | content (string, 666-1.03M chars) | license (15 classes) | hash (int64) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
asser/django | tests/utils_tests/test_regex_helper.py | 448 | 1784 | from __future__ import unicode_literals
import unittest
from django.utils import regex_helper
class NormalizeTests(unittest.TestCase):
def test_empty(self):
pattern = r""
expected = [('', [])]
result = regex_helper.normalize(pattern)
self.assertEqual(result, expected)
def test_escape(self):
pattern = r"\\\^\$\.\|\?\*\+\(\)\["
expected = [('\\^$.|?*+()[', [])]
result = regex_helper.normalize(pattern)
self.assertEqual(result, expected)
def test_group_positional(self):
pattern = r"(.*)-(.+)"
expected = [('%(_0)s-%(_1)s', ['_0', '_1'])]
result = regex_helper.normalize(pattern)
self.assertEqual(result, expected)
def test_group_ignored(self):
pattern = r"(?i)(?L)(?m)(?s)(?u)(?#)"
expected = [('', [])]
result = regex_helper.normalize(pattern)
self.assertEqual(result, expected)
def test_group_noncapturing(self):
pattern = r"(?:non-capturing)"
expected = [('non-capturing', [])]
result = regex_helper.normalize(pattern)
self.assertEqual(result, expected)
def test_group_named(self):
pattern = r"(?P<first_group_name>.*)-(?P<second_group_name>.*)"
expected = [('%(first_group_name)s-%(second_group_name)s',
['first_group_name', 'second_group_name'])]
result = regex_helper.normalize(pattern)
self.assertEqual(result, expected)
def test_group_backreference(self):
pattern = r"(?P<first_group_name>.*)-(?P=first_group_name)"
expected = [('%(first_group_name)s-%(first_group_name)s',
['first_group_name'])]
result = regex_helper.normalize(pattern)
self.assertEqual(result, expected)
| bsd-3-clause | -1,635,560,719,436,992,300 | 33.980392 | 71 | 0.570628 | false |
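A minimal usage sketch for the regex_helper.normalize() helper exercised in the row above (this is the machinery Django's reverse() relies on; the pattern below is illustrative):
from django.utils import regex_helper
# normalize() turns a URL regex into (format_string, [group_names]) pairs
# that can be filled in to reconstruct a concrete URL.
pattern = r"^articles/(?P<year>[0-9]{4})/$"
for format_string, params in regex_helper.normalize(pattern):
    print(format_string % {'year': '2015'})  # articles/2015/
    print(params)                            # ['year']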
sonofeft/PyHatch | pyhatch/examples/example_1.py | 1 | 1057 | import sys
import os
from pyhatch.hatch_supt import Hatch
SIMPLEDESC='''PyProTem acts as a temporary project for test purposes.'''
LONGDESC="""Use pyProTem to test tox usage locally, travis CI on checkin to
GitHub, tk_nosy to watch files locally and alert breakage, operation under
both python 2 and 3 on Windows and Linux."""
h = Hatch(projName='PyProTem',
mainPyFileName='main.py',
mainDefinesClass='Y',
mainClassName='ProTem',
mainFunctionName='my_function',
author='Some Guy',
github_user_name='somekindaguy',
proj_copyright='Copyright (c) 2015 Some Guy',
proj_license='GPL-3',
version='0.1.3',
email='[email protected]',
status='4 - Beta',
simpleDesc=SIMPLEDESC,
longDesc=LONGDESC,
year=None, # if None, set to this year
organization=None) # if None, set to author
# This example places project into user home directory
h.save_project_below_this_dir( os.path.expanduser('~/') )
| gpl-3.0 | 7,262,015,263,681,618,000 | 33.096774 | 76 | 0.637654 | false |
natanlailari/PennApps2015-Heartmates | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/response.py | 328 | 10347 | # urllib3/response.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import logging
import zlib
import io
from ._collections import HTTPHeaderDict
from .exceptions import DecodeError
from .packages.six import string_types as basestring, binary_type
from .util import is_fp_closed
log = logging.getLogger(__name__)
class DeflateDecoder(object):
def __init__(self):
self._first_try = True
self._data = binary_type()
self._obj = zlib.decompressobj()
def __getattr__(self, name):
return getattr(self._obj, name)
def decompress(self, data):
if not self._first_try:
return self._obj.decompress(data)
self._data += data
try:
return self._obj.decompress(data)
except zlib.error:
self._first_try = False
self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
try:
return self.decompress(self._data)
finally:
self._data = None
def _get_decoder(mode):
if mode == 'gzip':
return zlib.decompressobj(16 + zlib.MAX_WBITS)
return DeflateDecoder()
class HTTPResponse(io.IOBase):
"""
HTTP Response container.
Backwards-compatible to httplib's HTTPResponse but the response ``body`` is
loaded and decoded on-demand when the ``data`` property is accessed.
Extra parameters for behaviour not present in httplib.HTTPResponse:
:param preload_content:
If True, the response's body will be preloaded during construction.
:param decode_content:
If True, the body will be decoded based on the 'content-encoding'
header (e.g. 'gzip' and 'deflate'); if False, decoding is skipped and
the raw data is returned instead.
:param original_response:
When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
object, it's convenient to include the original for debug purposes. It's
otherwise unused.
"""
CONTENT_DECODERS = ['gzip', 'deflate']
REDIRECT_STATUSES = [301, 302, 303, 307, 308]
def __init__(self, body='', headers=None, status=0, version=0, reason=None,
strict=0, preload_content=True, decode_content=True,
original_response=None, pool=None, connection=None):
self.headers = HTTPHeaderDict()
if headers:
self.headers.update(headers)
self.status = status
self.version = version
self.reason = reason
self.strict = strict
self.decode_content = decode_content
self._decoder = None
self._body = body if body and isinstance(body, basestring) else None
self._fp = None
self._original_response = original_response
self._fp_bytes_read = 0
self._pool = pool
self._connection = connection
if hasattr(body, 'read'):
self._fp = body
if preload_content and not self._body:
self._body = self.read(decode_content=decode_content)
def get_redirect_location(self):
"""
Should we redirect and where to?
:returns: Truthy redirect location string if we got a redirect status
code and valid location. ``None`` if redirect status and no
location. ``False`` if not a redirect status code.
"""
if self.status in self.REDIRECT_STATUSES:
return self.headers.get('location')
return False
def release_conn(self):
if not self._pool or not self._connection:
return
self._pool._put_conn(self._connection)
self._connection = None
@property
def data(self):
# For backwards-compat with urllib3 0.4 and earlier.
if self._body:
return self._body
if self._fp:
return self.read(cache_content=True)
def tell(self):
"""
Obtain the number of bytes pulled over the wire so far. May differ from
the amount of content returned by :meth:``HTTPResponse.read`` if bytes
are encoded on the wire (e.g., compressed).
"""
return self._fp_bytes_read
def read(self, amt=None, decode_content=None, cache_content=False):
"""
Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
parameters: ``decode_content`` and ``cache_content``.
:param amt:
How much of the content to read. If specified, caching is skipped
because it doesn't make sense to cache partial content as the full
response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param cache_content:
If True, will save the returned data such that the same result is
returned regardless of the state of the underlying file object. This
is useful if you want the ``.data`` property to continue working
after having ``.read()`` the file object. (Overridden if ``amt`` is
set.)
"""
# Note: content-encoding value should be case-insensitive, per RFC 2616
# Section 3.5
content_encoding = self.headers.get('content-encoding', '').lower()
if self._decoder is None:
if content_encoding in self.CONTENT_DECODERS:
self._decoder = _get_decoder(content_encoding)
if decode_content is None:
decode_content = self.decode_content
if self._fp is None:
return
flush_decoder = False
try:
if amt is None:
# cStringIO doesn't like amt=None
data = self._fp.read()
flush_decoder = True
else:
cache_content = False
data = self._fp.read(amt)
if amt != 0 and not data: # Platform-specific: Buggy versions of Python.
# Close the connection when no data is returned
#
# This is redundant to what httplib/http.client _should_
# already do. However, versions of python released before
# December 15, 2012 (http://bugs.python.org/issue16298) do not
# properly close the connection in all cases. There is no harm
# in redundantly calling close.
self._fp.close()
flush_decoder = True
self._fp_bytes_read += len(data)
try:
if decode_content and self._decoder:
data = self._decoder.decompress(data)
except (IOError, zlib.error) as e:
raise DecodeError(
"Received response with content-encoding: %s, but "
"failed to decode it." % content_encoding,
e)
if flush_decoder and decode_content and self._decoder:
buf = self._decoder.decompress(binary_type())
data += buf + self._decoder.flush()
if cache_content:
self._body = data
return data
finally:
if self._original_response and self._original_response.isclosed():
self.release_conn()
def stream(self, amt=2**16, decode_content=None):
"""
A generator wrapper for the read() method. A call will block until
``amt`` bytes have been read from the connection or until the
connection is closed.
:param amt:
How much of the content to read. The generator will return up to
``amt`` bytes of data per iteration, but may return less. This is particularly
likely when using compressed data. However, the empty string will
never be returned.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
while not is_fp_closed(self._fp):
data = self.read(amt=amt, decode_content=decode_content)
if data:
yield data
@classmethod
def from_httplib(ResponseCls, r, **response_kw):
"""
Given an :class:`httplib.HTTPResponse` instance ``r``, return a
corresponding :class:`urllib3.response.HTTPResponse` object.
Remaining parameters are passed to the HTTPResponse constructor, along
with ``original_response=r``.
"""
headers = HTTPHeaderDict()
for k, v in r.getheaders():
headers.add(k, v)
# HTTPResponse objects in Python 3 don't have a .strict attribute
strict = getattr(r, 'strict', 0)
return ResponseCls(body=r,
headers=headers,
status=r.status,
version=r.version,
reason=r.reason,
strict=strict,
original_response=r,
**response_kw)
# Backwards-compatibility methods for httplib.HTTPResponse
def getheaders(self):
return self.headers
def getheader(self, name, default=None):
return self.headers.get(name, default)
# Overrides from io.IOBase
def close(self):
if not self.closed:
self._fp.close()
@property
def closed(self):
if self._fp is None:
return True
elif hasattr(self._fp, 'closed'):
return self._fp.closed
elif hasattr(self._fp, 'isclosed'): # Python 2
return self._fp.isclosed()
else:
return True
def fileno(self):
if self._fp is None:
raise IOError("HTTPResponse has no file to get a fileno from")
elif hasattr(self._fp, "fileno"):
return self._fp.fileno()
else:
raise IOError("The file-like object this HTTPResponse is wrapped "
"around has no file descriptor")
def flush(self):
if self._fp is not None and hasattr(self._fp, 'flush'):
return self._fp.flush()
def readable(self):
return True
| apache-2.0 | 5,544,893,342,892,006,000 | 32.594156 | 89 | 0.580845 | false |
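A brief usage sketch for the HTTPResponse wrapper in the row above (assumes a standalone urllib3 of the same vintage is importable; in practice urllib3's connection pools call from_httplib() for you, so the raw httplib plumbing here is purely illustrative):
import httplib  # http.client on Python 3
from urllib3.response import HTTPResponse
conn = httplib.HTTPConnection('example.com')
conn.request('GET', '/')
raw = conn.getresponse()
# preload_content=False keeps the body unread so it can be streamed.
resp = HTTPResponse.from_httplib(raw, preload_content=False, decode_content=True)
for chunk in resp.stream(amt=8192):
    print(len(chunk))     # handle each decoded chunk as it arrives
print(resp.tell())        # total bytes pulled over the wire so far
resp.release_conn()       # no-op here because no pool/connection was supplied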
lokeshjindal15/pd-gem5 | src/arch/alpha/AlphaInterrupts.py | 69 | 1754 | # Copyright (c) 2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
from m5.SimObject import SimObject
class AlphaInterrupts(SimObject):
type = 'AlphaInterrupts'
cxx_class = 'AlphaISA::Interrupts'
cxx_header = "arch/alpha/interrupts.hh"
| bsd-3-clause | -2,305,042,434,960,316,000 | 50.588235 | 72 | 0.788483 | false |
henridwyer/scikit-learn | sklearn/gaussian_process/regression_models.py | 259 | 2166 | # -*- coding: utf-8 -*-
# Author: Vincent Dubourg <[email protected]>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
"""
The built-in regression models submodule for the gaussian_process module.
"""
import numpy as np
def constant(x):
"""
Zero order polynomial (constant, p = 1) regression model.
x --> f(x) = 1
Parameters
----------
x : array_like
An array with shape (n_eval, n_features) giving the locations x at
which the regression model should be evaluated.
Returns
-------
f : array_like
An array with shape (n_eval, p) with the values of the regression
model.
"""
x = np.asarray(x, dtype=np.float)
n_eval = x.shape[0]
f = np.ones([n_eval, 1])
return f
def linear(x):
"""
First order polynomial (linear, p = n+1) regression model.
x --> f(x) = [ 1, x_1, ..., x_n ].T
Parameters
----------
x : array_like
An array with shape (n_eval, n_features) giving the locations x at
which the regression model should be evaluated.
Returns
-------
f : array_like
An array with shape (n_eval, p) with the values of the regression
model.
"""
x = np.asarray(x, dtype=np.float)
n_eval = x.shape[0]
f = np.hstack([np.ones([n_eval, 1]), x])
return f
def quadratic(x):
"""
Second order polynomial (quadratic, p = n*(n+1)/2+n+1) regression model.
x --> f(x) = [ 1, { x_i, i = 1,...,n }, { x_i * x_j, (i,j) = 1,...,n } ].T
i <= j
Parameters
----------
x : array_like
An array with shape (n_eval, n_features) giving the locations x at
which the regression model should be evaluated.
Returns
-------
f : array_like
An array with shape (n_eval, p) with the values of the regression
model.
"""
x = np.asarray(x, dtype=np.float)
n_eval, n_features = x.shape
f = np.hstack([np.ones([n_eval, 1]), x])
for k in range(n_features):
f = np.hstack([f, x[:, k, np.newaxis] * x[:, k:]])
return f
| bsd-3-clause | -6,285,011,764,685,540,000 | 23.337079 | 79 | 0.54386 | false |
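A small sketch evaluating the three basis functions above on a toy design matrix (assumes this vintage of scikit-learn, where sklearn.gaussian_process.regression_models is still importable). Note that quadratic() also includes squared terms, since x[:, k:] starts at column k, so a 2-feature input yields 6 columns:
import numpy as np
from sklearn.gaussian_process import regression_models as regression
X = np.array([[0.0, 1.0],
              [2.0, 3.0]])               # n_eval=2, n_features=2
print(regression.constant(X).shape)      # (2, 1)
print(regression.linear(X).shape)        # (2, 3)
print(regression.quadratic(X).shape)     # (2, 6) -> 1, x1, x2, x1^2, x1*x2, x2^2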
Arafatk/sympy | sympy/vector/coordsysrect.py | 9 | 24501 | from sympy.core.basic import Basic
from sympy.vector.scalar import BaseScalar
from sympy import eye, trigsimp, ImmutableMatrix as Matrix, Symbol
from sympy.core.compatibility import string_types, range
from sympy.core.cache import cacheit
from sympy.vector.orienters import (Orienter, AxisOrienter, BodyOrienter,
SpaceOrienter, QuaternionOrienter)
import sympy.vector
class CoordSysCartesian(Basic):
"""
Represents a coordinate system in 3-D space.
"""
def __new__(cls, name, location=None, rotation_matrix=None,
parent=None, vector_names=None, variable_names=None):
"""
The orientation/location parameters are necessary if this system
is being defined at a certain orientation or location wrt another.
Parameters
==========
name : str
The name of the new CoordSysCartesian instance.
location : Vector
The position vector of the new system's origin wrt the parent
instance.
rotation_matrix : SymPy ImmutableMatrix
The rotation matrix of the new coordinate system with respect
to the parent. In other words, the output of
new_system.rotation_matrix(parent).
parent : CoordSysCartesian
The coordinate system wrt which the orientation/location
(or both) is being defined.
vector_names, variable_names : iterable(optional)
Iterables of 3 strings each, with custom names for base
vectors and base scalars of the new system respectively.
Used for simple str printing.
"""
name = str(name)
Vector = sympy.vector.Vector
BaseVector = sympy.vector.BaseVector
Point = sympy.vector.Point
if not isinstance(name, string_types):
raise TypeError("name should be a string")
#If orientation information has been provided, store
#the rotation matrix accordingly
if rotation_matrix is None:
parent_orient = Matrix(eye(3))
else:
if not isinstance(rotation_matrix, Matrix):
raise TypeError("rotation_matrix should be an Immutable" +
"Matrix instance")
parent_orient = rotation_matrix
#If location information is not given, adjust the default
#location as Vector.zero
if parent is not None:
if not isinstance(parent, CoordSysCartesian):
raise TypeError("parent should be a " +
"CoordSysCartesian/None")
if location is None:
location = Vector.zero
else:
if not isinstance(location, Vector):
raise TypeError("location should be a Vector")
#Check that location does not contain base
#scalars
for x in location.free_symbols:
if isinstance(x, BaseScalar):
raise ValueError("location should not contain" +
" BaseScalars")
origin = parent.origin.locate_new(name + '.origin',
location)
else:
location = Vector.zero
origin = Point(name + '.origin')
#All systems that are defined as 'roots' are unequal, unless
#they have the same name.
#Systems defined at same orientation/position wrt the same
#'parent' are equal, irrespective of the name.
#This is true even if the same orientation is provided via
#different methods like Axis/Body/Space/Quaternion.
#However, coincident systems may be seen as unequal if
#positioned/oriented wrt different parents, even though
#they may actually be 'coincident' wrt the root system.
if parent is not None:
obj = super(CoordSysCartesian, cls).__new__(
cls, Symbol(name), location, parent_orient, parent)
else:
obj = super(CoordSysCartesian, cls).__new__(
cls, Symbol(name), location, parent_orient)
obj._name = name
#Initialize the base vectors
if vector_names is None:
vector_names = (name + '.i', name + '.j', name + '.k')
latex_vects = [(r'\mathbf{\hat{i}_{%s}}' % name),
(r'\mathbf{\hat{j}_{%s}}' % name),
(r'\mathbf{\hat{k}_{%s}}' % name)]
pretty_vects = (name + '_i', name + '_j', name + '_k')
else:
_check_strings('vector_names', vector_names)
vector_names = list(vector_names)
latex_vects = [(r'\mathbf{\hat{%s}_{%s}}' % (x, name)) for
x in vector_names]
pretty_vects = [(name + '_' + x) for x in vector_names]
obj._i = BaseVector(vector_names[0], 0, obj,
pretty_vects[0], latex_vects[0])
obj._j = BaseVector(vector_names[1], 1, obj,
pretty_vects[1], latex_vects[1])
obj._k = BaseVector(vector_names[2], 2, obj,
pretty_vects[2], latex_vects[2])
#Initialize the base scalars
if variable_names is None:
variable_names = (name + '.x', name + '.y', name + '.z')
latex_scalars = [(r"\mathbf{{x}_{%s}}" % name),
(r"\mathbf{{y}_{%s}}" % name),
(r"\mathbf{{z}_{%s}}" % name)]
pretty_scalars = (name + '_x', name + '_y', name + '_z')
else:
_check_strings('variable_names', variable_names)
variable_names = list(variable_names)
latex_scalars = [(r"\mathbf{{%s}_{%s}}" % (x, name)) for
x in variable_names]
pretty_scalars = [(name + '_' + x) for x in variable_names]
obj._x = BaseScalar(variable_names[0], 0, obj,
pretty_scalars[0], latex_scalars[0])
obj._y = BaseScalar(variable_names[1], 1, obj,
pretty_scalars[1], latex_scalars[1])
obj._z = BaseScalar(variable_names[2], 2, obj,
pretty_scalars[2], latex_scalars[2])
#Assign a Del operator instance
from sympy.vector.deloperator import Del
obj._delop = Del(obj)
#Assign params
obj._parent = parent
if obj._parent is not None:
obj._root = obj._parent._root
else:
obj._root = obj
obj._parent_rotation_matrix = parent_orient
obj._origin = origin
#Return the instance
return obj
def __str__(self, printer=None):
return self._name
__repr__ = __str__
_sympystr = __str__
def __iter__(self):
return iter([self.i, self.j, self.k])
@property
def origin(self):
return self._origin
@property
def delop(self):
return self._delop
@property
def i(self):
return self._i
@property
def j(self):
return self._j
@property
def k(self):
return self._k
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def z(self):
return self._z
def base_vectors(self):
return (self._i, self._j, self._k)
def base_scalars(self):
return (self._x, self._y, self._z)
@cacheit
def rotation_matrix(self, other):
"""
Returns the direction cosine matrix(DCM), also known as the
'rotation matrix' of this coordinate system with respect to
another system.
If v_a is a vector defined in system 'A' (in matrix format)
and v_b is the same vector defined in system 'B', then
v_a = A.rotation_matrix(B) * v_b.
A SymPy Matrix is returned.
Parameters
==========
other : CoordSysCartesian
The system which the DCM is generated to.
Examples
========
>>> from sympy.vector import CoordSysCartesian
>>> from sympy import symbols
>>> q1 = symbols('q1')
>>> N = CoordSysCartesian('N')
>>> A = N.orient_new_axis('A', q1, N.i)
>>> N.rotation_matrix(A)
Matrix([
[1, 0, 0],
[0, cos(q1), -sin(q1)],
[0, sin(q1), cos(q1)]])
"""
from sympy.vector.functions import _path
if not isinstance(other, CoordSysCartesian):
raise TypeError(str(other) +
" is not a CoordSysCartesian")
#Handle special cases
if other == self:
return eye(3)
elif other == self._parent:
return self._parent_rotation_matrix
elif other._parent == self:
return other._parent_rotation_matrix.T
#Else, use tree to calculate position
rootindex, path = _path(self, other)
result = eye(3)
i = -1
for i in range(rootindex):
result *= path[i]._parent_rotation_matrix
i += 2
while i < len(path):
result *= path[i]._parent_rotation_matrix.T
i += 1
return result
@cacheit
def position_wrt(self, other):
"""
Returns the position vector of the origin of this coordinate
system with respect to another Point/CoordSysCartesian.
Parameters
==========
other : Point/CoordSysCartesian
If other is a Point, the position of this system's origin
wrt it is returned. If it's an instance of CoordSysCartesian,
the position wrt its origin is returned.
Examples
========
>>> from sympy.vector import Point, CoordSysCartesian
>>> N = CoordSysCartesian('N')
>>> N1 = N.locate_new('N1', 10 * N.i)
>>> N.position_wrt(N1)
(-10)*N.i
"""
return self.origin.position_wrt(other)
def scalar_map(self, other):
"""
Returns a dictionary which expresses the coordinate variables
(base scalars) of this frame in terms of the variables of
otherframe.
Parameters
==========
otherframe : CoordSysCartesian
The other system to map the variables to.
Examples
========
>>> from sympy.vector import CoordSysCartesian
>>> from sympy import Symbol
>>> A = CoordSysCartesian('A')
>>> q = Symbol('q')
>>> B = A.orient_new_axis('B', q, A.k)
>>> A.scalar_map(B)
{A.x: B.x*cos(q) - B.y*sin(q), A.y: B.x*sin(q) + B.y*cos(q), A.z: B.z}
"""
relocated_scalars = []
origin_coords = tuple(self.position_wrt(other).to_matrix(other))
for i, x in enumerate(other.base_scalars()):
relocated_scalars.append(x - origin_coords[i])
vars_matrix = (self.rotation_matrix(other) *
Matrix(relocated_scalars))
mapping = {}
for i, x in enumerate(self.base_scalars()):
mapping[x] = trigsimp(vars_matrix[i])
return mapping
def locate_new(self, name, position, vector_names=None,
variable_names=None):
"""
Returns a CoordSysCartesian with its origin located at the given
position wrt this coordinate system's origin.
Parameters
==========
name : str
The name of the new CoordSysCartesian instance.
position : Vector
The position vector of the new system's origin wrt this
one.
vector_names, variable_names : iterable(optional)
Iterables of 3 strings each, with custom names for base
vectors and base scalars of the new system respectively.
Used for simple str printing.
Examples
========
>>> from sympy.vector import CoordSysCartesian
>>> A = CoordSysCartesian('A')
>>> B = A.locate_new('B', 10 * A.i)
>>> B.origin.position_wrt(A.origin)
10*A.i
"""
return CoordSysCartesian(name, location=position,
vector_names=vector_names,
variable_names=variable_names,
parent=self)
def orient_new(self, name, orienters, location=None,
vector_names=None, variable_names=None):
"""
Creates a new CoordSysCartesian oriented in the user-specified way
with respect to this system.
Please refer to the documentation of the orienter classes
for more information about the orientation procedure.
Parameters
==========
name : str
The name of the new CoordSysCartesian instance.
orienters : iterable/Orienter
An Orienter or an iterable of Orienters for orienting the
new coordinate system.
If an Orienter is provided, it is applied to get the new
system.
If an iterable is provided, the orienters will be applied
in the order in which they appear in the iterable.
location : Vector(optional)
The location of the new coordinate system's origin wrt this
system's origin. If not specified, the origins are taken to
be coincident.
vector_names, variable_names : iterable(optional)
Iterables of 3 strings each, with custom names for base
vectors and base scalars of the new system respectively.
Used for simple str printing.
Examples
========
>>> from sympy.vector import CoordSysCartesian
>>> from sympy import symbols
>>> q0, q1, q2, q3 = symbols('q0 q1 q2 q3')
>>> N = CoordSysCartesian('N')
Using an AxisOrienter
>>> from sympy.vector import AxisOrienter
>>> axis_orienter = AxisOrienter(q1, N.i + 2 * N.j)
>>> A = N.orient_new('A', (axis_orienter, ))
Using a BodyOrienter
>>> from sympy.vector import BodyOrienter
>>> body_orienter = BodyOrienter(q1, q2, q3, '123')
>>> B = N.orient_new('B', (body_orienter, ))
Using a SpaceOrienter
>>> from sympy.vector import SpaceOrienter
>>> space_orienter = SpaceOrienter(q1, q2, q3, '312')
>>> C = N.orient_new('C', (space_orienter, ))
Using a QuaternionOrienter
>>> from sympy.vector import QuaternionOrienter
>>> q_orienter = QuaternionOrienter(q0, q1, q2, q3)
>>> D = N.orient_new('D', (q_orienter, ))
"""
if isinstance(orienters, Orienter):
if isinstance(orienters, AxisOrienter):
final_matrix = orienters.rotation_matrix(self)
else:
final_matrix = orienters.rotation_matrix()
else:
final_matrix = Matrix(eye(3))
for orienter in orienters:
if isinstance(orienter, AxisOrienter):
final_matrix *= orienter.rotation_matrix(self)
else:
final_matrix *= orienter.rotation_matrix()
return CoordSysCartesian(name, rotation_matrix=final_matrix,
vector_names=vector_names,
variable_names=variable_names,
location = location,
parent=self)
def orient_new_axis(self, name, angle, axis, location=None,
vector_names=None, variable_names=None):
"""
Axis rotation is a rotation about an arbitrary axis by
some angle. The angle is supplied as a SymPy expr scalar, and
the axis is supplied as a Vector.
Parameters
==========
name : string
The name of the new coordinate system
angle : Expr
The angle by which the new system is to be rotated
axis : Vector
The axis around which the rotation has to be performed
location : Vector(optional)
The location of the new coordinate system's origin wrt this
system's origin. If not specified, the origins are taken to
be coincident.
vector_names, variable_names : iterable(optional)
Iterables of 3 strings each, with custom names for base
vectors and base scalars of the new system respectively.
Used for simple str printing.
Examples
========
>>> from sympy.vector import CoordSysCartesian
>>> from sympy import symbols
>>> q1 = symbols('q1')
>>> N = CoordSysCartesian('N')
>>> B = N.orient_new_axis('B', q1, N.i + 2 * N.j)
"""
orienter = AxisOrienter(angle, axis)
return self.orient_new(name, orienter,
location=location,
vector_names=vector_names,
variable_names=variable_names)
def orient_new_body(self, name, angle1, angle2, angle3,
rotation_order, location=None,
vector_names=None, variable_names=None):
"""
Body orientation takes this coordinate system through three
successive simple rotations.
Body fixed rotations include both Euler Angles and
Tait-Bryan Angles, see http://en.wikipedia.org/wiki/Euler_angles.
Parameters
==========
name : string
The name of the new coordinate system
angle1, angle2, angle3 : Expr
Three successive angles to rotate the coordinate system by
rotation_order : string
String defining the order of axes for rotation
location : Vector(optional)
The location of the new coordinate system's origin wrt this
system's origin. If not specified, the origins are taken to
be coincident.
vector_names, variable_names : iterable(optional)
Iterables of 3 strings each, with custom names for base
vectors and base scalars of the new system respectively.
Used for simple str printing.
Examples
========
>>> from sympy.vector import CoordSysCartesian
>>> from sympy import symbols
>>> q1, q2, q3 = symbols('q1 q2 q3')
>>> N = CoordSysCartesian('N')
A 'Body' fixed rotation is described by three angles and
three body-fixed rotation axes. To orient a coordinate system D
with respect to N, each sequential rotation is always about
the orthogonal unit vectors fixed to D. For example, a '123'
rotation will specify rotations about N.i, then D.j, then
D.k. (Initially, D.i is same as N.i)
Therefore,
>>> D = N.orient_new_body('D', q1, q2, q3, '123')
is same as
>>> D = N.orient_new_axis('D', q1, N.i)
>>> D = D.orient_new_axis('D', q2, D.j)
>>> D = D.orient_new_axis('D', q3, D.k)
Acceptable rotation orders are of length 3, expressed in XYZ or
123, and cannot have a rotation about an axis twice in a row.
>>> B = N.orient_new_body('B', q1, q2, q3, '123')
>>> B = N.orient_new_body('B', q1, q2, 0, 'ZXZ')
>>> B = N.orient_new_body('B', 0, 0, 0, 'XYX')
"""
orienter = BodyOrienter(angle1, angle2, angle3, rotation_order)
return self.orient_new(name, orienter,
location=location,
vector_names=vector_names,
variable_names=variable_names)
def orient_new_space(self, name, angle1, angle2, angle3,
rotation_order, location=None,
vector_names=None, variable_names=None):
"""
Space rotation is similar to Body rotation, but the rotations
are applied in the opposite order.
Parameters
==========
name : string
The name of the new coordinate system
angle1, angle2, angle3 : Expr
Three successive angles to rotate the coordinate system by
rotation_order : string
String defining the order of axes for rotation
location : Vector(optional)
The location of the new coordinate system's origin wrt this
system's origin. If not specified, the origins are taken to
be coincident.
vector_names, variable_names : iterable(optional)
Iterables of 3 strings each, with custom names for base
vectors and base scalars of the new system respectively.
Used for simple str printing.
See Also
========
CoordSysCartesian.orient_new_body : method to orient via Euler
angles
Examples
========
>>> from sympy.vector import CoordSysCartesian
>>> from sympy import symbols
>>> q1, q2, q3 = symbols('q1 q2 q3')
>>> N = CoordSysCartesian('N')
To orient a coordinate system D with respect to N, each
sequential rotation is always about N's orthogonal unit vectors.
For example, a '123' rotation will specify rotations about
N.i, then N.j, then N.k.
Therefore,
>>> D = N.orient_new_space('D', q1, q2, q3, '312')
is same as
>>> B = N.orient_new_axis('B', q1, N.i)
>>> C = B.orient_new_axis('C', q2, N.j)
>>> D = C.orient_new_axis('D', q3, N.k)
"""
orienter = SpaceOrienter(angle1, angle2, angle3, rotation_order)
return self.orient_new(name, orienter,
location=location,
vector_names=vector_names,
variable_names=variable_names)
def orient_new_quaternion(self, name, q0, q1, q2, q3, location=None,
vector_names=None, variable_names=None):
"""
Quaternion orientation orients the new CoordSysCartesian with
Quaternions, defined as a finite rotation about lambda, a unit
vector, by some amount theta.
This orientation is described by four parameters:
q0 = cos(theta/2)
q1 = lambda_x sin(theta/2)
q2 = lambda_y sin(theta/2)
q3 = lambda_z sin(theta/2)
Quaternion does not take in a rotation order.
Parameters
==========
name : string
The name of the new coordinate system
q0, q1, q2, q3 : Expr
The quaternions to rotate the coordinate system by
location : Vector(optional)
The location of the new coordinate system's origin wrt this
system's origin. If not specified, the origins are taken to
be coincident.
vector_names, variable_names : iterable(optional)
Iterables of 3 strings each, with custom names for base
vectors and base scalars of the new system respectively.
Used for simple str printing.
Examples
========
>>> from sympy.vector import CoordSysCartesian
>>> from sympy import symbols
>>> q0, q1, q2, q3 = symbols('q0 q1 q2 q3')
>>> N = CoordSysCartesian('N')
>>> B = N.orient_new_quaternion('B', q0, q1, q2, q3)
"""
orienter = QuaternionOrienter(q0, q1, q2, q3)
return self.orient_new(name, orienter,
location=location,
vector_names=vector_names,
variable_names=variable_names)
def __init__(self, name, location=None, rotation_matrix=None,
parent=None, vector_names=None, variable_names=None,
latex_vects=None, pretty_vects=None, latex_scalars=None,
pretty_scalars=None):
#Dummy initializer for setting docstring
pass
__init__.__doc__ = __new__.__doc__
def _check_strings(arg_name, arg):
errorstr = arg_name + " must be an iterable of 3 string-types"
if len(arg) != 3:
raise ValueError(errorstr)
try:
for s in arg:
if not isinstance(s, string_types):
raise TypeError(errorstr)
except:
raise TypeError(errorstr)
| bsd-3-clause | -2,273,897,078,371,305,700 | 33.508451 | 78 | 0.553161 | false |
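A short sketch tying the orientation and location machinery above together (it mirrors the embedded doctests; in later SymPy releases this class was renamed CoordSys3D):
from sympy import symbols
from sympy.vector import CoordSysCartesian
q = symbols('q')
N = CoordSysCartesian('N')
B = N.orient_new_axis('B', q, N.k, location=3*N.i)  # rotate about N.k, shift origin
print(B.position_wrt(N))      # 3*N.i
print(N.rotation_matrix(B))   # 3x3 rotation matrix in terms of q
print(N.scalar_map(B))        # N's base scalars expressed in B's coordinates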
jhg/django | tests/utils_tests/test_baseconv.py | 326 | 1787 | from unittest import TestCase
from django.utils.baseconv import (
BaseConverter, base2, base16, base36, base56, base62, base64,
)
from django.utils.six.moves import range
class TestBaseConv(TestCase):
def test_baseconv(self):
nums = [-10 ** 10, 10 ** 10] + list(range(-100, 100))
for converter in [base2, base16, base36, base56, base62, base64]:
for i in nums:
self.assertEqual(i, converter.decode(converter.encode(i)))
def test_base11(self):
base11 = BaseConverter('0123456789-', sign='$')
self.assertEqual(base11.encode(1234), '-22')
self.assertEqual(base11.decode('-22'), 1234)
self.assertEqual(base11.encode(-1234), '$-22')
self.assertEqual(base11.decode('$-22'), -1234)
def test_base20(self):
base20 = BaseConverter('0123456789abcdefghij')
self.assertEqual(base20.encode(1234), '31e')
self.assertEqual(base20.decode('31e'), 1234)
self.assertEqual(base20.encode(-1234), '-31e')
self.assertEqual(base20.decode('-31e'), -1234)
def test_base64(self):
self.assertEqual(base64.encode(1234), 'JI')
self.assertEqual(base64.decode('JI'), 1234)
self.assertEqual(base64.encode(-1234), '$JI')
self.assertEqual(base64.decode('$JI'), -1234)
def test_base7(self):
base7 = BaseConverter('cjdhel3', sign='g')
self.assertEqual(base7.encode(1234), 'hejd')
self.assertEqual(base7.decode('hejd'), 1234)
self.assertEqual(base7.encode(-1234), 'ghejd')
self.assertEqual(base7.decode('ghejd'), -1234)
def test_exception(self):
self.assertRaises(ValueError, BaseConverter, 'abc', sign='a')
self.assertIsInstance(BaseConverter('abc', sign='d'), BaseConverter)
| bsd-3-clause | -366,946,338,648,132,350 | 37.847826 | 76 | 0.63906 | false |
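A minimal sketch of the converters exercised by the tests above (base62 is a common choice for short, URL-safe integer tokens):
from django.utils.baseconv import base62, BaseConverter
token = base62.encode(123456789)           # short alphanumeric string
assert base62.decode(token) == 123456789   # values round-trip
hexconv = BaseConverter('0123456789abcdef')
print(hexconv.encode(255))                 # 'ff'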
abhishekgahlot/scikit-learn | examples/applications/topics_extraction_with_nmf.py | 106 | 2313 | """
========================================================
Topics extraction with Non-Negative Matrix Factorization
========================================================
This is a proof of concept application of Non Negative Matrix
Factorization of the term frequency matrix of a corpus of documents so
as to extract an additive model of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a couple of tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time complexity
is polynomial.
"""
# Author: Olivier Grisel <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
t0 = time()
print("Loading dataset and extracting TF-IDF features...")
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tfidf = vectorizer.fit_transform(dataset.data[:n_samples])
print("done in %0.3fs." % (time() - t0))
# Fit the NMF model
print("Fitting the NMF model with n_samples=%d and n_features=%d..."
% (n_samples, n_features))
nmf = NMF(n_components=n_topics, random_state=1).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
feature_names = vectorizer.get_feature_names()
for topic_idx, topic in enumerate(nmf.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
| bsd-3-clause | 7,327,240,810,867,790,000 | 36.306452 | 77 | 0.677043 | false |
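A possible follow-on step for the script above (it reuses the nmf, tfidf and vectorizer variables defined there; the query document is illustrative): the fitted factorization can also assign topic weights to individual documents.
W = nmf.transform(tfidf)                # document-topic matrix, shape (n_samples, n_topics)
print(W[0].argmax())                    # dominant topic index of the first post
new_doc = vectorizer.transform(["graphics card drivers for linux"])
print(nmf.transform(new_doc).round(2))  # topic mixture for an unseen document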
RoshaneH/INFO3180lab4 | lib/werkzeug/testsuite/datastructures.py | 97 | 27488 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.datastructures
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests the functionality of the provided Werkzeug
datastructures.
TODO:
- FileMultiDict
- Immutable types undertested
- Split up dict tests
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import unittest
import pickle
from contextlib import contextmanager
from copy import copy
from werkzeug import datastructures
from werkzeug._compat import iterkeys, itervalues, iteritems, iterlists, \
iterlistvalues, text_type
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.exceptions import BadRequestKeyError
class NativeItermethodsTestCase(WerkzeugTestCase):
def test_basic(self):
@datastructures.native_itermethods(['keys', 'values', 'items'])
class StupidDict(object):
def keys(self, multi=1):
return iter(['a', 'b', 'c'] * multi)
def values(self, multi=1):
return iter([1, 2, 3] * multi)
def items(self, multi=1):
return iter(zip(iterkeys(self, multi=multi),
itervalues(self, multi=multi)))
d = StupidDict()
expected_keys = ['a', 'b', 'c']
expected_values = [1, 2, 3]
expected_items = list(zip(expected_keys, expected_values))
self.assert_equal(list(iterkeys(d)), expected_keys)
self.assert_equal(list(itervalues(d)), expected_values)
self.assert_equal(list(iteritems(d)), expected_items)
self.assert_equal(list(iterkeys(d, 2)), expected_keys * 2)
self.assert_equal(list(itervalues(d, 2)), expected_values * 2)
self.assert_equal(list(iteritems(d, 2)), expected_items * 2)
class MutableMultiDictBaseTestCase(WerkzeugTestCase):
storage_class = None
def test_pickle(self):
cls = self.storage_class
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
d = cls()
d.setlist(b'foo', [1, 2, 3, 4])
d.setlist(b'bar', b'foo bar baz'.split())
s = pickle.dumps(d, protocol)
ud = pickle.loads(s)
self.assert_equal(type(ud), type(d))
self.assert_equal(ud, d)
self.assert_equal(pickle.loads(
s.replace(b'werkzeug.datastructures', b'werkzeug')), d)
ud[b'newkey'] = b'bla'
self.assert_not_equal(ud, d)
def test_basic_interface(self):
md = self.storage_class()
assert isinstance(md, dict)
mapping = [('a', 1), ('b', 2), ('a', 2), ('d', 3),
('a', 1), ('a', 3), ('d', 4), ('c', 3)]
md = self.storage_class(mapping)
# simple getitem gives the first value
self.assert_equal(md['a'], 1)
self.assert_equal(md['c'], 3)
with self.assert_raises(KeyError):
md['e']
self.assert_equal(md.get('a'), 1)
# list getitem
self.assert_equal(md.getlist('a'), [1, 2, 1, 3])
self.assert_equal(md.getlist('d'), [3, 4])
# do not raise if key not found
self.assert_equal(md.getlist('x'), [])
# simple setitem overwrites all values
md['a'] = 42
self.assert_equal(md.getlist('a'), [42])
# list setitem
md.setlist('a', [1, 2, 3])
self.assert_equal(md['a'], 1)
self.assert_equal(md.getlist('a'), [1, 2, 3])
# verify that it does not change original lists
l1 = [1, 2, 3]
md.setlist('a', l1)
del l1[:]
self.assert_equal(md['a'], 1)
# setdefault, setlistdefault
self.assert_equal(md.setdefault('u', 23), 23)
self.assert_equal(md.getlist('u'), [23])
del md['u']
md.setlist('u', [-1, -2])
# delitem
del md['u']
with self.assert_raises(KeyError):
md['u']
del md['d']
self.assert_equal(md.getlist('d'), [])
# keys, values, items, lists
self.assert_equal(list(sorted(md.keys())), ['a', 'b', 'c'])
self.assert_equal(list(sorted(iterkeys(md))), ['a', 'b', 'c'])
self.assert_equal(list(sorted(itervalues(md))), [1, 2, 3])
self.assert_equal(list(sorted(itervalues(md))), [1, 2, 3])
self.assert_equal(list(sorted(md.items())),
[('a', 1), ('b', 2), ('c', 3)])
self.assert_equal(list(sorted(md.items(multi=True))),
[('a', 1), ('a', 2), ('a', 3), ('b', 2), ('c', 3)])
self.assert_equal(list(sorted(iteritems(md))),
[('a', 1), ('b', 2), ('c', 3)])
self.assert_equal(list(sorted(iteritems(md, multi=True))),
[('a', 1), ('a', 2), ('a', 3), ('b', 2), ('c', 3)])
self.assert_equal(list(sorted(md.lists())),
[('a', [1, 2, 3]), ('b', [2]), ('c', [3])])
self.assert_equal(list(sorted(iterlists(md))),
[('a', [1, 2, 3]), ('b', [2]), ('c', [3])])
# copy method
c = md.copy()
self.assert_equal(c['a'], 1)
self.assert_equal(c.getlist('a'), [1, 2, 3])
# copy method 2
c = copy(md)
self.assert_equal(c['a'], 1)
self.assert_equal(c.getlist('a'), [1, 2, 3])
# update with a multidict
od = self.storage_class([('a', 4), ('a', 5), ('y', 0)])
md.update(od)
self.assert_equal(md.getlist('a'), [1, 2, 3, 4, 5])
self.assert_equal(md.getlist('y'), [0])
# update with a regular dict
md = c
od = {'a': 4, 'y': 0}
md.update(od)
self.assert_equal(md.getlist('a'), [1, 2, 3, 4])
self.assert_equal(md.getlist('y'), [0])
# pop, poplist, popitem, popitemlist
self.assert_equal(md.pop('y'), 0)
assert 'y' not in md
self.assert_equal(md.poplist('a'), [1, 2, 3, 4])
assert 'a' not in md
self.assert_equal(md.poplist('missing'), [])
# remaining: b=2, c=3
popped = md.popitem()
assert popped in [('b', 2), ('c', 3)]
popped = md.popitemlist()
assert popped in [('b', [2]), ('c', [3])]
# type conversion
md = self.storage_class({'a': '4', 'b': ['2', '3']})
self.assert_equal(md.get('a', type=int), 4)
self.assert_equal(md.getlist('b', type=int), [2, 3])
# repr
md = self.storage_class([('a', 1), ('a', 2), ('b', 3)])
assert "('a', 1)" in repr(md)
assert "('a', 2)" in repr(md)
assert "('b', 3)" in repr(md)
# add and getlist
md.add('c', '42')
md.add('c', '23')
self.assert_equal(md.getlist('c'), ['42', '23'])
md.add('c', 'blah')
self.assert_equal(md.getlist('c', type=int), [42, 23])
# setdefault
md = self.storage_class()
md.setdefault('x', []).append(42)
md.setdefault('x', []).append(23)
self.assert_equal(md['x'], [42, 23])
# to dict
md = self.storage_class()
md['foo'] = 42
md.add('bar', 1)
md.add('bar', 2)
self.assert_equal(md.to_dict(), {'foo': 42, 'bar': 1})
self.assert_equal(md.to_dict(flat=False), {'foo': [42], 'bar': [1, 2]})
# popitem from empty dict
with self.assert_raises(KeyError):
self.storage_class().popitem()
with self.assert_raises(KeyError):
self.storage_class().popitemlist()
# key errors are of a special type
with self.assert_raises(BadRequestKeyError):
self.storage_class()[42]
# setlist works
md = self.storage_class()
md['foo'] = 42
md.setlist('foo', [1, 2])
self.assert_equal(md.getlist('foo'), [1, 2])
class ImmutableDictBaseTestCase(WerkzeugTestCase):
storage_class = None
def test_follows_dict_interface(self):
cls = self.storage_class
data = {'foo': 1, 'bar': 2, 'baz': 3}
d = cls(data)
self.assert_equal(d['foo'], 1)
self.assert_equal(d['bar'], 2)
self.assert_equal(d['baz'], 3)
self.assert_equal(sorted(d.keys()), ['bar', 'baz', 'foo'])
self.assert_true('foo' in d)
self.assert_true('foox' not in d)
self.assert_equal(len(d), 3)
def test_copies_are_mutable(self):
cls = self.storage_class
immutable = cls({'a': 1})
with self.assert_raises(TypeError):
immutable.pop('a')
mutable = immutable.copy()
mutable.pop('a')
self.assert_true('a' in immutable)
self.assert_true(mutable is not immutable)
self.assert_true(copy(immutable) is immutable)
def test_dict_is_hashable(self):
cls = self.storage_class
immutable = cls({'a': 1, 'b': 2})
immutable2 = cls({'a': 2, 'b': 2})
x = set([immutable])
self.assert_true(immutable in x)
self.assert_true(immutable2 not in x)
x.discard(immutable)
self.assert_true(immutable not in x)
self.assert_true(immutable2 not in x)
x.add(immutable2)
self.assert_true(immutable not in x)
self.assert_true(immutable2 in x)
x.add(immutable)
self.assert_true(immutable in x)
self.assert_true(immutable2 in x)
class ImmutableTypeConversionDictTestCase(ImmutableDictBaseTestCase):
storage_class = datastructures.ImmutableTypeConversionDict
class ImmutableMultiDictTestCase(ImmutableDictBaseTestCase):
storage_class = datastructures.ImmutableMultiDict
def test_multidict_is_hashable(self):
cls = self.storage_class
immutable = cls({'a': [1, 2], 'b': 2})
immutable2 = cls({'a': [1], 'b': 2})
x = set([immutable])
self.assert_true(immutable in x)
self.assert_true(immutable2 not in x)
x.discard(immutable)
self.assert_true(immutable not in x)
self.assert_true(immutable2 not in x)
x.add(immutable2)
self.assert_true(immutable not in x)
self.assert_true(immutable2 in x)
x.add(immutable)
self.assert_true(immutable in x)
self.assert_true(immutable2 in x)
class ImmutableDictTestCase(ImmutableDictBaseTestCase):
storage_class = datastructures.ImmutableDict
class ImmutableOrderedMultiDictTestCase(ImmutableDictBaseTestCase):
storage_class = datastructures.ImmutableOrderedMultiDict
def test_ordered_multidict_is_hashable(self):
a = self.storage_class([('a', 1), ('b', 1), ('a', 2)])
b = self.storage_class([('a', 1), ('a', 2), ('b', 1)])
self.assert_not_equal(hash(a), hash(b))
class MultiDictTestCase(MutableMultiDictBaseTestCase):
storage_class = datastructures.MultiDict
def test_multidict_pop(self):
make_d = lambda: self.storage_class({'foo': [1, 2, 3, 4]})
d = make_d()
self.assert_equal(d.pop('foo'), 1)
assert not d
d = make_d()
self.assert_equal(d.pop('foo', 32), 1)
assert not d
d = make_d()
self.assert_equal(d.pop('foos', 32), 32)
assert d
with self.assert_raises(KeyError):
d.pop('foos')
def test_setlistdefault(self):
md = self.storage_class()
self.assert_equal(md.setlistdefault('u', [-1, -2]), [-1, -2])
self.assert_equal(md.getlist('u'), [-1, -2])
self.assert_equal(md['u'], -1)
def test_iter_interfaces(self):
mapping = [('a', 1), ('b', 2), ('a', 2), ('d', 3),
('a', 1), ('a', 3), ('d', 4), ('c', 3)]
md = self.storage_class(mapping)
self.assert_equal(list(zip(md.keys(), md.listvalues())),
list(md.lists()))
self.assert_equal(list(zip(md, iterlistvalues(md))),
list(iterlists(md)))
self.assert_equal(list(zip(iterkeys(md), iterlistvalues(md))),
list(iterlists(md)))
class OrderedMultiDictTestCase(MutableMultiDictBaseTestCase):
storage_class = datastructures.OrderedMultiDict
def test_ordered_interface(self):
cls = self.storage_class
d = cls()
assert not d
d.add('foo', 'bar')
self.assert_equal(len(d), 1)
d.add('foo', 'baz')
self.assert_equal(len(d), 1)
self.assert_equal(list(iteritems(d)), [('foo', 'bar')])
self.assert_equal(list(d), ['foo'])
self.assert_equal(list(iteritems(d, multi=True)),
[('foo', 'bar'), ('foo', 'baz')])
del d['foo']
assert not d
self.assert_equal(len(d), 0)
self.assert_equal(list(d), [])
d.update([('foo', 1), ('foo', 2), ('bar', 42)])
d.add('foo', 3)
self.assert_equal(d.getlist('foo'), [1, 2, 3])
self.assert_equal(d.getlist('bar'), [42])
self.assert_equal(list(iteritems(d)), [('foo', 1), ('bar', 42)])
expected = ['foo', 'bar']
self.assert_sequence_equal(list(d.keys()), expected)
self.assert_sequence_equal(list(d), expected)
self.assert_sequence_equal(list(iterkeys(d)), expected)
self.assert_equal(list(iteritems(d, multi=True)),
[('foo', 1), ('foo', 2), ('bar', 42), ('foo', 3)])
self.assert_equal(len(d), 2)
self.assert_equal(d.pop('foo'), 1)
assert d.pop('blafasel', None) is None
self.assert_equal(d.pop('blafasel', 42), 42)
self.assert_equal(len(d), 1)
self.assert_equal(d.poplist('bar'), [42])
assert not d
d.get('missingkey') is None
d.add('foo', 42)
d.add('foo', 23)
d.add('bar', 2)
d.add('foo', 42)
self.assert_equal(d, datastructures.MultiDict(d))
id = self.storage_class(d)
self.assert_equal(d, id)
d.add('foo', 2)
assert d != id
d.update({'blah': [1, 2, 3]})
self.assert_equal(d['blah'], 1)
self.assert_equal(d.getlist('blah'), [1, 2, 3])
# setlist works
d = self.storage_class()
d['foo'] = 42
d.setlist('foo', [1, 2])
self.assert_equal(d.getlist('foo'), [1, 2])
with self.assert_raises(BadRequestKeyError):
d.pop('missing')
with self.assert_raises(BadRequestKeyError):
d['missing']
# popping
d = self.storage_class()
d.add('foo', 23)
d.add('foo', 42)
d.add('foo', 1)
self.assert_equal(d.popitem(), ('foo', 23))
with self.assert_raises(BadRequestKeyError):
d.popitem()
assert not d
d.add('foo', 23)
d.add('foo', 42)
d.add('foo', 1)
self.assert_equal(d.popitemlist(), ('foo', [23, 42, 1]))
with self.assert_raises(BadRequestKeyError):
d.popitemlist()
def test_iterables(self):
a = datastructures.MultiDict((("key_a", "value_a"),))
b = datastructures.MultiDict((("key_b", "value_b"),))
ab = datastructures.CombinedMultiDict((a,b))
self.assert_equal(sorted(ab.lists()), [('key_a', ['value_a']), ('key_b', ['value_b'])])
self.assert_equal(sorted(ab.listvalues()), [['value_a'], ['value_b']])
self.assert_equal(sorted(ab.keys()), ["key_a", "key_b"])
self.assert_equal(sorted(iterlists(ab)), [('key_a', ['value_a']), ('key_b', ['value_b'])])
self.assert_equal(sorted(iterlistvalues(ab)), [['value_a'], ['value_b']])
self.assert_equal(sorted(iterkeys(ab)), ["key_a", "key_b"])
class CombinedMultiDictTestCase(WerkzeugTestCase):
storage_class = datastructures.CombinedMultiDict
def test_basic_interface(self):
d1 = datastructures.MultiDict([('foo', '1')])
d2 = datastructures.MultiDict([('bar', '2'), ('bar', '3')])
d = self.storage_class([d1, d2])
# lookup
self.assert_equal(d['foo'], '1')
self.assert_equal(d['bar'], '2')
self.assert_equal(d.getlist('bar'), ['2', '3'])
self.assert_equal(sorted(d.items()),
[('bar', '2'), ('foo', '1')])
self.assert_equal(sorted(d.items(multi=True)),
[('bar', '2'), ('bar', '3'), ('foo', '1')])
assert 'missingkey' not in d
assert 'foo' in d
# type lookup
self.assert_equal(d.get('foo', type=int), 1)
self.assert_equal(d.getlist('bar', type=int), [2, 3])
# get key errors for missing stuff
with self.assert_raises(KeyError):
d['missing']
# make sure that they are immutable
with self.assert_raises(TypeError):
d['foo'] = 'blub'
# copies are immutable
d = d.copy()
with self.assert_raises(TypeError):
d['foo'] = 'blub'
# make sure lists merges
md1 = datastructures.MultiDict((("foo", "bar"),))
md2 = datastructures.MultiDict((("foo", "blafasel"),))
x = self.storage_class((md1, md2))
self.assert_equal(list(iterlists(x)), [('foo', ['bar', 'blafasel'])])
class HeadersTestCase(WerkzeugTestCase):
storage_class = datastructures.Headers
def test_basic_interface(self):
headers = self.storage_class()
headers.add('Content-Type', 'text/plain')
headers.add('X-Foo', 'bar')
assert 'x-Foo' in headers
assert 'Content-type' in headers
headers['Content-Type'] = 'foo/bar'
self.assert_equal(headers['Content-Type'], 'foo/bar')
self.assert_equal(len(headers.getlist('Content-Type')), 1)
# list conversion
self.assert_equal(headers.to_wsgi_list(), [
('Content-Type', 'foo/bar'),
('X-Foo', 'bar')
])
self.assert_equal(str(headers), (
"Content-Type: foo/bar\r\n"
"X-Foo: bar\r\n"
"\r\n"))
self.assert_equal(str(self.storage_class()), "\r\n")
# extended add
headers.add('Content-Disposition', 'attachment', filename='foo')
self.assert_equal(headers['Content-Disposition'],
'attachment; filename=foo')
headers.add('x', 'y', z='"')
self.assert_equal(headers['x'], r'y; z="\""')
def test_defaults_and_conversion(self):
# defaults
headers = self.storage_class([
('Content-Type', 'text/plain'),
('X-Foo', 'bar'),
('X-Bar', '1'),
('X-Bar', '2')
])
self.assert_equal(headers.getlist('x-bar'), ['1', '2'])
self.assert_equal(headers.get('x-Bar'), '1')
self.assert_equal(headers.get('Content-Type'), 'text/plain')
self.assert_equal(headers.setdefault('X-Foo', 'nope'), 'bar')
self.assert_equal(headers.setdefault('X-Bar', 'nope'), '1')
self.assert_equal(headers.setdefault('X-Baz', 'quux'), 'quux')
self.assert_equal(headers.setdefault('X-Baz', 'nope'), 'quux')
headers.pop('X-Baz')
# type conversion
self.assert_equal(headers.get('x-bar', type=int), 1)
self.assert_equal(headers.getlist('x-bar', type=int), [1, 2])
# list like operations
self.assert_equal(headers[0], ('Content-Type', 'text/plain'))
self.assert_equal(headers[:1], self.storage_class([('Content-Type', 'text/plain')]))
del headers[:2]
del headers[-1]
self.assert_equal(headers, self.storage_class([('X-Bar', '1')]))
def test_copying(self):
a = self.storage_class([('foo', 'bar')])
b = a.copy()
a.add('foo', 'baz')
self.assert_equal(a.getlist('foo'), ['bar', 'baz'])
self.assert_equal(b.getlist('foo'), ['bar'])
def test_popping(self):
headers = self.storage_class([('a', 1)])
self.assert_equal(headers.pop('a'), 1)
self.assert_equal(headers.pop('b', 2), 2)
with self.assert_raises(KeyError):
headers.pop('c')
def test_set_arguments(self):
a = self.storage_class()
a.set('Content-Disposition', 'useless')
a.set('Content-Disposition', 'attachment', filename='foo')
self.assert_equal(a['Content-Disposition'], 'attachment; filename=foo')
def test_reject_newlines(self):
h = self.storage_class()
for variation in 'foo\nbar', 'foo\r\nbar', 'foo\rbar':
with self.assert_raises(ValueError):
h['foo'] = variation
with self.assert_raises(ValueError):
h.add('foo', variation)
with self.assert_raises(ValueError):
h.add('foo', 'test', option=variation)
with self.assert_raises(ValueError):
h.set('foo', variation)
with self.assert_raises(ValueError):
h.set('foo', 'test', option=variation)
def test_slicing(self):
# there's nothing wrong with these being native strings
# Headers doesn't care about the data types
h = self.storage_class()
h.set('X-Foo-Poo', 'bleh')
h.set('Content-Type', 'application/whocares')
h.set('X-Forwarded-For', '192.168.0.123')
h[:] = [(k, v) for k, v in h if k.startswith(u'X-')]
self.assert_equal(list(h), [
('X-Foo-Poo', 'bleh'),
('X-Forwarded-For', '192.168.0.123')
])
def test_bytes_operations(self):
h = self.storage_class()
h.set('X-Foo-Poo', 'bleh')
h.set('X-Whoops', b'\xff')
self.assert_equal(h.get('x-foo-poo', as_bytes=True), b'bleh')
self.assert_equal(h.get('x-whoops', as_bytes=True), b'\xff')
class EnvironHeadersTestCase(WerkzeugTestCase):
storage_class = datastructures.EnvironHeaders
def test_basic_interface(self):
# this happens in multiple WSGI servers because they
# use a very naive way to convert the headers;
broken_env = {
'HTTP_CONTENT_TYPE': 'text/html',
'CONTENT_TYPE': 'text/html',
'HTTP_CONTENT_LENGTH': '0',
'CONTENT_LENGTH': '0',
'HTTP_ACCEPT': '*',
'wsgi.version': (1, 0)
}
headers = self.storage_class(broken_env)
assert headers
self.assert_equal(len(headers), 3)
self.assert_equal(sorted(headers), [
('Accept', '*'),
('Content-Length', '0'),
('Content-Type', 'text/html')
])
assert not self.storage_class({'wsgi.version': (1, 0)})
self.assert_equal(len(self.storage_class({'wsgi.version': (1, 0)})), 0)
def test_return_type_is_unicode(self):
# environ contains native strings; we return unicode
headers = self.storage_class({
'HTTP_FOO': '\xe2\x9c\x93',
'CONTENT_TYPE': 'text/plain',
})
self.assert_equal(headers['Foo'], u"\xe2\x9c\x93")
assert isinstance(headers['Foo'], text_type)
assert isinstance(headers['Content-Type'], text_type)
iter_output = dict(iter(headers))
self.assert_equal(iter_output['Foo'], u"\xe2\x9c\x93")
assert isinstance(iter_output['Foo'], text_type)
assert isinstance(iter_output['Content-Type'], text_type)
def test_bytes_operations(self):
foo_val = '\xff'
h = self.storage_class({
'HTTP_X_FOO': foo_val
})
self.assert_equal(h.get('x-foo', as_bytes=True), b'\xff')
self.assert_equal(h.get('x-foo'), u'\xff')
class HeaderSetTestCase(WerkzeugTestCase):
storage_class = datastructures.HeaderSet
def test_basic_interface(self):
hs = self.storage_class()
hs.add('foo')
hs.add('bar')
assert 'Bar' in hs
self.assert_equal(hs.find('foo'), 0)
self.assert_equal(hs.find('BAR'), 1)
assert hs.find('baz') < 0
hs.discard('missing')
hs.discard('foo')
assert hs.find('foo') < 0
self.assert_equal(hs.find('bar'), 0)
with self.assert_raises(IndexError):
hs.index('missing')
self.assert_equal(hs.index('bar'), 0)
assert hs
hs.clear()
assert not hs
class ImmutableListTestCase(WerkzeugTestCase):
storage_class = datastructures.ImmutableList
def test_list_hashable(self):
t = (1, 2, 3, 4)
l = self.storage_class(t)
self.assert_equal(hash(t), hash(l))
self.assert_not_equal(t, l)
def make_call_asserter(assert_equal_func, func=None):
"""Utility to assert a certain number of function calls.
>>> assert_calls, func = make_call_asserter(self.assert_equal)
>>> with assert_calls(2):
func()
func()
"""
calls = [0]
@contextmanager
def asserter(count, msg=None):
calls[0] = 0
yield
assert_equal_func(calls[0], count, msg)
def wrapped(*args, **kwargs):
calls[0] += 1
if func is not None:
return func(*args, **kwargs)
return asserter, wrapped
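# A minimal standalone sketch (not part of the original suite): the helpers
# returned by make_call_asserter also work with a plain assert-based callable
# instead of the TestCase methods used below; "_assert_equal" is an assumed name.
def _example_call_asserter_usage():
    def _assert_equal(actual, expected, msg=None):
        assert actual == expected, msg
    assert_calls, tracked = make_call_asserter(_assert_equal)
    with assert_calls(2):
        tracked()
        tracked()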
class CallbackDictTestCase(WerkzeugTestCase):
storage_class = datastructures.CallbackDict
def test_callback_dict_reads(self):
assert_calls, func = make_call_asserter(self.assert_equal)
initial = {'a': 'foo', 'b': 'bar'}
dct = self.storage_class(initial=initial, on_update=func)
with assert_calls(0, 'callback triggered by read-only method'):
# read-only methods
dct['a']
dct.get('a')
self.assert_raises(KeyError, lambda: dct['x'])
'a' in dct
list(iter(dct))
dct.copy()
with assert_calls(0, 'callback triggered without modification'):
# methods that may write but don't
dct.pop('z', None)
dct.setdefault('a')
def test_callback_dict_writes(self):
assert_calls, func = make_call_asserter(self.assert_equal)
initial = {'a': 'foo', 'b': 'bar'}
dct = self.storage_class(initial=initial, on_update=func)
with assert_calls(8, 'callback not triggered by write method'):
# always-write methods
dct['z'] = 123
dct['z'] = 123 # must trigger again
del dct['z']
dct.pop('b', None)
dct.setdefault('x')
dct.popitem()
dct.update([])
dct.clear()
with assert_calls(0, 'callback triggered by failed del'):
self.assert_raises(KeyError, lambda: dct.__delitem__('x'))
with assert_calls(0, 'callback triggered by failed pop'):
self.assert_raises(KeyError, lambda: dct.pop('x'))
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(MultiDictTestCase))
suite.addTest(unittest.makeSuite(OrderedMultiDictTestCase))
suite.addTest(unittest.makeSuite(CombinedMultiDictTestCase))
suite.addTest(unittest.makeSuite(ImmutableTypeConversionDictTestCase))
suite.addTest(unittest.makeSuite(ImmutableMultiDictTestCase))
suite.addTest(unittest.makeSuite(ImmutableDictTestCase))
suite.addTest(unittest.makeSuite(ImmutableOrderedMultiDictTestCase))
suite.addTest(unittest.makeSuite(HeadersTestCase))
suite.addTest(unittest.makeSuite(EnvironHeadersTestCase))
suite.addTest(unittest.makeSuite(HeaderSetTestCase))
suite.addTest(unittest.makeSuite(NativeItermethodsTestCase))
suite.addTest(unittest.makeSuite(CallbackDictTestCase))
return suite
| apache-2.0 | -3,760,828,392,168,323,000 | 33.883249 | 98 | 0.550531 | false |
G33KS44n/mysql-5.6 | xtrabackup/test/python/testtools/runtest.py | 42 | 7648 | # Copyright (c) 2009-2010 testtools developers. See LICENSE for details.
"""Individual test case execution."""
__all__ = [
'MultipleExceptions',
'RunTest',
]
import sys
from testtools.testresult import ExtendedToOriginalDecorator
class MultipleExceptions(Exception):
"""Represents many exceptions raised from some operation.
:ivar args: The sys.exc_info() tuples for each exception.
"""
class RunTest(object):
"""An object to run a test.
RunTest objects are used to implement the internal logic involved in
running a test. TestCase.__init__ stores _RunTest as the class of RunTest
to execute. Passing the runTest= parameter to TestCase.__init__ allows a
different RunTest class to be used to execute the test.
Subclassing or replacing RunTest can be useful to add functionality to the
way that tests are run in a given project.
:ivar case: The test case that is to be run.
:ivar result: The result object a case is reporting to.
:ivar handlers: A list of (ExceptionClass, handler_function) for
exceptions that should be caught if raised from the user
code. Exceptions that are caught are checked against this list in
first to last order. There is a catch-all of 'Exception' at the end
of the list, so to add a new exception to the list, insert it at the
front (which ensures that it will be checked before any existing base
        classes in the list). If you add multiple exceptions, some of which are
subclasses of each other, add the most specific exceptions last (so
they come before their parent classes in the list).
:ivar exception_caught: An object returned when _run_user catches an
exception.
:ivar _exceptions: A list of caught exceptions, used to do the single
reporting of error/failure/skip etc.
"""
def __init__(self, case, handlers=None):
"""Create a RunTest to run a case.
:param case: A testtools.TestCase test case object.
:param handlers: Exception handlers for this RunTest. These are stored
in self.handlers and can be modified later if needed.
"""
self.case = case
self.handlers = handlers or []
self.exception_caught = object()
self._exceptions = []
def run(self, result=None):
"""Run self.case reporting activity to result.
:param result: Optional testtools.TestResult to report activity to.
:return: The result object the test was run against.
"""
if result is None:
actual_result = self.case.defaultTestResult()
actual_result.startTestRun()
else:
actual_result = result
try:
return self._run_one(actual_result)
finally:
if result is None:
actual_result.stopTestRun()
def _run_one(self, result):
"""Run one test reporting to result.
:param result: A testtools.TestResult to report activity to.
This result object is decorated with an ExtendedToOriginalDecorator
to ensure that the latest TestResult API can be used with
confidence by client code.
:return: The result object the test was run against.
"""
return self._run_prepared_result(ExtendedToOriginalDecorator(result))
def _run_prepared_result(self, result):
"""Run one test reporting to result.
:param result: A testtools.TestResult to report activity to.
:return: The result object the test was run against.
"""
result.startTest(self.case)
self.result = result
try:
self._exceptions = []
self._run_core()
if self._exceptions:
# One or more caught exceptions, now trigger the test's
# reporting method for just one.
e = self._exceptions.pop()
for exc_class, handler in self.handlers:
if isinstance(e, exc_class):
handler(self.case, self.result, e)
break
finally:
result.stopTest(self.case)
return result
def _run_core(self):
"""Run the user supplied test code."""
if self.exception_caught == self._run_user(self.case._run_setup,
self.result):
# Don't run the test method if we failed getting here.
self._run_cleanups(self.result)
return
# Run everything from here on in. If any of the methods raise an
# exception we'll have failed.
failed = False
try:
if self.exception_caught == self._run_user(
self.case._run_test_method, self.result):
failed = True
finally:
try:
if self.exception_caught == self._run_user(
self.case._run_teardown, self.result):
failed = True
finally:
try:
if self.exception_caught == self._run_user(
self._run_cleanups, self.result):
failed = True
finally:
if not failed:
self.result.addSuccess(self.case,
details=self.case.getDetails())
def _run_cleanups(self, result):
"""Run the cleanups that have been added with addCleanup.
See the docstring for addCleanup for more information.
:return: None if all cleanups ran without error,
``exception_caught`` if there was an error.
"""
failing = False
while self.case._cleanups:
function, arguments, keywordArguments = self.case._cleanups.pop()
got_exception = self._run_user(
function, *arguments, **keywordArguments)
if got_exception == self.exception_caught:
failing = True
if failing:
return self.exception_caught
def _run_user(self, fn, *args, **kwargs):
"""Run a user supplied function.
Exceptions are processed by `_got_user_exception`.
:return: Either whatever 'fn' returns or ``exception_caught`` if
'fn' raised an exception.
"""
try:
return fn(*args, **kwargs)
except KeyboardInterrupt:
raise
except:
return self._got_user_exception(sys.exc_info())
def _got_user_exception(self, exc_info, tb_label='traceback'):
"""Called when user code raises an exception.
If 'exc_info' is a `MultipleExceptions`, then we recurse into it
unpacking the errors that it's made up from.
:param exc_info: A sys.exc_info() tuple for the user error.
:param tb_label: An optional string label for the error. If
not specified, will default to 'traceback'.
:return: 'exception_caught' if we catch one of the exceptions that
have handlers in 'handlers', otherwise raise the error.
"""
if exc_info[0] is MultipleExceptions:
for sub_exc_info in exc_info[1].args:
self._got_user_exception(sub_exc_info, tb_label)
return self.exception_caught
try:
e = exc_info[1]
self.case.onException(exc_info, tb_label=tb_label)
finally:
del exc_info
for exc_class, handler in self.handlers:
if isinstance(e, exc_class):
self._exceptions.append(e)
return self.exception_caught
raise e
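# Illustrative sketch (not part of testtools itself) of the handler ordering
# described in the RunTest docstring: pairs are checked first to last, so a
# subclass entry must come before its base class.  The lambdas only record the
# exception; a real handler would report to the result object instead.
def _example_handler_ordering():
    seen = []
    handlers = [
        (ValueError, lambda case, result, exc: seen.append(('value', exc))),
        (Exception, lambda case, result, exc: seen.append(('other', exc))),
    ]
    # RunTest(some_case, handlers).run(some_result)  # "some_case"/"some_result" are assumed stand-ins
    return handlers, seen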
| gpl-2.0 | 3,612,749,660,103,432,000 | 37.24 | 79 | 0.598326 | false |
garoose/eecs494.p2 | jni/external/freetype2/src/tools/docmaker/docmaker.py | 463 | 2766 | #!/usr/bin/env python
#
# DocMaker (c) 2002, 2004, 2008 David Turner <[email protected]>
#
# This program is a re-write of the original DocMaker tool used
# to generate the API Reference of the FreeType font engine
# by converting in-source comments into structured HTML.
#
# This new version is capable of outputting XML data, as well
# as accepting more liberal formatting options.
#
# It also uses regular expression matching and substitution
# to speed things significantly.
#
from sources import *
from content import *
from utils import *
from formatter import *
from tohtml import *
import utils
import sys, os, time, string, glob, getopt
def usage():
print "\nDocMaker Usage information\n"
print " docmaker [options] file1 [file2 ...]\n"
print "using the following options:\n"
print " -h : print this page"
print " -t : set project title, as in '-t \"My Project\"'"
print " -o : set output directory, as in '-o mydir'"
print " -p : set documentation prefix, as in '-p ft2'"
print ""
print " --title : same as -t, as in '--title=\"My Project\"'"
print " --output : same as -o, as in '--output=mydir'"
print " --prefix : same as -p, as in '--prefix=ft2'"
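# Illustrative invocation (not from the original sources); the title, output
# directory and header glob are assumptions:
#
#   python docmaker.py --title="FreeType-2" --output=docs --prefix=ft2 include/freetype/*.h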
def main( argv ):
"""main program loop"""
global output_dir
try:
opts, args = getopt.getopt( sys.argv[1:], \
"ht:o:p:", \
["help", "title=", "output=", "prefix="] )
except getopt.GetoptError:
usage()
sys.exit( 2 )
if args == []:
usage()
sys.exit( 1 )
# process options
#
project_title = "Project"
project_prefix = None
output_dir = None
for opt in opts:
if opt[0] in ( "-h", "--help" ):
usage()
sys.exit( 0 )
if opt[0] in ( "-t", "--title" ):
project_title = opt[1]
if opt[0] in ( "-o", "--output" ):
utils.output_dir = opt[1]
if opt[0] in ( "-p", "--prefix" ):
project_prefix = opt[1]
check_output()
# create context and processor
source_processor = SourceProcessor()
content_processor = ContentProcessor()
# retrieve the list of files to process
file_list = make_file_list( args )
for filename in file_list:
source_processor.parse_file( filename )
content_processor.parse_sources( source_processor )
# process sections
content_processor.finish()
formatter = HtmlFormatter( content_processor, project_title, project_prefix )
formatter.toc_dump()
formatter.index_dump()
formatter.section_dump_all()
# if called from the command line
#
if __name__ == '__main__':
main( sys.argv )
# eof
| mit | -2,914,701,671,262,738,000 | 25.09434 | 81 | 0.587852 | false |
lindycoder/fake-switches | fake_switches/dell10g/command_processor/enabled.py | 4 | 9868 | # Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from collections import namedtuple
from fake_switches import group_sequences
from fake_switches.dell.command_processor.enabled import DellEnabledCommandProcessor, to_vlan_ranges, _is_vlan_id, \
_assemble_elements_on_lines
from fake_switches.switch_configuration import VlanPort, AggregatedPort
class Dell10GEnabledCommandProcessor(DellEnabledCommandProcessor):
def __init__(self, config):
super(Dell10GEnabledCommandProcessor, self).__init__(config)
def get_port_configuration(self, port):
conf = []
if port.shutdown:
conf.append('shutdown')
if port.description:
conf.append("description \"{}\"".format(port.description))
if port.mode and port.mode != "access":
conf.append('switchport mode {}'.format(port.mode))
if port.access_vlan:
conf.append('switchport access vlan {}'.format(port.access_vlan))
if port.trunk_native_vlan:
conf.append('switchport general pvid {}'.format(port.trunk_native_vlan))
if port.trunk_vlans:
if port.mode == "general":
conf.append('switchport {} allowed vlan add {}'.format(port.mode, to_vlan_ranges(port.trunk_vlans)))
else:
conf.append('switchport trunk allowed vlan {}'.format(to_vlan_ranges(port.trunk_vlans)))
if port.spanning_tree is False:
conf.append("spanning-tree disable")
if port.spanning_tree_portfast:
conf.append("spanning-tree portfast")
if port.lldp_transmit is False:
conf.append('no lldp transmit')
if port.lldp_receive is False:
conf.append('no lldp receive')
if port.lldp_med is False:
conf.append('no lldp med')
if port.lldp_med_transmit_capabilities is False:
conf.append('no lldp med transmit-tlv capabilities')
if port.lldp_med_transmit_network_policy is False:
conf.append('no lldp med transmit-tlv network-policy')
return conf
def do_show(self, *args):
if "running-config".startswith(args[0]):
if len(args) == 1:
self.write_line('!Current Configuration:')
self.write_line('!System Description "............."')
self.write_line('!System Software Version 3.3.7.3')
self.write_line('!Cut-through mode is configured as disabled')
self.write_line('!')
self.write_line('configure')
self.write_vlans()
for port in self.switch_configuration.ports:
port_config = self.get_port_configuration(port)
if len(port_config) > 0:
self.write_line('interface %s' % port.name)
for item in port_config:
self.write_line(item)
self.write_line('exit')
self.write_line('!')
self.write_line('exit')
elif "interface".startswith(args[1]):
interface_name = ' '.join(args[2:])
port = self.switch_configuration.get_port_by_partial_name(interface_name)
if port:
if isinstance(port, VlanPort):
config = self.get_vlan_port_configuration(port)
else:
config = self.get_port_configuration(port)
if len(config) > 0:
for line in config:
self.write_line(line)
else:
self.write_line("")
self.write_line("")
else:
self.write_line("")
self.write_line("An invalid interface has been used for this function")
elif "vlan".startswith(args[0]):
if len(args) == 1:
self.show_vlans(self.switch_configuration.vlans)
elif args[1] == "id":
if len(args) < 3:
self.write_line("")
self.write_line("Command not found / Incomplete command. Use ? to list commands.")
self.write_line("")
elif not _is_vlan_id(args[2]):
self.write_line(" ^")
self.write_line("Invalid input. Please specify an integer in the range 1 to 4093.")
self.write_line("")
else:
vlan = self.switch_configuration.get_vlan(int(args[2]))
if vlan is None:
self.write_line("")
self.write_line("ERROR: This VLAN does not exist.")
self.write_line("")
else:
self.show_vlans([vlan])
elif "interfaces".startswith(args[0]) and "status".startswith(args[1]):
self.show_interfaces_status()
def write_vlans(self):
named_vlans = []
other_vlans = []
for v in self.switch_configuration.vlans:
if v.name is not None:
named_vlans.append(v)
else:
other_vlans.append(v)
for vlan in named_vlans:
self.write_line('vlan {}'.format(vlan.number))
if vlan.name is not None:
self.write_line('name {}'.format(vlan.name))
self.write_line('exit')
self.write_line('vlan {}'.format(to_vlan_ranges([v.number for v in other_vlans])))
self.write_line('exit')
def show_interfaces_status(self):
self.write_line("")
self.write_line("Port Description Vlan Duplex Speed Neg Link Flow Ctrl")
self.write_line(" State Status")
self.write_line("--------- ------------------------- ----- ------ ------- ---- ------ ---------")
for port in self.switch_configuration.ports:
if not isinstance(port, AggregatedPort):
self.write_line(
"Te{name: <7} {desc: <25} {vlan: <5} {duplex: <6} {speed: <7} {neg: <4} {state: <6} {flow}".format(
name=port.name.split(" ")[-1], desc=port.description[:25] if port.description else "", vlan="",
duplex="Full", speed="10000", neg="Auto", state="Up", flow="Active"))
self.write_line("")
self.write_line("")
self.write_line("Port Description Vlan Link")
self.write_line("Channel State")
self.write_line("------- ------------------------------ ----- -------")
for port in self.switch_configuration.ports:
if isinstance(port, AggregatedPort):
self.write_line(
"Po{name: <7} {desc: <28} {vlan: <5} {state}".format(
name=port.name.split(" ")[-1], desc=port.description[:28] if port.description else "",
vlan="trnk", state="Up"))
self.write_line("")
def show_vlans(self, vlans):
self.write_line("")
self.write_line("VLAN Name Ports Type")
self.write_line("----- --------------- ------------- --------------")
for vlan in vlans:
ports_strings = self._build_port_strings(self.get_ports_for_vlan(vlan))
self.write_line("{number: <5} {name: <32} {ports: <13} {type}".format(
number=vlan.number, name=vlan_name(vlan), ports=ports_strings[0],
type="Default" if vlan.number == 1 else "Static"))
for port_string in ports_strings[1:]:
self.write_line("{number: <5} {name: <32} {ports: <13} {type}".format(
number="", name="", ports=port_string, type=""))
self.write_line("")
def _build_port_strings(self, ports):
port_range_list = group_sequences(ports, are_in_sequence=self._are_in_sequence)
port_list = []
for port_range in port_range_list:
first_details = self._get_interface_details(port_range[0].name)
if len(port_range) == 1:
port_list.append("Te{}{}".format(first_details.port_prefix, first_details.port))
else:
port_list.append("Te{0}{1}-{2}".format(first_details.port_prefix, first_details.port, self._get_interface_details(port_range[-1].name).port))
return _assemble_elements_on_lines(port_list, max_line_char=13)
def _get_interface_details(self, interface_name):
interface_descriptor = namedtuple('InterfaceDescriptor', "interface port_prefix port")
re_port_number = re.compile('(\d/\d/)(\d+)')
interface, slot_descriptor = interface_name.split(" ")
port_prefix, port = re_port_number.match(slot_descriptor).groups()
return interface_descriptor(interface, port_prefix, int(port))
def do_terminal(self, *args):
self.write_line("")
def vlan_name(vlan):
if vlan.number == 1:
return "default"
elif vlan.name is not None:
return vlan.name
else:
return "VLAN{}".format(vlan.number)
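def _example_interface_name_parsing():
    # A minimal sketch (not part of fake-switches itself) showing the shape of
    # data _get_interface_details() works with; the port name
    # "tengigabitethernet 1/0/10" is an assumed example value.
    interface, slot_descriptor = "tengigabitethernet 1/0/10".split(" ")
    port_prefix, port = re.compile(r'(\d/\d/)(\d+)').match(slot_descriptor).groups()
    # -> interface='tengigabitethernet', port_prefix='1/0/', port=10
    return interface, port_prefix, int(port)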
| apache-2.0 | -2,264,788,642,547,545,000 | 44.685185 | 157 | 0.536786 | false |
rgommers/numpy | numpy/core/tests/test_shape_base.py | 4 | 27579 | import pytest
import numpy as np
from numpy.core import (
array, arange, atleast_1d, atleast_2d, atleast_3d, block, vstack, hstack,
newaxis, concatenate, stack
)
from numpy.core.shape_base import (_block_dispatcher, _block_setup,
_block_concatenate, _block_slicing)
from numpy.testing import (
assert_, assert_raises, assert_array_equal, assert_equal,
assert_raises_regex, assert_warns, IS_PYPY
)
class TestAtleast1d:
def test_0D_array(self):
a = array(1)
b = array(2)
res = [atleast_1d(a), atleast_1d(b)]
desired = [array([1]), array([2])]
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1, 2])
b = array([2, 3])
res = [atleast_1d(a), atleast_1d(b)]
desired = [array([1, 2]), array([2, 3])]
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
res = [atleast_1d(a), atleast_1d(b)]
desired = [a, b]
assert_array_equal(res, desired)
def test_3D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
a = array([a, a])
b = array([b, b])
res = [atleast_1d(a), atleast_1d(b)]
desired = [a, b]
assert_array_equal(res, desired)
    def test_r1array(self):
        """ Test to make sure it is equivalent to Travis O's r1array function
"""
assert_(atleast_1d(3).shape == (1,))
assert_(atleast_1d(3j).shape == (1,))
assert_(atleast_1d(3.0).shape == (1,))
assert_(atleast_1d([[2, 3], [4, 5]]).shape == (2, 2))
class TestAtleast2d:
def test_0D_array(self):
a = array(1)
b = array(2)
res = [atleast_2d(a), atleast_2d(b)]
desired = [array([[1]]), array([[2]])]
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1, 2])
b = array([2, 3])
res = [atleast_2d(a), atleast_2d(b)]
desired = [array([[1, 2]]), array([[2, 3]])]
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
res = [atleast_2d(a), atleast_2d(b)]
desired = [a, b]
assert_array_equal(res, desired)
def test_3D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
a = array([a, a])
b = array([b, b])
res = [atleast_2d(a), atleast_2d(b)]
desired = [a, b]
assert_array_equal(res, desired)
    def test_r2array(self):
        """ Test to make sure it is equivalent to Travis O's r2array function
"""
assert_(atleast_2d(3).shape == (1, 1))
assert_(atleast_2d([3j, 1]).shape == (1, 2))
assert_(atleast_2d([[[3, 1], [4, 5]], [[3, 5], [1, 2]]]).shape == (2, 2, 2))
class TestAtleast3d:
def test_0D_array(self):
a = array(1)
b = array(2)
res = [atleast_3d(a), atleast_3d(b)]
desired = [array([[[1]]]), array([[[2]]])]
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1, 2])
b = array([2, 3])
res = [atleast_3d(a), atleast_3d(b)]
desired = [array([[[1], [2]]]), array([[[2], [3]]])]
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
res = [atleast_3d(a), atleast_3d(b)]
desired = [a[:,:, newaxis], b[:,:, newaxis]]
assert_array_equal(res, desired)
def test_3D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
a = array([a, a])
b = array([b, b])
res = [atleast_3d(a), atleast_3d(b)]
desired = [a, b]
assert_array_equal(res, desired)
class TestHstack:
def test_non_iterable(self):
assert_raises(TypeError, hstack, 1)
def test_empty_input(self):
assert_raises(ValueError, hstack, ())
def test_0D_array(self):
a = array(1)
b = array(2)
res = hstack([a, b])
desired = array([1, 2])
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1])
b = array([2])
res = hstack([a, b])
desired = array([1, 2])
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1], [2]])
b = array([[1], [2]])
res = hstack([a, b])
desired = array([[1, 1], [2, 2]])
assert_array_equal(res, desired)
def test_generator(self):
with assert_warns(FutureWarning):
hstack((np.arange(3) for _ in range(2)))
with assert_warns(FutureWarning):
hstack(map(lambda x: x, np.ones((3, 2))))
class TestVstack:
def test_non_iterable(self):
assert_raises(TypeError, vstack, 1)
def test_empty_input(self):
assert_raises(ValueError, vstack, ())
def test_0D_array(self):
a = array(1)
b = array(2)
res = vstack([a, b])
desired = array([[1], [2]])
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1])
b = array([2])
res = vstack([a, b])
desired = array([[1], [2]])
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1], [2]])
b = array([[1], [2]])
res = vstack([a, b])
desired = array([[1], [2], [1], [2]])
assert_array_equal(res, desired)
def test_2D_array2(self):
a = array([1, 2])
b = array([1, 2])
res = vstack([a, b])
desired = array([[1, 2], [1, 2]])
assert_array_equal(res, desired)
def test_generator(self):
with assert_warns(FutureWarning):
vstack((np.arange(3) for _ in range(2)))
class TestConcatenate:
def test_returns_copy(self):
a = np.eye(3)
b = np.concatenate([a])
b[0, 0] = 2
assert b[0, 0] != a[0, 0]
def test_exceptions(self):
# test axis must be in bounds
for ndim in [1, 2, 3]:
a = np.ones((1,)*ndim)
np.concatenate((a, a), axis=0) # OK
assert_raises(np.AxisError, np.concatenate, (a, a), axis=ndim)
assert_raises(np.AxisError, np.concatenate, (a, a), axis=-(ndim + 1))
# Scalars cannot be concatenated
assert_raises(ValueError, concatenate, (0,))
assert_raises(ValueError, concatenate, (np.array(0),))
# dimensionality must match
assert_raises_regex(
ValueError,
r"all the input arrays must have same number of dimensions, but "
r"the array at index 0 has 1 dimension\(s\) and the array at "
r"index 1 has 2 dimension\(s\)",
np.concatenate, (np.zeros(1), np.zeros((1, 1))))
# test shapes must match except for concatenation axis
a = np.ones((1, 2, 3))
b = np.ones((2, 2, 3))
axis = list(range(3))
for i in range(3):
np.concatenate((a, b), axis=axis[0]) # OK
assert_raises_regex(
ValueError,
"all the input array dimensions for the concatenation axis "
"must match exactly, but along dimension {}, the array at "
"index 0 has size 1 and the array at index 1 has size 2"
.format(i),
np.concatenate, (a, b), axis=axis[1])
assert_raises(ValueError, np.concatenate, (a, b), axis=axis[2])
a = np.moveaxis(a, -1, 0)
b = np.moveaxis(b, -1, 0)
axis.append(axis.pop(0))
# No arrays to concatenate raises ValueError
assert_raises(ValueError, concatenate, ())
def test_concatenate_axis_None(self):
a = np.arange(4, dtype=np.float64).reshape((2, 2))
b = list(range(3))
c = ['x']
r = np.concatenate((a, a), axis=None)
assert_equal(r.dtype, a.dtype)
assert_equal(r.ndim, 1)
r = np.concatenate((a, b), axis=None)
assert_equal(r.size, a.size + len(b))
assert_equal(r.dtype, a.dtype)
r = np.concatenate((a, b, c), axis=None, dtype="U")
d = array(['0.0', '1.0', '2.0', '3.0',
'0', '1', '2', 'x'])
assert_array_equal(r, d)
out = np.zeros(a.size + len(b))
r = np.concatenate((a, b), axis=None)
rout = np.concatenate((a, b), axis=None, out=out)
assert_(out is rout)
assert_equal(r, rout)
def test_large_concatenate_axis_None(self):
# When no axis is given, concatenate uses flattened versions.
# This also had a bug with many arrays (see gh-5979).
x = np.arange(1, 100)
r = np.concatenate(x, None)
assert_array_equal(x, r)
# This should probably be deprecated:
r = np.concatenate(x, 100) # axis is >= MAXDIMS
assert_array_equal(x, r)
def test_concatenate(self):
# Test concatenate function
# One sequence returns unmodified (but as array)
r4 = list(range(4))
assert_array_equal(concatenate((r4,)), r4)
# Any sequence
assert_array_equal(concatenate((tuple(r4),)), r4)
assert_array_equal(concatenate((array(r4),)), r4)
# 1D default concatenation
r3 = list(range(3))
assert_array_equal(concatenate((r4, r3)), r4 + r3)
# Mixed sequence types
assert_array_equal(concatenate((tuple(r4), r3)), r4 + r3)
assert_array_equal(concatenate((array(r4), r3)), r4 + r3)
# Explicit axis specification
assert_array_equal(concatenate((r4, r3), 0), r4 + r3)
# Including negative
assert_array_equal(concatenate((r4, r3), -1), r4 + r3)
# 2D
a23 = array([[10, 11, 12], [13, 14, 15]])
a13 = array([[0, 1, 2]])
res = array([[10, 11, 12], [13, 14, 15], [0, 1, 2]])
assert_array_equal(concatenate((a23, a13)), res)
assert_array_equal(concatenate((a23, a13), 0), res)
assert_array_equal(concatenate((a23.T, a13.T), 1), res.T)
assert_array_equal(concatenate((a23.T, a13.T), -1), res.T)
        # Arrays must match shape
assert_raises(ValueError, concatenate, (a23.T, a13.T), 0)
# 3D
res = arange(2 * 3 * 7).reshape((2, 3, 7))
a0 = res[..., :4]
a1 = res[..., 4:6]
a2 = res[..., 6:]
assert_array_equal(concatenate((a0, a1, a2), 2), res)
assert_array_equal(concatenate((a0, a1, a2), -1), res)
assert_array_equal(concatenate((a0.T, a1.T, a2.T), 0), res.T)
out = res.copy()
rout = concatenate((a0, a1, a2), 2, out=out)
assert_(out is rout)
assert_equal(res, rout)
@pytest.mark.skipif(IS_PYPY, reason="PYPY handles sq_concat, nb_add differently than cpython")
def test_operator_concat(self):
import operator
a = array([1, 2])
b = array([3, 4])
n = [1,2]
res = array([1, 2, 3, 4])
assert_raises(TypeError, operator.concat, a, b)
assert_raises(TypeError, operator.concat, a, n)
assert_raises(TypeError, operator.concat, n, a)
assert_raises(TypeError, operator.concat, a, 1)
assert_raises(TypeError, operator.concat, 1, a)
def test_bad_out_shape(self):
a = array([1, 2])
b = array([3, 4])
assert_raises(ValueError, concatenate, (a, b), out=np.empty(5))
assert_raises(ValueError, concatenate, (a, b), out=np.empty((4,1)))
assert_raises(ValueError, concatenate, (a, b), out=np.empty((1,4)))
concatenate((a, b), out=np.empty(4))
@pytest.mark.parametrize("axis", [None, 0])
@pytest.mark.parametrize("out_dtype", ["c8", "f4", "f8", ">f8", "i8", "S4"])
@pytest.mark.parametrize("casting",
['no', 'equiv', 'safe', 'same_kind', 'unsafe'])
def test_out_and_dtype(self, axis, out_dtype, casting):
# Compare usage of `out=out` with `dtype=out.dtype`
out = np.empty(4, dtype=out_dtype)
to_concat = (array([1.1, 2.2]), array([3.3, 4.4]))
if not np.can_cast(to_concat[0], out_dtype, casting=casting):
with assert_raises(TypeError):
concatenate(to_concat, out=out, axis=axis, casting=casting)
with assert_raises(TypeError):
concatenate(to_concat, dtype=out.dtype,
axis=axis, casting=casting)
else:
res_out = concatenate(to_concat, out=out,
axis=axis, casting=casting)
res_dtype = concatenate(to_concat, dtype=out.dtype,
axis=axis, casting=casting)
assert res_out is out
assert_array_equal(out, res_dtype)
assert res_dtype.dtype == out_dtype
with assert_raises(TypeError):
concatenate(to_concat, out=out, dtype=out_dtype, axis=axis)
@pytest.mark.parametrize("axis", [None, 0])
@pytest.mark.parametrize("string_dt", ["S", "U", "S0", "U0"])
@pytest.mark.parametrize("arrs",
[([0.],), ([0.], [1]), ([0], ["string"], [1.])])
def test_dtype_with_promotion(self, arrs, string_dt, axis):
# Note that U0 and S0 should be deprecated eventually and changed to
# actually give the empty string result (together with `np.array`)
res = np.concatenate(arrs, axis=axis, dtype=string_dt, casting="unsafe")
# The actual dtype should be identical to a cast (of a double array):
assert res.dtype == np.array(1.).astype(string_dt).dtype
@pytest.mark.parametrize("axis", [None, 0])
def test_string_dtype_does_not_inspect(self, axis):
# The error here currently depends on NPY_USE_NEW_CASTINGIMPL as
# the new version rejects using the "default string length" of 64.
# The new behaviour is better, `np.array()` and `arr.astype()` would
# have to be used instead. (currently only raises due to unsafe cast)
with pytest.raises((ValueError, TypeError)):
np.concatenate(([None], [1]), dtype="S", axis=axis)
with pytest.raises((ValueError, TypeError)):
np.concatenate(([None], [1]), dtype="U", axis=axis)
@pytest.mark.parametrize("axis", [None, 0])
def test_subarray_error(self, axis):
with pytest.raises(TypeError, match=".*subarray dtype"):
np.concatenate(([1], [1]), dtype="(2,)i", axis=axis)
def test_stack():
# non-iterable input
assert_raises(TypeError, stack, 1)
# 0d input
for input_ in [(1, 2, 3),
[np.int32(1), np.int32(2), np.int32(3)],
[np.array(1), np.array(2), np.array(3)]]:
assert_array_equal(stack(input_), [1, 2, 3])
# 1d input examples
a = np.array([1, 2, 3])
b = np.array([4, 5, 6])
r1 = array([[1, 2, 3], [4, 5, 6]])
assert_array_equal(np.stack((a, b)), r1)
assert_array_equal(np.stack((a, b), axis=1), r1.T)
# all input types
assert_array_equal(np.stack(list([a, b])), r1)
assert_array_equal(np.stack(array([a, b])), r1)
# all shapes for 1d input
arrays = [np.random.randn(3) for _ in range(10)]
axes = [0, 1, -1, -2]
expected_shapes = [(10, 3), (3, 10), (3, 10), (10, 3)]
for axis, expected_shape in zip(axes, expected_shapes):
assert_equal(np.stack(arrays, axis).shape, expected_shape)
assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=2)
assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=-3)
# all shapes for 2d input
arrays = [np.random.randn(3, 4) for _ in range(10)]
axes = [0, 1, 2, -1, -2, -3]
expected_shapes = [(10, 3, 4), (3, 10, 4), (3, 4, 10),
(3, 4, 10), (3, 10, 4), (10, 3, 4)]
for axis, expected_shape in zip(axes, expected_shapes):
assert_equal(np.stack(arrays, axis).shape, expected_shape)
# empty arrays
assert_(stack([[], [], []]).shape == (3, 0))
assert_(stack([[], [], []], axis=1).shape == (0, 3))
# out
out = np.zeros_like(r1)
np.stack((a, b), out=out)
assert_array_equal(out, r1)
# edge cases
assert_raises_regex(ValueError, 'need at least one array', stack, [])
assert_raises_regex(ValueError, 'must have the same shape',
stack, [1, np.arange(3)])
assert_raises_regex(ValueError, 'must have the same shape',
stack, [np.arange(3), 1])
assert_raises_regex(ValueError, 'must have the same shape',
stack, [np.arange(3), 1], axis=1)
assert_raises_regex(ValueError, 'must have the same shape',
stack, [np.zeros((3, 3)), np.zeros(3)], axis=1)
assert_raises_regex(ValueError, 'must have the same shape',
stack, [np.arange(2), np.arange(3)])
# generator is deprecated
with assert_warns(FutureWarning):
result = stack((x for x in range(3)))
assert_array_equal(result, np.array([0, 1, 2]))
class TestBlock:
@pytest.fixture(params=['block', 'force_concatenate', 'force_slicing'])
def block(self, request):
        # blocking small arrays and large arrays goes through different paths.
# the algorithm is triggered depending on the number of element
# copies required.
# We define a test fixture that forces most tests to go through
# both code paths.
# Ultimately, this should be removed if a single algorithm is found
# to be faster for both small and large arrays.
def _block_force_concatenate(arrays):
arrays, list_ndim, result_ndim, _ = _block_setup(arrays)
return _block_concatenate(arrays, list_ndim, result_ndim)
def _block_force_slicing(arrays):
arrays, list_ndim, result_ndim, _ = _block_setup(arrays)
return _block_slicing(arrays, list_ndim, result_ndim)
if request.param == 'force_concatenate':
return _block_force_concatenate
elif request.param == 'force_slicing':
return _block_force_slicing
elif request.param == 'block':
return block
else:
raise ValueError('Unknown blocking request. There is a typo in the tests.')
def test_returns_copy(self, block):
a = np.eye(3)
b = block(a)
b[0, 0] = 2
assert b[0, 0] != a[0, 0]
def test_block_total_size_estimate(self, block):
_, _, _, total_size = _block_setup([1])
assert total_size == 1
_, _, _, total_size = _block_setup([[1]])
assert total_size == 1
_, _, _, total_size = _block_setup([[1, 1]])
assert total_size == 2
_, _, _, total_size = _block_setup([[1], [1]])
assert total_size == 2
_, _, _, total_size = _block_setup([[1, 2], [3, 4]])
assert total_size == 4
def test_block_simple_row_wise(self, block):
a_2d = np.ones((2, 2))
b_2d = 2 * a_2d
desired = np.array([[1, 1, 2, 2],
[1, 1, 2, 2]])
result = block([a_2d, b_2d])
assert_equal(desired, result)
def test_block_simple_column_wise(self, block):
a_2d = np.ones((2, 2))
b_2d = 2 * a_2d
expected = np.array([[1, 1],
[1, 1],
[2, 2],
[2, 2]])
result = block([[a_2d], [b_2d]])
assert_equal(expected, result)
def test_block_with_1d_arrays_row_wise(self, block):
        # 1-D vectors are treated as row arrays
a = np.array([1, 2, 3])
b = np.array([2, 3, 4])
expected = np.array([1, 2, 3, 2, 3, 4])
result = block([a, b])
assert_equal(expected, result)
def test_block_with_1d_arrays_multiple_rows(self, block):
a = np.array([1, 2, 3])
b = np.array([2, 3, 4])
expected = np.array([[1, 2, 3, 2, 3, 4],
[1, 2, 3, 2, 3, 4]])
result = block([[a, b], [a, b]])
assert_equal(expected, result)
def test_block_with_1d_arrays_column_wise(self, block):
        # 1-D vectors are treated as row arrays
a_1d = np.array([1, 2, 3])
b_1d = np.array([2, 3, 4])
expected = np.array([[1, 2, 3],
[2, 3, 4]])
result = block([[a_1d], [b_1d]])
assert_equal(expected, result)
def test_block_mixed_1d_and_2d(self, block):
a_2d = np.ones((2, 2))
b_1d = np.array([2, 2])
result = block([[a_2d], [b_1d]])
expected = np.array([[1, 1],
[1, 1],
[2, 2]])
assert_equal(expected, result)
def test_block_complicated(self, block):
# a bit more complicated
one_2d = np.array([[1, 1, 1]])
two_2d = np.array([[2, 2, 2]])
three_2d = np.array([[3, 3, 3, 3, 3, 3]])
four_1d = np.array([4, 4, 4, 4, 4, 4])
five_0d = np.array(5)
six_1d = np.array([6, 6, 6, 6, 6])
zero_2d = np.zeros((2, 6))
expected = np.array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 3, 3, 3],
[4, 4, 4, 4, 4, 4],
[5, 6, 6, 6, 6, 6],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]])
result = block([[one_2d, two_2d],
[three_2d],
[four_1d],
[five_0d, six_1d],
[zero_2d]])
assert_equal(result, expected)
def test_nested(self, block):
one = np.array([1, 1, 1])
two = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]])
three = np.array([3, 3, 3])
four = np.array([4, 4, 4])
five = np.array(5)
six = np.array([6, 6, 6, 6, 6])
zero = np.zeros((2, 6))
result = block([
[
block([
[one],
[three],
[four]
]),
two
],
[five, six],
[zero]
])
expected = np.array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 2, 2, 2],
[4, 4, 4, 2, 2, 2],
[5, 6, 6, 6, 6, 6],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]])
assert_equal(result, expected)
def test_3d(self, block):
a000 = np.ones((2, 2, 2), int) * 1
a100 = np.ones((3, 2, 2), int) * 2
a010 = np.ones((2, 3, 2), int) * 3
a001 = np.ones((2, 2, 3), int) * 4
a011 = np.ones((2, 3, 3), int) * 5
a101 = np.ones((3, 2, 3), int) * 6
a110 = np.ones((3, 3, 2), int) * 7
a111 = np.ones((3, 3, 3), int) * 8
result = block([
[
[a000, a001],
[a010, a011],
],
[
[a100, a101],
[a110, a111],
]
])
expected = array([[[1, 1, 4, 4, 4],
[1, 1, 4, 4, 4],
[3, 3, 5, 5, 5],
[3, 3, 5, 5, 5],
[3, 3, 5, 5, 5]],
[[1, 1, 4, 4, 4],
[1, 1, 4, 4, 4],
[3, 3, 5, 5, 5],
[3, 3, 5, 5, 5],
[3, 3, 5, 5, 5]],
[[2, 2, 6, 6, 6],
[2, 2, 6, 6, 6],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8]],
[[2, 2, 6, 6, 6],
[2, 2, 6, 6, 6],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8]],
[[2, 2, 6, 6, 6],
[2, 2, 6, 6, 6],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8]]])
assert_array_equal(result, expected)
def test_block_with_mismatched_shape(self, block):
a = np.array([0, 0])
b = np.eye(2)
assert_raises(ValueError, block, [a, b])
assert_raises(ValueError, block, [b, a])
to_block = [[np.ones((2,3)), np.ones((2,2))],
[np.ones((2,2)), np.ones((2,2))]]
assert_raises(ValueError, block, to_block)
def test_no_lists(self, block):
assert_equal(block(1), np.array(1))
assert_equal(block(np.eye(3)), np.eye(3))
def test_invalid_nesting(self, block):
msg = 'depths are mismatched'
assert_raises_regex(ValueError, msg, block, [1, [2]])
assert_raises_regex(ValueError, msg, block, [1, []])
assert_raises_regex(ValueError, msg, block, [[1], 2])
assert_raises_regex(ValueError, msg, block, [[], 2])
assert_raises_regex(ValueError, msg, block, [
[[1], [2]],
[[3, 4]],
[5] # missing brackets
])
def test_empty_lists(self, block):
assert_raises_regex(ValueError, 'empty', block, [])
assert_raises_regex(ValueError, 'empty', block, [[]])
assert_raises_regex(ValueError, 'empty', block, [[1], []])
def test_tuple(self, block):
assert_raises_regex(TypeError, 'tuple', block, ([1, 2], [3, 4]))
assert_raises_regex(TypeError, 'tuple', block, [(1, 2), (3, 4)])
def test_different_ndims(self, block):
a = 1.
b = 2 * np.ones((1, 2))
c = 3 * np.ones((1, 1, 3))
result = block([a, b, c])
expected = np.array([[[1., 2., 2., 3., 3., 3.]]])
assert_equal(result, expected)
def test_different_ndims_depths(self, block):
a = 1.
b = 2 * np.ones((1, 2))
c = 3 * np.ones((1, 2, 3))
result = block([[a, b], [c]])
expected = np.array([[[1., 2., 2.],
[3., 3., 3.],
[3., 3., 3.]]])
assert_equal(result, expected)
def test_block_memory_order(self, block):
# 3D
arr_c = np.zeros((3,)*3, order='C')
arr_f = np.zeros((3,)*3, order='F')
b_c = [[[arr_c, arr_c],
[arr_c, arr_c]],
[[arr_c, arr_c],
[arr_c, arr_c]]]
b_f = [[[arr_f, arr_f],
[arr_f, arr_f]],
[[arr_f, arr_f],
[arr_f, arr_f]]]
assert block(b_c).flags['C_CONTIGUOUS']
assert block(b_f).flags['F_CONTIGUOUS']
arr_c = np.zeros((3, 3), order='C')
arr_f = np.zeros((3, 3), order='F')
# 2D
b_c = [[arr_c, arr_c],
[arr_c, arr_c]]
b_f = [[arr_f, arr_f],
[arr_f, arr_f]]
assert block(b_c).flags['C_CONTIGUOUS']
assert block(b_f).flags['F_CONTIGUOUS']
def test_block_dispatcher():
class ArrayLike:
pass
a = ArrayLike()
b = ArrayLike()
c = ArrayLike()
assert_equal(list(_block_dispatcher(a)), [a])
assert_equal(list(_block_dispatcher([a])), [a])
assert_equal(list(_block_dispatcher([a, b])), [a, b])
assert_equal(list(_block_dispatcher([[a], [b, [c]]])), [a, b, c])
# don't recurse into non-lists
assert_equal(list(_block_dispatcher((a, b))), [(a, b)])
| bsd-3-clause | 4,504,274,726,209,969,000 | 35.003916 | 98 | 0.490917 | false |
cole945/binutils-gdb | gdb/testsuite/gdb.python/py-frame-args.py | 41 | 2186 | # Copyright (C) 2013-2014 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import gdb
class pp_s (object):
def __init__(self, val):
self.val = val
def to_string(self):
m = self.val["m"]
return "m=<" + str(self.val["m"]) + ">"
class pp_ss (object):
def __init__(self, val):
self.val = val
def to_string(self):
return "super struct"
def children (self):
yield 'a', self.val['a']
yield 'b', self.val['b']
def lookup_function (val):
"Look-up and return a pretty-printer that can print val."
# Get the type.
type = val.type
# If it points to a reference, get the reference.
if type.code == gdb.TYPE_CODE_REF:
type = type.target ()
# Get the unqualified type, stripped of typedefs.
type = type.unqualified ().strip_typedefs ()
# Get the type name.
typename = type.tag
if typename == None:
return None
# Iterate over local dictionary of types to determine
# if a printer is registered for that type. Return an
# instantiation of the printer if found.
for function in pretty_printers_dict:
if function.match (typename):
return pretty_printers_dict[function] (val)
# Cannot find a pretty printer. Return None.
return None
def register_pretty_printers ():
pretty_printers_dict[re.compile ('^s$')] = pp_s
pretty_printers_dict[re.compile ('^ss$')] = pp_ss
pretty_printers_dict = {}
register_pretty_printers ()
gdb.pretty_printers.append (lookup_function)
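# Illustrative extension (not part of the original test): another struct tag is
# handled by registering one more regex -> printer pair in pretty_printers_dict;
# "xy" and pp_xy are assumed names.
#
#     class pp_xy (object):
#         def __init__(self, val):
#             self.val = val
#         def to_string(self):
#             return "x=<" + str(self.val["x"]) + ">"
#     pretty_printers_dict[re.compile ('^xy$')] = pp_xy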
| gpl-2.0 | 1,987,379,879,682,412,500 | 28.146667 | 71 | 0.658737 | false |
spulec/moto | tests/test_resourcegroups/test_resourcegroups.py | 1 | 8542 | from __future__ import unicode_literals
import boto3
import json
import sure # noqa
from moto import mock_resourcegroups
@mock_resourcegroups
def test_create_group():
resource_groups = boto3.client("resource-groups", region_name="us-east-1")
response = resource_groups.create_group(
Name="test_resource_group",
Description="description",
ResourceQuery={
"Type": "TAG_FILTERS_1_0",
"Query": json.dumps(
{
"ResourceTypeFilters": ["AWS::AllSupported"],
"TagFilters": [
{"Key": "resources_tag_key", "Values": ["resources_tag_value"]}
],
}
),
},
Tags={"resource_group_tag_key": "resource_group_tag_value"},
)
response["Group"]["Name"].should.contain("test_resource_group")
response["ResourceQuery"]["Type"].should.contain("TAG_FILTERS_1_0")
response["Tags"]["resource_group_tag_key"].should.contain(
"resource_group_tag_value"
)
@mock_resourcegroups
def test_delete_group():
resource_groups = boto3.client("resource-groups", region_name="us-east-1")
test_create_group()
response = resource_groups.delete_group(GroupName="test_resource_group")
response["Group"]["Name"].should.contain("test_resource_group")
response = resource_groups.list_groups()
response["GroupIdentifiers"].should.have.length_of(0)
response["Groups"].should.have.length_of(0)
@mock_resourcegroups
def test_get_group():
resource_groups = boto3.client("resource-groups", region_name="us-east-1")
test_create_group()
response = resource_groups.get_group(GroupName="test_resource_group")
response["Group"]["Description"].should.contain("description")
return response
@mock_resourcegroups
def test_get_group_query():
resource_groups = boto3.client("resource-groups", region_name="us-east-1")
get_response = test_get_group()
response = resource_groups.get_group_query(GroupName="test_resource_group")
response["GroupQuery"]["ResourceQuery"]["Type"].should.contain("TAG_FILTERS_1_0")
response_get = resource_groups.get_group_query(
Group=get_response.get("Group").get("GroupArn")
)
response_get["GroupQuery"]["ResourceQuery"]["Type"].should.contain(
"TAG_FILTERS_1_0"
)
@mock_resourcegroups
def test_get_tags():
resource_groups = boto3.client("resource-groups", region_name="us-east-1")
response = test_get_group()
response = resource_groups.get_tags(Arn=response["Group"]["GroupArn"])
response["Tags"].should.have.length_of(1)
response["Tags"]["resource_group_tag_key"].should.contain(
"resource_group_tag_value"
)
return response
@mock_resourcegroups
def test_list_groups():
resource_groups = boto3.client("resource-groups", region_name="us-east-1")
test_create_group()
response = resource_groups.list_groups()
response["GroupIdentifiers"].should.have.length_of(1)
response["Groups"].should.have.length_of(1)
@mock_resourcegroups
def test_tag():
resource_groups = boto3.client("resource-groups", region_name="us-east-1")
response = test_get_tags()
response = resource_groups.tag(
Arn=response["Arn"],
Tags={"resource_group_tag_key_2": "resource_group_tag_value_2"},
)
response["Tags"]["resource_group_tag_key_2"].should.contain(
"resource_group_tag_value_2"
)
response = resource_groups.get_tags(Arn=response["Arn"])
response["Tags"].should.have.length_of(2)
response["Tags"]["resource_group_tag_key_2"].should.contain(
"resource_group_tag_value_2"
)
@mock_resourcegroups
def test_untag():
resource_groups = boto3.client("resource-groups", region_name="us-east-1")
response = test_get_tags()
response = resource_groups.untag(
Arn=response["Arn"], Keys=["resource_group_tag_key"]
)
response["Keys"].should.contain("resource_group_tag_key")
response = resource_groups.get_tags(Arn=response["Arn"])
response["Tags"].should.have.length_of(0)
@mock_resourcegroups
def test_update_group():
resource_groups = boto3.client("resource-groups", region_name="us-east-1")
get_response = test_get_group()
response = resource_groups.update_group(
GroupName="test_resource_group", Description="description_2"
)
response["Group"]["Description"].should.contain("description_2")
response = resource_groups.get_group(GroupName="test_resource_group")
response["Group"]["Description"].should.contain("description_2")
@mock_resourcegroups
def test_get_group_configuration():
resource_groups = boto3.client("resource-groups", region_name="us-east-1")
group = test_get_group()
configuration = [
{
"Type": "AWS::ResourceGroups::Generic",
"Parameters": [
{"Name": "allowed-resource-types", "Values": ["AWS::EC2::Host"]},
{"Name": "deletion-protection", "Values": ["UNLESS_EMPTY"]},
],
}
]
resource_groups.put_group_configuration(
Group=group["Group"]["Name"], Configuration=configuration
)
configuration_resp = resource_groups.get_group_configuration(
Group=group["Group"]["Name"]
)
assert (
configuration_resp.get("GroupConfiguration").get("Configuration")
== configuration
)
@mock_resourcegroups
def test_create_group_with_configuration():
resource_groups = boto3.client("resource-groups", region_name="us-east-1")
configuration = [
{
"Type": "AWS::ResourceGroups::Generic",
"Parameters": [
{"Name": "allowed-resource-types", "Values": ["AWS::EC2::Host"]},
{"Name": "deletion-protection", "Values": ["UNLESS_EMPTY"]},
],
}
]
response = resource_groups.create_group(
Name="test_resource_group_new",
Description="description",
ResourceQuery={
"Type": "TAG_FILTERS_1_0",
"Query": json.dumps(
{
"ResourceTypeFilters": ["AWS::AllSupported"],
"TagFilters": [
{"Key": "resources_tag_key", "Values": ["resources_tag_value"]}
],
}
),
},
Configuration=configuration,
Tags={"resource_group_tag_key": "resource_group_tag_value"},
)
response["Group"]["Name"].should.contain("test_resource_group_new")
assert response["GroupConfiguration"]["Configuration"] == configuration
response["Tags"]["resource_group_tag_key"].should.contain(
"resource_group_tag_value"
)
@mock_resourcegroups
def test_update_group_query():
resource_groups = boto3.client("resource-groups", region_name="us-east-1")
group_response = test_get_group()
response = resource_groups.update_group_query(
GroupName="test_resource_group",
ResourceQuery={
"Type": "CLOUDFORMATION_STACK_1_0",
"Query": json.dumps(
{
"ResourceTypeFilters": ["AWS::AllSupported"],
"StackIdentifier": (
"arn:aws:cloudformation:eu-west-1:012345678912:stack/"
"test_stack/c223eca0-e744-11e8-8910-500c41f59083"
),
}
),
},
)
response["GroupQuery"]["ResourceQuery"]["Type"].should.contain(
"CLOUDFORMATION_STACK_1_0"
)
response = resource_groups.get_group_query(GroupName="test_resource_group")
response["GroupQuery"]["ResourceQuery"]["Type"].should.contain(
"CLOUDFORMATION_STACK_1_0"
)
response = resource_groups.update_group_query(
Group=group_response.get("Group").get("GroupArn"),
ResourceQuery={
"Type": "TAG_FILTERS_1_0",
"Query": json.dumps(
{
"ResourceTypeFilters": ["AWS::AllSupported"],
"TagFilters": [
{"Key": "resources_tag_key", "Values": ["resources_tag_value"]}
],
}
),
},
)
response["GroupQuery"]["ResourceQuery"]["Type"].should.contain("TAG_FILTERS_1_0")
response = resource_groups.get_group_query(
Group=group_response.get("Group").get("GroupArn")
)
response["GroupQuery"]["ResourceQuery"]["Type"].should.contain("TAG_FILTERS_1_0")
| apache-2.0 | 5,990,390,767,077,057,000 | 29.949275 | 87 | 0.604542 | false |
kisel/trex-core | scripts/external_libs/nose-1.3.4/python3/nose/tools/nontrivial.py | 11 | 4170 | """Tools not exempt from being descended into in tracebacks"""
import time
__all__ = ['make_decorator', 'raises', 'set_trace', 'timed', 'with_setup',
'TimeExpired', 'istest', 'nottest']
class TimeExpired(AssertionError):
pass
def make_decorator(func):
"""
Wraps a test decorator so as to properly replicate metadata
of the decorated function, including nose's additional stuff
(namely, setup and teardown).
"""
def decorate(newfunc):
if hasattr(func, 'compat_func_name'):
name = func.compat_func_name
else:
name = func.__name__
newfunc.__dict__ = func.__dict__
newfunc.__doc__ = func.__doc__
newfunc.__module__ = func.__module__
if not hasattr(newfunc, 'compat_co_firstlineno'):
newfunc.compat_co_firstlineno = func.__code__.co_firstlineno
try:
newfunc.__name__ = name
except TypeError:
# can't set func name in 2.3
newfunc.compat_func_name = name
return newfunc
return decorate
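# A minimal sketch (not part of nose itself) of using make_decorator when
# writing a custom test decorator, mirroring how raises() and timed() below
# wrap their replacement functions; "_example_retry_once" is an assumed name.
def _example_retry_once(func):
    def newfunc(*arg, **kw):
        try:
            return func(*arg, **kw)
        except AssertionError:
            return func(*arg, **kw)
    newfunc = make_decorator(func)(newfunc)
    return newfunc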
def raises(*exceptions):
"""Test must raise one of expected exceptions to pass.
Example use::
@raises(TypeError, ValueError)
def test_raises_type_error():
raise TypeError("This test passes")
@raises(Exception)
def test_that_fails_by_passing():
pass
If you want to test many assertions about exceptions in a single test,
you may want to use `assert_raises` instead.
"""
valid = ' or '.join([e.__name__ for e in exceptions])
def decorate(func):
name = func.__name__
def newfunc(*arg, **kw):
try:
func(*arg, **kw)
except exceptions:
pass
except:
raise
else:
message = "%s() did not raise %s" % (name, valid)
raise AssertionError(message)
newfunc = make_decorator(func)(newfunc)
return newfunc
return decorate
def set_trace():
"""Call pdb.set_trace in the calling frame, first restoring
sys.stdout to the real output stream. Note that sys.stdout is NOT
reset to whatever it was before the call once pdb is done!
"""
import pdb
import sys
stdout = sys.stdout
sys.stdout = sys.__stdout__
pdb.Pdb().set_trace(sys._getframe().f_back)
def timed(limit):
"""Test must finish within specified time limit to pass.
Example use::
@timed(.1)
def test_that_fails():
time.sleep(.2)
"""
def decorate(func):
def newfunc(*arg, **kw):
start = time.time()
result = func(*arg, **kw)
end = time.time()
if end - start > limit:
raise TimeExpired("Time limit (%s) exceeded" % limit)
return result
newfunc = make_decorator(func)(newfunc)
return newfunc
return decorate
def with_setup(setup=None, teardown=None):
"""Decorator to add setup and/or teardown methods to a test function::
@with_setup(setup, teardown)
def test_something():
" ... "
Note that `with_setup` is useful *only* for test functions, not for test
methods or inside of TestCase subclasses.
"""
def decorate(func, setup=setup, teardown=teardown):
if setup:
if hasattr(func, 'setup'):
_old_s = func.setup
def _s():
setup()
_old_s()
func.setup = _s
else:
func.setup = setup
if teardown:
if hasattr(func, 'teardown'):
_old_t = func.teardown
def _t():
_old_t()
teardown()
func.teardown = _t
else:
func.teardown = teardown
return func
return decorate
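# Illustrative use of with_setup (not part of nose itself); the fixture list
# and test name are assumptions:
#
#     items = []
#     @with_setup(setup=lambda: items.append('ready'), teardown=items.clear)
#     def test_items_have_fixture():
#         assert items == ['ready']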
def istest(func):
"""Decorator to mark a function or method as a test
"""
func.__test__ = True
return func
def nottest(func):
"""Decorator to mark a function or method as *not* a test
"""
func.__test__ = False
return func
| apache-2.0 | 414,459,001,142,602,940 | 26.615894 | 76 | 0.548921 | false |
cloudsigma/cgroupspy | cgroupspy/test/test_interfaces.py | 1 | 8905 | """
Copyright (c) 2014, CloudSigma AG
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the CloudSigma AG nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL CloudSigma AG BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from unittest import TestCase
import mock
from ..contenttypes import DeviceAccess, DeviceThrottle
from ..interfaces import BaseFileInterface, FlagFile, BitFieldFile, CommaDashSetFile, DictFile, \
IntegerFile, IntegerListFile, ListFile, MultiLineIntegerFile, TypedFile
class FaceHolder(object):
face = None
def __init__(self, init_value):
self.val = init_value
self.last_filename = None
def get_property(self, filename):
self.last_filename = filename
return self.val
def set_property(self, filename, val):
self.last_filename = filename
self.val = str(val)
class InterfacesTest(TestCase):
def patch_face(self, **kwargs):
patch = mock.patch.multiple(FaceHolder, **kwargs)
patch.start()
self.addCleanup(patch.stop)
def test_base(self):
self.patch_face(face=BaseFileInterface("myfile1"))
fh = FaceHolder("23")
self.assertEqual(fh.face, "23")
fh.face = 44
self.assertEqual(fh.face, "44")
def test_flagfile(self):
self.patch_face(face=FlagFile("flagfile"))
fh = FaceHolder("1")
self.assertEqual(fh.face, True)
fh.face = False
self.assertEqual(fh.face, False)
self.assertEqual(fh.val, "0")
fh.face = None
self.assertEqual(fh.face, False)
self.assertEqual(fh.val, "0")
fh.face = 1
self.assertEqual(fh.face, True)
self.assertEqual(fh.val, "1")
def test_bitfieldfile(self):
self.patch_face(face=BitFieldFile("bitfieldfile"))
fh = FaceHolder("2")
self.assertEqual(fh.face, [False, True, False, False, False, False, False, False])
fh.face = [False]
self.assertEqual(fh.face, [False, False, False, False, False, False, False, False])
self.assertEqual(fh.val, "0")
fh.face = [False, True, True]
self.assertEqual(fh.face, [False, True, True, False, False, False, False, False])
self.assertEqual(fh.val, "6")
def test_comma_dash(self):
self.patch_face(face=CommaDashSetFile("commadash"))
fh = FaceHolder("")
self.assertEqual(fh.face, set())
fh = FaceHolder(" ")
self.assertEqual(fh.face, set())
fh.face = set()
self.assertEqual(fh.face, set())
fh.face = []
self.assertEqual(fh.face, set())
fh.face = {}
self.assertEqual(fh.face, set())
fh = FaceHolder("1,2,4-6,18-23,7")
expected = {1, 2, 4, 5, 6, 7, 18, 19, 20, 21, 22, 23}
self.assertEqual(fh.face, expected)
fh.face = {1, 2, 3}
self.assertEqual(fh.face, {1, 2, 3})
self.assertEqual(fh.val, "1-3")
fh.face = [1, 2, 3]
self.assertEqual(fh.face, {1, 2, 3})
self.assertEqual(fh.val, "1-3")
fh.face = [1, 2, 2, 3]
self.assertEqual(fh.face, {1, 2, 3})
self.assertEqual(fh.val, "1-3")
fh.face = {1}
self.assertEqual(fh.face, {1})
self.assertEqual(fh.val, "1")
fh.face = {}
self.assertEqual(fh.face, set([]))
self.assertEqual(fh.val, " ")

    def test_dict_file(self):
self.patch_face(face=DictFile("dictfile"))
fh = FaceHolder("ala 123\nbala 123\nnica 456")
self.assertEqual(fh.face, {"ala": 123, "bala": 123, "nica": 456})

    def test_int_file(self):
self.patch_face(face=IntegerFile("intfile"))
fh = FaceHolder("16")
self.assertEqual(fh.face, 16)
fh.face = 18
self.assertEqual(fh.face, 18)
self.assertEqual(fh.val, "18")
fh.face = None
self.assertEqual(fh.face, None)
self.assertEqual(fh.val, "-1")

    def test_int_list(self):
self.patch_face(face=IntegerListFile("intlistfile"))
fh = FaceHolder("16 18 20")
self.assertEqual(fh.face, [16, 18, 20])

    def test_list(self):
self.patch_face(face=ListFile("listfile"))
fh = FaceHolder("16 18 20")
self.assertEqual(fh.face, ["16", "18", "20"])

    def test_multiline_int(self):
self.patch_face(face=MultiLineIntegerFile("multiint"))
fh = FaceHolder("16\n18\n20\n22")
self.assertEqual(fh.face, [16, 18, 20, 22])
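

# The remaining tests are nose-style generator tests at module level: each one
# yields (check_function, *args) tuples so that every encoded/decoded pair runs
# as its own test case.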
def test_comma_dash_more():
pairs = [
("1-2,4-7,18-23", {1, 2, 4, 5, 6, 7, 18, 19, 20, 21, 22, 23}),
("1,3-4,6,8-9,11", {1, 3, 4, 6, 8, 9, 11}),
("4,8-10,12", {4, 8, 9, 10, 12}),
("1-3", {1, 2, 3}),
("1", {1}),
]
for encoded, data in pairs:
yield check_comma_dash_case, encoded, data


def check_comma_dash_case(encoded, data):
with mock.patch.multiple(FaceHolder, face=CommaDashSetFile("commadash")):
fh = FaceHolder(encoded)
assert fh.face == data
fh.face = data
assert fh.face == data
assert fh.val == encoded
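

# DeviceThrottle entries are expected to serialize as "major:minor limit", with
# "*" standing in for a wildcard (None) major or minor.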
def test_device_throttle():
pairs = [
("1:2 100", DeviceThrottle(major=1, minor=2, limit=100)),
("1:2 100", DeviceThrottle(major='1', minor='2', limit=100)),
("0:0 100", DeviceThrottle(major=0, minor=0, limit=100)),
("*:* 100", DeviceThrottle(major=None, minor=None, limit=100)),
("0:* 100", DeviceThrottle(major=0, minor=None, limit=100)),
]
for encoded, data in pairs:
yield check_device_throttle_case, encoded, data
yield check_device_throttle_many, " \n", []
yield check_device_throttle_many, "\n".join([p[0] for p in pairs]), [p[1] for p in pairs]


def check_device_throttle_many(encoded, data):
with mock.patch.multiple(FaceHolder, face=TypedFile("device_throttle", DeviceThrottle, many=True)):
fh = FaceHolder(encoded)
assert fh.face == data


def check_device_throttle_case(encoded, data):
with mock.patch.multiple(FaceHolder, face=TypedFile("device_throttle", DeviceThrottle, many=False)):
fh = FaceHolder(encoded)
assert fh.face == data
fh.face = data
assert fh.face == data
assert fh.val == encoded
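

# DeviceAccess entries are expected to serialize as "type major:minor access"
# (e.g. "c 1:3 rwm"); access may be given either as the string "rwm" or as the
# equivalent bitmask (7), and wildcards again render as "*".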
def test_device_access():
pairs = [
("c 1:3 rwm", DeviceAccess(dev_type="c", major=1, minor=3, access="rwm")),
("c 1:3 rwm", DeviceAccess(dev_type="c", major='1', minor='3', access=7)),
("c 5:* rwm", DeviceAccess(dev_type="c", major=5, minor=None, access="rwm")),
("c 5:0 rwm", DeviceAccess(dev_type="c", major=5, minor=0, access="rwm")),
("b *:* rwm", DeviceAccess(dev_type="b", major=None, minor=None, access="rwm")),
("b 0:0 rwm", DeviceAccess(dev_type="b", major=0, minor=0, access="rwm")),
("c 136:* rw", DeviceAccess(dev_type="c", major=136, minor=None, access="rw")),
]
for encoded, data in pairs:
yield check_device_access_case, encoded, data
yield check_device_access_many, " \n", []
yield check_device_access_many, "\n".join([p[0] for p in pairs]), [p[1] for p in pairs]


def check_device_access_case(encoded, data):
with mock.patch.multiple(FaceHolder, face=TypedFile("device_access", DeviceAccess, many=False)):
fh = FaceHolder(encoded)
assert fh.face == data
fh.face = data
assert fh.face == data
assert fh.val == encoded


def check_device_access_many(encoded, data):
with mock.patch.multiple(FaceHolder, face=TypedFile("device_access", DeviceAccess, many=True)):
fh = FaceHolder(encoded)
assert fh.face == data
| bsd-3-clause | -7,094,775,070,949,177,000 | 33.118774 | 104 | 0.623358 | false |
imre-kerr/swarm-epuck | controllers/swarm_controller/webann.py | 2 | 1182 | # This uses the EpuckBasic code as the interface to webots, and the epuck2 code to connect an ANN
# to webots.
import epuck2
import epuck_basic as epb
import graph
import prims1
# The webann is a descendent of the webot "controller" class, and it has the ANN as an attribute.
class WebAnn(epb.EpuckBasic):
def __init__(self, tempo = 1.0, e_thresh = 125, nvect = True, cvect = True, svect = True, band = 'bw', concol = 1.0, snapshow = True,
ann_cycles = 1, agent_cycles = 5, act_noise = 0.1, tfile = "redman4"):
epb.EpuckBasic.__init__(self)
self.basic_setup() # defined for EpuckBasic
self.ann = epuck2.annpuck2(agent = self, e_thresh = e_thresh, nvect = nvect, cvect = cvect, svect = svect, band = band, snapshow = snapshow,
concol = concol, ann_cycles = ann_cycles, agent_cycles = agent_cycles, act_noise = act_noise,
tfile = tfile)
def long_run(self,steps = 500):
self.ann.simsteps = steps
self.spin_angle(prims1.randab(0,360))
self.ann.redman_run()
#*** MAIN ***
# Webots expects a controller to be created and activated at the bottom of the controller file.
controller = WebAnn(tempo = 1.0, band = 'gray')
controller.long_run(40)
| mit | -1,951,144,392,266,925,000 | 34.818182 | 141 | 0.687817 | false |
terbolous/CouchPotatoServer | libs/pyutil/test/deprecated/test_picklesaver.py | 106 | 1340 | #!/usr/bin/env python
# Copyright (c) 2002 Luke 'Artimage' Nelson
# Copyright (c) 2005-2010 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
import os
try:
from twisted.trial import unittest
except ImportError, le:
print "Skipping %s since it requires Twisted and Twisted could not be imported: %s" % (__name__, le,)
else:
from pyutil import PickleSaver, fileutil
class Thingie(PickleSaver.PickleSaver):
def __init__(self, fname, delay=30):
PickleSaver.PickleSaver.__init__(self, fname=fname, attrs={'tmp_store':'False'}, DELAY=delay)
class PickleSaverTest(unittest.TestCase):
def _test_save_now(self, fname):
thingie = Thingie(fname, delay=0)
thingie.tmp_store = 'True'
thingie.lazy_save() # Note: it was constructed with default save delay of 0.
def test_save_now(self):
"""
This test should create a lazy save object, save it with no delay and check if the file exists.
"""
tempdir = fileutil.NamedTemporaryDirectory()
fname = os.path.join(tempdir.name, "picklesavertest")
self._test_save_now(fname)
self.failUnless(os.path.isfile(fname), "The file [%s] does not exist." %(fname,))
tempdir.shutdown()
| gpl-3.0 | 6,023,810,781,254,622,000 | 36.222222 | 107 | 0.635821 | false |
colinbrislawn/bioconda-recipes | recipes/fgbio/fgbio.py | 7 | 2575 | #!/opt/anaconda1anaconda2anaconda3/bin/python
#
# Wrapper script for invoking the jar.
#
# This script is written for use with the Conda package manager and is ported
# from a bash script that does the same thing, adapting the style in
# the peptide-shaker wrapper
# (https://github.com/bioconda/bioconda-recipes/blob/master/recipes/peptide-shaker/peptide-shaker.py)
import subprocess
import sys
from os import access, getenv, path, X_OK
# Expected name of the JAR file.
JAR_NAME = 'fgbio.jar'
# Default options passed to the `java` executable.
DEFAULT_JVM_MEM_OPTS = ['-Xms512m', '-Xmx4g']
def real_dirname(in_path):
"""Returns the symlink-resolved, canonicalized directory-portion of
the given path."""
return path.dirname(path.realpath(in_path))
def java_executable():
"""Returns the name of the Java executable.
Use JAVA_HOME, or local anaconda java, or just `java` in the PATH.
"""
java_home = getenv('JAVA_HOME')
java_bin = path.join('bin', 'java')
if java_home and access(path.join(java_home, java_bin), X_OK):
return path.join(java_home, java_bin)
conda_java_bin = path.join(real_dirname(sys.executable), "java")
if conda_java_bin and path.exists(conda_java_bin) and access(conda_java_bin, X_OK):
return conda_java_bin
return 'java'
def jvm_opts(argv, default_mem_opts=DEFAULT_JVM_MEM_OPTS):
"""Constructs a list of Java arguments based on our argument list.
The argument list passed in argv must not include the script name.
The return value is a 3-tuple lists of strings of the form:
(memory_options, prop_options, passthrough_options)
"""
mem_opts, prop_opts, pass_args = [], [], []
for arg in argv:
if arg.startswith('-D') or arg.startswith('-XX'):
opts_list = prop_opts
elif arg.startswith('-Xm'):
opts_list = mem_opts
else:
opts_list = pass_args
opts_list.append(arg)
if mem_opts == [] and getenv('_JAVA_OPTIONS') is None:
mem_opts = default_mem_opts
return (mem_opts, prop_opts, pass_args)
def main():
java = java_executable()
jar_dir = real_dirname(sys.argv[0])
(mem_opts, prop_opts, pass_args) = jvm_opts(sys.argv[1:])
if pass_args != [] and pass_args[0].startswith('org'):
jar_arg = '-cp'
else:
jar_arg = '-jar'
jar_path = path.join(jar_dir, JAR_NAME)
java_args = [java] + mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args
sys.exit(subprocess.call(java_args))
if __name__ == "__main__":
main()
| mit | -1,821,160,741,558,966,800 | 29.294118 | 101 | 0.649709 | false |
alexlo03/ansible | lib/ansible/modules/crypto/openssl_privatekey.py | 25 | 10503 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Yanis Guenane <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: openssl_privatekey
author: "Yanis Guenane (@Spredzy)"
version_added: "2.3"
short_description: Generate OpenSSL private keys.
description:
- "This module allows one to (re)generate OpenSSL private keys. It uses
the pyOpenSSL python library to interact with openssl. One can generate
either RSA or DSA private keys. Keys are generated in PEM format."
requirements:
- "python-pyOpenSSL"
options:
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the private key should exist or not, taking action if the state is different from what is stated.
size:
required: false
default: 4096
description:
- Size (in bits) of the TLS/SSL key to generate
type:
required: false
default: "RSA"
choices: [ RSA, DSA ]
description:
- The algorithm used to generate the TLS/SSL private key
force:
required: false
default: False
type: bool
description:
- Should the key be regenerated even if it already exists
path:
required: true
description:
- Name of the file in which the generated TLS/SSL private key will be written. It will have 0600 mode.
passphrase:
required: false
description:
- The passphrase for the private key.
version_added: "2.4"
cipher:
required: false
description:
- The cipher to encrypt the private key. (cipher can be found by running `openssl list-cipher-algorithms`)
version_added: "2.4"
extends_documentation_fragment: files
'''
EXAMPLES = '''
# Generate an OpenSSL private key with the default values (4096 bits, RSA)
- openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
# Generate an OpenSSL private key with the default values (4096 bits, RSA)
# and a passphrase
- openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
passphrase: ansible
cipher: aes256
# Generate an OpenSSL private key with a different size (2048 bits)
- openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
size: 2048
# Force regenerate an OpenSSL private key if it already exists
- openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
force: True
# Generate an OpenSSL private key with a different algorithm (DSA)
- openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
type: DSA
'''
RETURN = '''
size:
description: Size (in bits) of the TLS/SSL private key
returned: changed or success
type: int
sample: 4096
type:
description: Algorithm used to generate the TLS/SSL private key
returned: changed or success
type: string
sample: RSA
filename:
description: Path to the generated TLS/SSL private key file
returned: changed or success
type: string
sample: /etc/ssl/private/ansible.com.pem
fingerprint:
description: The fingerprint of the public key. Fingerprint will be generated for each hashlib.algorithms available.
Requires PyOpenSSL >= 16.0 for meaningful output.
returned: changed or success
type: dict
sample:
md5: "84:75:71:72:8d:04:b5:6c:4d:37:6d:66:83:f5:4c:29"
sha1: "51:cc:7c:68:5d:eb:41:43:88:7e:1a:ae:c7:f8:24:72:ee:71:f6:10"
sha224: "b1:19:a6:6c:14:ac:33:1d:ed:18:50:d3:06:5c:b2:32:91:f1:f1:52:8c:cb:d5:75:e9:f5:9b:46"
sha256: "41:ab:c7:cb:d5:5f:30:60:46:99:ac:d4:00:70:cf:a1:76:4f:24:5d:10:24:57:5d:51:6e:09:97:df:2f:de:c7"
sha384: "85:39:50:4e:de:d9:19:33:40:70:ae:10:ab:59:24:19:51:c3:a2:e4:0b:1c:b1:6e:dd:b3:0c:d9:9e:6a:46:af:da:18:f8:ef:ae:2e:c0:9a:75:2c:9b:b3:0f:3a:5f:3d"
sha512: "fd:ed:5e:39:48:5f:9f:fe:7f:25:06:3f:79:08:cd:ee:a5:e7:b3:3d:13:82:87:1f:84:e1:f5:c7:28:77:53:94:86:56:38:69:f0:d9:35:22:01:1e:a6:60:...:0f:9b"
'''
import os
import traceback
try:
from OpenSSL import crypto
except ImportError:
pyopenssl_found = False
else:
pyopenssl_found = True
from ansible.module_utils import crypto as crypto_utils
from ansible.module_utils._text import to_native, to_bytes
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import string_types
class PrivateKeyError(crypto_utils.OpenSSLObjectError):
pass
class PrivateKey(crypto_utils.OpenSSLObject):
def __init__(self, module):
super(PrivateKey, self).__init__(
module.params['path'],
module.params['state'],
module.params['force'],
module.check_mode
)
self.size = module.params['size']
self.passphrase = module.params['passphrase']
self.cipher = module.params['cipher']
self.privatekey = None
self.fingerprint = {}
self.mode = module.params.get('mode', None)
if self.mode is None:
self.mode = 0o600
self.type = crypto.TYPE_RSA
if module.params['type'] == 'DSA':
self.type = crypto.TYPE_DSA
def generate(self, module):
"""Generate a keypair."""
if not self.check(module, perms_required=False) or self.force:
self.privatekey = crypto.PKey()
try:
self.privatekey.generate_key(self.type, self.size)
except (TypeError, ValueError) as exc:
raise PrivateKeyError(exc)
try:
privatekey_file = os.open(self.path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
os.close(privatekey_file)
if isinstance(self.mode, string_types):
try:
self.mode = int(self.mode, 8)
except ValueError as e:
try:
st = os.lstat(self.path)
self.mode = AnsibleModule._symbolic_mode_to_octal(st, self.mode)
except ValueError as e:
module.fail_json(msg="%s" % to_native(e), exception=traceback.format_exc())
os.chmod(self.path, self.mode)
privatekey_file = os.open(self.path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, self.mode)
if self.cipher and self.passphrase:
os.write(privatekey_file, crypto.dump_privatekey(crypto.FILETYPE_PEM, self.privatekey,
self.cipher, to_bytes(self.passphrase)))
else:
os.write(privatekey_file, crypto.dump_privatekey(crypto.FILETYPE_PEM, self.privatekey))
os.close(privatekey_file)
self.changed = True
except IOError as exc:
self.remove()
raise PrivateKeyError(exc)
self.fingerprint = crypto_utils.get_fingerprint(self.path, self.passphrase)
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False):
self.changed = True
def check(self, module, perms_required=True):
"""Ensure the resource is in its desired state."""
state_and_perms = super(PrivateKey, self).check(module, perms_required)
def _check_size(privatekey):
return self.size == privatekey.bits()
def _check_type(privatekey):
return self.type == privatekey.type()
def _check_passphrase():
try:
crypto_utils.load_privatekey(self.path, self.passphrase)
return True
except crypto.Error:
return False
if not state_and_perms or not _check_passphrase():
return False
privatekey = crypto_utils.load_privatekey(self.path, self.passphrase)
return _check_size(privatekey) and _check_type(privatekey)
def dump(self):
"""Serialize the object into a dictionary."""
result = {
'size': self.size,
'filename': self.path,
'changed': self.changed,
'fingerprint': self.fingerprint,
}
if self.type == crypto.TYPE_RSA:
result['type'] = 'RSA'
else:
result['type'] = 'DSA'
return result
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
size=dict(default=4096, type='int'),
type=dict(default='RSA', choices=['RSA', 'DSA'], type='str'),
force=dict(default=False, type='bool'),
path=dict(required=True, type='path'),
passphrase=dict(type='str', no_log=True),
cipher=dict(type='str'),
),
supports_check_mode=True,
add_file_common_args=True,
required_together=[['cipher', 'passphrase']],
)
if not pyopenssl_found:
module.fail_json(msg='the python pyOpenSSL module is required')
base_dir = os.path.dirname(module.params['path'])
if not os.path.isdir(base_dir):
module.fail_json(
name=base_dir,
msg='The directory %s does not exist or the file is not a directory' % base_dir
)
private_key = PrivateKey(module)
if private_key.state == 'present':
if module.check_mode:
result = private_key.dump()
result['changed'] = module.params['force'] or not private_key.check(module)
module.exit_json(**result)
try:
private_key.generate(module)
except PrivateKeyError as exc:
module.fail_json(msg=to_native(exc))
else:
if module.check_mode:
result = private_key.dump()
result['changed'] = os.path.exists(module.params['path'])
module.exit_json(**result)
try:
private_key.remove()
except PrivateKeyError as exc:
module.fail_json(msg=to_native(exc))
result = private_key.dump()
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | -7,459,033,477,999,473,000 | 32.663462 | 159 | 0.601066 | false |
jhseu/tensorflow | tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/shapes_for_arguments.py | 21 | 1840 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# RUN: %p/shapes_for_arguments | FileCheck %s
# pylint: disable=missing-docstring,line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common
class TestModule(tf.Module):
# Check that we get shapes annotated on function arguments.
#
# Besides checking the shape on the function input argument, this test also
# checks that the shape on the input argument is propagated to the return
# value.
# We eventually want to move the shape inference to a pass separate from
# the initial import, in which case that aspect of this test doesn't make much
# sense and will be superceded by MLIR->MLIR shape inference tests.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}(%arg0: tensor<f32> {{.*}}) -> (tensor<f32> {{.*}})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["some_function"]
@tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
def some_function(self, x):
return x
if __name__ == '__main__':
common.do_test(TestModule)
| apache-2.0 | 8,293,778,285,904,808,000 | 39 | 90 | 0.695109 | false |
Hasimir/pyjs | examples/widgets/__main__.py | 8 | 1045 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
TARGETS = [
'Widgets.py',
]
PACKAGE = {
'title': 'Widgets',
'desc': 'Widgets example',
}
def setup(targets):
'''Setup example for translation, MUST call util.setup(targets).'''
util.setup(targets)
def translate():
'''Translate example, MUST call util.translate().'''
util.translate()
def install(package):
'''Install and cleanup example module. MUST call util.install(package)'''
util.install(package)
##---------------------------------------##
# --------- (-: DO NOT EDIT :-) --------- #
##---------------------------------------##
import sys
import os
examples = head = os.path.abspath(os.path.dirname(__file__))
while os.path.split(examples)[1].lower() != 'examples':
examples = os.path.split(examples)[0]
if not examples:
raise ValueError("Cannot determine examples directory")
sys.path.insert(0, os.path.join(examples))
from _examples import util
sys.path.pop(0)
util.init(head)
setup(TARGETS)
translate()
install(PACKAGE)
| apache-2.0 | 2,599,677,662,264,465,000 | 18.716981 | 77 | 0.591388 | false |
vaporry/dapp-bin | serpent_gamble/prepare.py | 5 | 2914 | # This is an ultra-minimal "dapp framework" that simplifies the deployment
# process by generating a config.js file that specifies the ABI for all of
# the contracts that you need and also includes the ability to update
# addresses. By default, it simply scans through every .se and .sol file in
# the current directory and adds and ABI object for each one into a JS
# object stored under window.accounts, with the key of each ABI object being
# the filename noc including the .se/.sol ending. For example, wallet.sol
# will create window.accounts.wallet = { abi: ... }
#
# You can also use the x=y syntax to set an address. For example, if you
# call python prepare.py admin=0xde0b295669a9fd93d5f28d9ec85e40f4cb697bae,
# then in the JS object you will get window.accounts.admin =
# '0xde0b295669a9fd93d5f28d9ec85e40f4cb697bae'; this persists if you call
# python prepare.py after without setting the argument again as the script
# always tries to read the value set from the previous time the config.js
# file was created.
#
# Example use:
#
# Step 1: serpent compile currency.se
# Step 2: in eth/geth/pyeth, send a transaction to create a contract whose
# code is the output of the previous line
# Step 3: get the contract address of the contract you created. Suppose that
# this address is 0xde0b295669a9fd93d5f28d9ec85e40f4cb697bae
# Step 4: run
# python prepare.py currency=0xde0b295669a9fd93d5f28d9ec85e40f4cb697bae
# Step 5: make sure to include config.js as a javascript file in your
# application.
import sys
import json
import serpent
import os
solidity = None
accounts = {}
# Fill in contract ABI declarations
for f in os.listdir(os.getcwd()):
if f[-3:] == ".se":
accounts[f[:-3]] = {"abi": serpent.mk_full_signature(f)}
elif f[-4:] == ".sol":
if not solidity:
from ethereum import _solidity
solidity = _solidity.get_solidity()
accounts[f[:-4]] = {"abi": solidity.mk_full_signature(open(f).read())}
# Fill in previously known addresses
if 'config.js' in os.listdir(os.getcwd()):
data = open('config.js').read()
code = json.loads(data[data.find('{'):])
# For contracts (ie. objects that contain an 'abi' parameter), if
# we detect a .se or .sol file removed then we do not add the
# associated address from the registry. For regular accounts, we
# transfer all of them over
for k, v in code.items():
if 'address' in v and (k in accounts or 'abi' not in v):
if k not in accounts:
accounts[k] = {}
accounts[k]["address"] = v['address']
# Fill in addresses from sys.argv
for arg in sys.argv:
if '=' in arg:
k, v = arg.split('=')
if len(v) == 40:
v = '0x' + v
if k not in accounts:
accounts[k] = {}
accounts[k]["address"] = v
open('config.js', 'w').write("window.accounts = " + json.dumps(accounts, indent=4))
| mit | -3,283,710,245,870,542,000 | 41.852941 | 83 | 0.683253 | false |
Zhongqilong/mykbengineer | kbe/res/scripts/common/Lib/profile.py | 102 | 22021 | #! /usr/bin/env python3
#
# Class for profiling python code. rev 1.0 6/2/94
#
# Written by James Roskind
# Based on prior profile module by Sjoerd Mullender...
# which was hacked somewhat by: Guido van Rossum
"""Class for profiling Python code."""
# Copyright Disney Enterprises, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import sys
import os
import time
import marshal
from optparse import OptionParser
__all__ = ["run", "runctx", "Profile"]
# Sample timer for use with
#i_count = 0
#def integer_timer():
# global i_count
# i_count = i_count + 1
# return i_count
#itimes = integer_timer # replace with C coded timer returning integers
class _Utils:
"""Support class for utility functions which are shared by
profile.py and cProfile.py modules.
Not supposed to be used directly.
"""
def __init__(self, profiler):
self.profiler = profiler
def run(self, statement, filename, sort):
prof = self.profiler()
try:
prof.run(statement)
except SystemExit:
pass
finally:
self._show(prof, filename, sort)
def runctx(self, statement, globals, locals, filename, sort):
prof = self.profiler()
try:
prof.runctx(statement, globals, locals)
except SystemExit:
pass
finally:
self._show(prof, filename, sort)
def _show(self, prof, filename, sort):
if filename is not None:
prof.dump_stats(filename)
else:
prof.print_stats(sort)
#**************************************************************************
# The following are the static member functions for the profiler class
# Note that an instance of Profile() is *not* needed to call them.
#**************************************************************************
def run(statement, filename=None, sort=-1):
"""Run statement under profiler optionally saving results in filename
This function takes a single argument that can be passed to the
"exec" statement, and an optional file name. In all cases this
routine attempts to "exec" its first argument and gather profiling
statistics from the execution. If no file name is present, then this
function automatically prints a simple profiling report, sorted by the
standard name string (file/line/function-name) that is presented in
each line.
"""
return _Utils(Profile).run(statement, filename, sort)
def runctx(statement, globals, locals, filename=None, sort=-1):
"""Run statement under profiler, supplying your own globals and locals,
optionally saving results in filename.
statement and filename have the same semantics as profile.run
"""
return _Utils(Profile).runctx(statement, globals, locals, filename, sort)
class Profile:
"""Profiler class.
self.cur is always a tuple. Each such tuple corresponds to a stack
frame that is currently active (self.cur[-2]). The following are the
definitions of its members. We use this external "parallel stack" to
avoid contaminating the program that we are profiling. (old profiler
used to write into the frames local dictionary!!) Derived classes
can change the definition of some entries, as long as they leave
[-2:] intact (frame and previous tuple). In case an internal error is
detected, the -3 element is used as the function name.
[ 0] = Time that needs to be charged to the parent frame's function.
It is used so that a function call will not have to access the
timing data for the parent frame.
[ 1] = Total time spent in this frame's function, excluding time in
subfunctions (this latter is tallied in cur[2]).
[ 2] = Total time spent in subfunctions, excluding time executing the
frame's function (this latter is tallied in cur[1]).
[-3] = Name of the function that corresponds to this frame.
[-2] = Actual frame that we correspond to (used to sync exception handling).
[-1] = Our parent 6-tuple (corresponds to frame.f_back).
Timing data for each function is stored as a 5-tuple in the dictionary
self.timings[]. The index is always the name stored in self.cur[-3].
The following are the definitions of the members:
[0] = The number of times this function was called, not counting direct
or indirect recursion,
[1] = Number of times this function appears on the stack, minus one
[2] = Total time spent internal to this function
[3] = Cumulative time that this function was present on the stack. In
non-recursive functions, this is the total execution time from start
to finish of each invocation of a function, including time spent in
all subfunctions.
[4] = A dictionary indicating for each function name, the number of times
it was called by us.
"""
bias = 0 # calibration constant
def __init__(self, timer=None, bias=None):
self.timings = {}
self.cur = None
self.cmd = ""
self.c_func_name = ""
if bias is None:
bias = self.bias
self.bias = bias # Materialize in local dict for lookup speed.
if not timer:
self.timer = self.get_time = time.process_time
self.dispatcher = self.trace_dispatch_i
else:
self.timer = timer
t = self.timer() # test out timer function
try:
length = len(t)
except TypeError:
self.get_time = timer
self.dispatcher = self.trace_dispatch_i
else:
if length == 2:
self.dispatcher = self.trace_dispatch
else:
self.dispatcher = self.trace_dispatch_l
# This get_time() implementation needs to be defined
# here to capture the passed-in timer in the parameter
# list (for performance). Note that we can't assume
# the timer() result contains two values in all
# cases.
def get_time_timer(timer=timer, sum=sum):
return sum(timer())
self.get_time = get_time_timer
self.t = self.get_time()
self.simulate_call('profiler')
# Heavily optimized dispatch routine for os.times() timer
def trace_dispatch(self, frame, event, arg):
timer = self.timer
t = timer()
t = t[0] + t[1] - self.t - self.bias
if event == "c_call":
self.c_func_name = arg.__name__
if self.dispatch[event](self, frame,t):
t = timer()
self.t = t[0] + t[1]
else:
r = timer()
self.t = r[0] + r[1] - t # put back unrecorded delta
# Dispatch routine for best timer program (return = scalar, fastest if
# an integer but float works too -- and time.clock() relies on that).
def trace_dispatch_i(self, frame, event, arg):
timer = self.timer
t = timer() - self.t - self.bias
if event == "c_call":
self.c_func_name = arg.__name__
if self.dispatch[event](self, frame, t):
self.t = timer()
else:
self.t = timer() - t # put back unrecorded delta
# Dispatch routine for macintosh (timer returns time in ticks of
# 1/60th second)
def trace_dispatch_mac(self, frame, event, arg):
timer = self.timer
t = timer()/60.0 - self.t - self.bias
if event == "c_call":
self.c_func_name = arg.__name__
if self.dispatch[event](self, frame, t):
self.t = timer()/60.0
else:
self.t = timer()/60.0 - t # put back unrecorded delta
# SLOW generic dispatch routine for timer returning lists of numbers
def trace_dispatch_l(self, frame, event, arg):
get_time = self.get_time
t = get_time() - self.t - self.bias
if event == "c_call":
self.c_func_name = arg.__name__
if self.dispatch[event](self, frame, t):
self.t = get_time()
else:
self.t = get_time() - t # put back unrecorded delta
# In the event handlers, the first 3 elements of self.cur are unpacked
# into vrbls w/ 3-letter names. The last two characters are meant to be
# mnemonic:
# _pt self.cur[0] "parent time" time to be charged to parent frame
# _it self.cur[1] "internal time" time spent directly in the function
# _et self.cur[2] "external time" time spent in subfunctions
def trace_dispatch_exception(self, frame, t):
rpt, rit, ret, rfn, rframe, rcur = self.cur
if (rframe is not frame) and rcur:
return self.trace_dispatch_return(rframe, t)
self.cur = rpt, rit+t, ret, rfn, rframe, rcur
return 1
def trace_dispatch_call(self, frame, t):
if self.cur and frame.f_back is not self.cur[-2]:
rpt, rit, ret, rfn, rframe, rcur = self.cur
if not isinstance(rframe, Profile.fake_frame):
assert rframe.f_back is frame.f_back, ("Bad call", rfn,
rframe, rframe.f_back,
frame, frame.f_back)
self.trace_dispatch_return(rframe, 0)
assert (self.cur is None or \
frame.f_back is self.cur[-2]), ("Bad call",
self.cur[-3])
fcode = frame.f_code
fn = (fcode.co_filename, fcode.co_firstlineno, fcode.co_name)
self.cur = (t, 0, 0, fn, frame, self.cur)
timings = self.timings
if fn in timings:
cc, ns, tt, ct, callers = timings[fn]
timings[fn] = cc, ns + 1, tt, ct, callers
else:
timings[fn] = 0, 0, 0, 0, {}
return 1
def trace_dispatch_c_call (self, frame, t):
fn = ("", 0, self.c_func_name)
self.cur = (t, 0, 0, fn, frame, self.cur)
timings = self.timings
if fn in timings:
cc, ns, tt, ct, callers = timings[fn]
timings[fn] = cc, ns+1, tt, ct, callers
else:
timings[fn] = 0, 0, 0, 0, {}
return 1
def trace_dispatch_return(self, frame, t):
if frame is not self.cur[-2]:
assert frame is self.cur[-2].f_back, ("Bad return", self.cur[-3])
self.trace_dispatch_return(self.cur[-2], 0)
# Prefix "r" means part of the Returning or exiting frame.
# Prefix "p" means part of the Previous or Parent or older frame.
rpt, rit, ret, rfn, frame, rcur = self.cur
rit = rit + t
frame_total = rit + ret
ppt, pit, pet, pfn, pframe, pcur = rcur
self.cur = ppt, pit + rpt, pet + frame_total, pfn, pframe, pcur
timings = self.timings
cc, ns, tt, ct, callers = timings[rfn]
if not ns:
# This is the only occurrence of the function on the stack.
# Else this is a (directly or indirectly) recursive call, and
# its cumulative time will get updated when the topmost call to
# it returns.
ct = ct + frame_total
cc = cc + 1
if pfn in callers:
callers[pfn] = callers[pfn] + 1 # hack: gather more
# stats such as the amount of time added to ct courtesy
# of this specific call, and the contribution to cc
# courtesy of this call.
else:
callers[pfn] = 1
timings[rfn] = cc, ns - 1, tt + rit, ct, callers
return 1
dispatch = {
"call": trace_dispatch_call,
"exception": trace_dispatch_exception,
"return": trace_dispatch_return,
"c_call": trace_dispatch_c_call,
"c_exception": trace_dispatch_return, # the C function returned
"c_return": trace_dispatch_return,
}
# The next few functions play with self.cmd. By carefully preloading
# our parallel stack, we can force the profiled result to include
# an arbitrary string as the name of the calling function.
# We use self.cmd as that string, and the resulting stats look
# very nice :-).
def set_cmd(self, cmd):
if self.cur[-1]: return # already set
self.cmd = cmd
self.simulate_call(cmd)
class fake_code:
def __init__(self, filename, line, name):
self.co_filename = filename
self.co_line = line
self.co_name = name
self.co_firstlineno = 0
def __repr__(self):
return repr((self.co_filename, self.co_line, self.co_name))
class fake_frame:
def __init__(self, code, prior):
self.f_code = code
self.f_back = prior
def simulate_call(self, name):
code = self.fake_code('profile', 0, name)
if self.cur:
pframe = self.cur[-2]
else:
pframe = None
frame = self.fake_frame(code, pframe)
self.dispatch['call'](self, frame, 0)
# collect stats from pending stack, including getting final
# timings for self.cmd frame.
def simulate_cmd_complete(self):
get_time = self.get_time
t = get_time() - self.t
while self.cur[-1]:
# We *can* cause assertion errors here if
# dispatch_trace_return checks for a frame match!
self.dispatch['return'](self, self.cur[-2], t)
t = 0
self.t = get_time() - t
def print_stats(self, sort=-1):
import pstats
pstats.Stats(self).strip_dirs().sort_stats(sort). \
print_stats()
def dump_stats(self, file):
with open(file, 'wb') as f:
self.create_stats()
marshal.dump(self.stats, f)
def create_stats(self):
self.simulate_cmd_complete()
self.snapshot_stats()
def snapshot_stats(self):
self.stats = {}
for func, (cc, ns, tt, ct, callers) in self.timings.items():
callers = callers.copy()
nc = 0
for callcnt in callers.values():
nc += callcnt
self.stats[func] = cc, nc, tt, ct, callers
# The following two methods can be called by clients to use
# a profiler to profile a statement, given as a string.
def run(self, cmd):
import __main__
dict = __main__.__dict__
return self.runctx(cmd, dict, dict)
def runctx(self, cmd, globals, locals):
self.set_cmd(cmd)
sys.setprofile(self.dispatcher)
try:
exec(cmd, globals, locals)
finally:
sys.setprofile(None)
return self
# This method is more useful to profile a single function call.
def runcall(self, func, *args, **kw):
self.set_cmd(repr(func))
sys.setprofile(self.dispatcher)
try:
return func(*args, **kw)
finally:
sys.setprofile(None)
#******************************************************************
# The following calculates the overhead for using a profiler. The
# problem is that it takes a fair amount of time for the profiler
# to stop the stopwatch (from the time it receives an event).
# Similarly, there is a delay from the time that the profiler
# re-starts the stopwatch before the user's code really gets to
# continue. The following code tries to measure the difference on
# a per-event basis.
#
# Note that this difference is only significant if there are a lot of
# events, and relatively little user code per event. For example,
# code with small functions will typically benefit from having the
# profiler calibrated for the current platform. This *could* be
# done on the fly during init() time, but it is not worth the
# effort. Also note that if too large a value specified, then
# execution time on some functions will actually appear as a
# negative number. It is *normal* for some functions (with very
# low call counts) to have such negative stats, even if the
# calibration figure is "correct."
#
# One alternative to profile-time calibration adjustments (i.e.,
# adding in the magic little delta during each event) is to track
# more carefully the number of events (and cumulatively, the number
# of events during sub functions) that are seen. If this were
# done, then the arithmetic could be done after the fact (i.e., at
# display time). Currently, we track only call/return events.
# These values can be deduced by examining the callees and callers
# vectors for each functions. Hence we *can* almost correct the
# internal time figure at print time (note that we currently don't
# track exception event processing counts). Unfortunately, there
# is currently no similar information for cumulative sub-function
# time. It would not be hard to "get all this info" at profiler
# time. Specifically, we would have to extend the tuples to keep
# counts of this in each frame, and then extend the defs of timing
# tuples to include the significant two figures. I'm a bit fearful
# that this additional feature will slow the heavily optimized
# event/time ratio (i.e., the profiler would run slower, fur a very
# low "value added" feature.)
#**************************************************************
def calibrate(self, m, verbose=0):
if self.__class__ is not Profile:
raise TypeError("Subclasses must override .calibrate().")
saved_bias = self.bias
self.bias = 0
try:
return self._calibrate_inner(m, verbose)
finally:
self.bias = saved_bias
def _calibrate_inner(self, m, verbose):
get_time = self.get_time
# Set up a test case to be run with and without profiling. Include
# lots of calls, because we're trying to quantify stopwatch overhead.
# Do not raise any exceptions, though, because we want to know
# exactly how many profile events are generated (one call event, +
# one return event, per Python-level call).
def f1(n):
for i in range(n):
x = 1
def f(m, f1=f1):
for i in range(m):
f1(100)
f(m) # warm up the cache
# elapsed_noprofile <- time f(m) takes without profiling.
t0 = get_time()
f(m)
t1 = get_time()
elapsed_noprofile = t1 - t0
if verbose:
print("elapsed time without profiling =", elapsed_noprofile)
# elapsed_profile <- time f(m) takes with profiling. The difference
# is profiling overhead, only some of which the profiler subtracts
# out on its own.
p = Profile()
t0 = get_time()
p.runctx('f(m)', globals(), locals())
t1 = get_time()
elapsed_profile = t1 - t0
if verbose:
print("elapsed time with profiling =", elapsed_profile)
# reported_time <- "CPU seconds" the profiler charged to f and f1.
total_calls = 0.0
reported_time = 0.0
for (filename, line, funcname), (cc, ns, tt, ct, callers) in \
p.timings.items():
if funcname in ("f", "f1"):
total_calls += cc
reported_time += tt
if verbose:
print("'CPU seconds' profiler reported =", reported_time)
print("total # calls =", total_calls)
if total_calls != m + 1:
raise ValueError("internal error: total calls = %d" % total_calls)
# reported_time - elapsed_noprofile = overhead the profiler wasn't
# able to measure. Divide by twice the number of calls (since there
# are two profiler events per call in this test) to get the hidden
# overhead per event.
mean = (reported_time - elapsed_noprofile) / 2.0 / total_calls
if verbose:
print("mean stopwatch overhead per profile event =", mean)
return mean
#****************************************************************************
def main():
usage = "profile.py [-o output_file_path] [-s sort] scriptfile [arg] ..."
parser = OptionParser(usage=usage)
parser.allow_interspersed_args = False
parser.add_option('-o', '--outfile', dest="outfile",
help="Save stats to <outfile>", default=None)
parser.add_option('-s', '--sort', dest="sort",
help="Sort order when printing to stdout, based on pstats.Stats class",
default=-1)
if not sys.argv[1:]:
parser.print_usage()
sys.exit(2)
(options, args) = parser.parse_args()
sys.argv[:] = args
if len(args) > 0:
progname = args[0]
sys.path.insert(0, os.path.dirname(progname))
with open(progname, 'rb') as fp:
code = compile(fp.read(), progname, 'exec')
globs = {
'__file__': progname,
'__name__': '__main__',
'__package__': None,
'__cached__': None,
}
runctx(code, globs, None, options.outfile, options.sort)
else:
parser.print_usage()
return parser
# When invoked as main program, invoke the profiler on a script
if __name__ == '__main__':
main()
| lgpl-3.0 | 5,958,273,207,825,448,000 | 36.387097 | 80 | 0.586894 | false |
alu0100207385/dsi_3Django | django/contrib/gis/db/backends/spatialite/creation.py | 117 | 5699 | import os
from django.conf import settings
from django.core.cache import get_cache
from django.core.cache.backends.db import BaseDatabaseCache
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.sqlite3.creation import DatabaseCreation
class SpatiaLiteCreation(DatabaseCreation):
def create_test_db(self, verbosity=1, autoclobber=False):
"""
Creates a test database, prompting the user for confirmation if the
database already exists. Returns the name of the test database created.
This method is overloaded to load up the SpatiaLite initialization
SQL prior to calling the `syncdb` command.
"""
# Don't import django.core.management if it isn't needed.
from django.core.management import call_command
test_database_name = self._get_test_db_name()
if verbosity >= 1:
test_db_repr = ''
if verbosity >= 2:
test_db_repr = " ('%s')" % test_database_name
print("Creating test database for alias '%s'%s..." % (self.connection.alias, test_db_repr))
self._create_test_db(verbosity, autoclobber)
self.connection.close()
self.connection.settings_dict["NAME"] = test_database_name
# Need to load the SpatiaLite initialization SQL before running `syncdb`.
self.load_spatialite_sql()
# Report syncdb messages at one level lower than that requested.
# This ensures we don't get flooded with messages during testing
# (unless you really ask to be flooded)
call_command('syncdb',
verbosity=max(verbosity - 1, 0),
interactive=False,
database=self.connection.alias,
load_initial_data=False)
# We need to then do a flush to ensure that any data installed by
# custom SQL has been removed. The only test data should come from
# test fixtures, or autogenerated from post_syncdb triggers.
# This has the side effect of loading initial data (which was
# intentionally skipped in the syncdb).
call_command('flush',
verbosity=max(verbosity - 1, 0),
interactive=False,
database=self.connection.alias)
from django.core.cache import get_cache
from django.core.cache.backends.db import BaseDatabaseCache
for cache_alias in settings.CACHES:
cache = get_cache(cache_alias)
if isinstance(cache, BaseDatabaseCache):
call_command('createcachetable', cache._table, database=self.connection.alias)
# Get a cursor (even though we don't need one yet). This has
# the side effect of initializing the test database.
cursor = self.connection.cursor()
return test_database_name
def sql_indexes_for_field(self, model, f, style):
"Return any spatial index creation SQL for the field."
from django.contrib.gis.db.models.fields import GeometryField
output = super(SpatiaLiteCreation, self).sql_indexes_for_field(model, f, style)
if isinstance(f, GeometryField):
gqn = self.connection.ops.geo_quote_name
qn = self.connection.ops.quote_name
db_table = model._meta.db_table
output.append(style.SQL_KEYWORD('SELECT ') +
style.SQL_TABLE('AddGeometryColumn') + '(' +
style.SQL_TABLE(gqn(db_table)) + ', ' +
style.SQL_FIELD(gqn(f.column)) + ', ' +
style.SQL_FIELD(str(f.srid)) + ', ' +
style.SQL_COLTYPE(gqn(f.geom_type)) + ', ' +
style.SQL_KEYWORD(str(f.dim)) + ', ' +
style.SQL_KEYWORD(str(int(not f.null))) +
');')
if f.spatial_index:
output.append(style.SQL_KEYWORD('SELECT ') +
style.SQL_TABLE('CreateSpatialIndex') + '(' +
style.SQL_TABLE(gqn(db_table)) + ', ' +
style.SQL_FIELD(gqn(f.column)) + ');')
return output
def load_spatialite_sql(self):
"""
This routine loads up the SpatiaLite SQL file.
"""
if self.connection.ops.spatial_version[:2] >= (2, 4):
# Spatialite >= 2.4 -- No need to load any SQL file, calling
# InitSpatialMetaData() transparently creates the spatial metadata
# tables
cur = self.connection._cursor()
cur.execute("SELECT InitSpatialMetaData()")
else:
# Spatialite < 2.4 -- Load the initial SQL
# Getting the location of the SpatiaLite SQL file, and confirming
# it exists.
spatialite_sql = self.spatialite_init_file()
if not os.path.isfile(spatialite_sql):
raise ImproperlyConfigured('Could not find the required SpatiaLite initialization '
'SQL file (necessary for testing): %s' % spatialite_sql)
# Opening up the SpatiaLite SQL initialization file and executing
# as a script.
with open(spatialite_sql, 'r') as sql_fh:
cur = self.connection._cursor()
cur.executescript(sql_fh.read())
def spatialite_init_file(self):
# SPATIALITE_SQL may be placed in settings to tell GeoDjango
# to use a specific path to the SpatiaLite initilization SQL.
return getattr(settings, 'SPATIALITE_SQL',
'init_spatialite-%s.%s.sql' %
self.connection.ops.spatial_version[:2])
| bsd-3-clause | 6,522,156,155,211,061,000 | 43.178295 | 103 | 0.593964 | false |
0x19/werkzeug | bench/wzbench.py | 4 | 12441 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
wzbench
~~~~~~~
A werkzeug internal benchmark module. It's used in combination with
hg bisect to find out how the Werkzeug performance of some internal
core parts changes over time.
:copyright: 2009 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import division
import os
import gc
import sys
import subprocess
from cStringIO import StringIO
from timeit import default_timer as timer
from types import FunctionType
# create a new module where we later store all the werkzeug attributes.
wz = type(sys)('werkzeug_nonlazy')
sys.path.insert(0, '<DUMMY>')
null_out = open(os.devnull, 'w')
# ±4% are ignored
TOLERANCE = 0.04
MIN_RESOLUTION = 0.002
# we run each test 5 times
TEST_RUNS = 5
def find_hg_tag(path):
"""Returns the current node or tag for the given path."""
tags = {}
try:
client = subprocess.Popen(['hg', 'cat', '-r', 'tip', '.hgtags'],
stdout=subprocess.PIPE, cwd=path)
for line in client.communicate()[0].splitlines():
line = line.strip()
if not line:
continue
hash, tag = line.split()
tags[hash] = tag
except OSError:
return
client = subprocess.Popen(['hg', 'parent', '--template', '#node#'],
stdout=subprocess.PIPE, cwd=path)
tip = client.communicate()[0].strip()
tag = tags.get(tip)
if tag is not None:
return tag
return tip
def load_werkzeug(path):
"""Load werkzeug."""
sys.path[0] = path
# get rid of already imported stuff
wz.__dict__.clear()
for key in sys.modules.keys():
if key.startswith('werkzeug.') or key == 'werkzeug':
sys.modules.pop(key, None)
# import werkzeug again.
import werkzeug
for key in werkzeug.__all__:
setattr(wz, key, getattr(werkzeug, key))
# get the hg tag
hg_tag = find_hg_tag(path)
# get the real version from the setup file
try:
f = open(os.path.join(path, 'setup.py'))
except IOError:
pass
else:
try:
for line in f:
line = line.strip()
if line.startswith('version='):
return line[8:].strip(' \t,')[1:-1], hg_tag
finally:
f.close()
print >> sys.stderr, 'Unknown werkzeug version loaded'
sys.exit(2)
def median(seq):
seq = sorted(seq)
if not seq:
return 0.0
return seq[len(seq) // 2]
def format_func(func):
if type(func) is FunctionType:
name = func.__name__
else:
name = func
if name.startswith('time_'):
name = name[5:]
return name.replace('_', ' ').title()
def bench(func):
"""Times a single function."""
sys.stdout.write('%44s ' % format_func(func))
sys.stdout.flush()
# figure out how many times we have to run the function to
# get reliable timings.
for i in xrange(3, 10):
rounds = 1 << i
t = timer()
for x in xrange(rounds):
func()
if timer() - t >= 0.2:
break
# now run the tests without gc TEST_RUNS times and use the median
# value of these runs.
def _run():
gc.collect()
gc.disable()
try:
t = timer()
for x in xrange(rounds):
func()
return (timer() - t) / rounds * 1000
finally:
gc.enable()
delta = median(_run() for x in xrange(TEST_RUNS))
sys.stdout.write('%.4f\n' % delta)
sys.stdout.flush()
return delta
def main():
"""The main entrypoint."""
from optparse import OptionParser
parser = OptionParser(usage='%prog [options]')
parser.add_option('--werkzeug-path', '-p', dest='path', default='..',
help='the path to the werkzeug package. defaults to cwd')
parser.add_option('--compare', '-c', dest='compare', nargs=2,
default=False, help='compare two hg nodes of Werkzeug')
parser.add_option('--init-compare', dest='init_compare',
action='store_true', default=False,
help='Initializes the comparison feature')
options, args = parser.parse_args()
if args:
parser.error('Script takes no arguments')
if options.compare:
compare(*options.compare)
elif options.init_compare:
init_compare()
else:
run(options.path)
def init_compare():
"""Initializes the comparison feature."""
print 'Initializing comparison feature'
subprocess.Popen(['hg', 'clone', '..', 'a']).wait()
subprocess.Popen(['hg', 'clone', '..', 'b']).wait()
def compare(node1, node2):
"""Compares two Werkzeug hg versions."""
if not os.path.isdir('a'):
print >> sys.stderr, 'error: comparison feature not initialized'
sys.exit(4)
print '=' * 80
print 'WERKZEUG INTERNAL BENCHMARK -- COMPARE MODE'.center(80)
print '-' * 80
delim = '-' * 20
def _error(msg):
print >> sys.stderr, 'error:', msg
sys.exit(1)
def _hg_update(repo, node):
hg = lambda *x: subprocess.call(['hg'] + list(x), cwd=repo,
stdout=null_out, stderr=null_out)
hg('revert', '-a', '--no-backup')
client = subprocess.Popen(['hg', 'status', '--unknown', '-n', '-0'],
stdout=subprocess.PIPE, cwd=repo)
unknown = client.communicate()[0]
if unknown:
client = subprocess.Popen(['xargs', '-0', 'rm', '-f'], cwd=repo,
stdout=null_out, stdin=subprocess.PIPE)
client.communicate(unknown)
hg('pull', '../..')
hg('update', node)
if node == 'tip':
diff = subprocess.Popen(['hg', 'diff'], cwd='..',
stdout=subprocess.PIPE).communicate()[0]
if diff:
client = subprocess.Popen(['hg', 'import', '--no-commit', '-'],
cwd=repo, stdout=null_out,
stdin=subprocess.PIPE)
client.communicate(diff)
_hg_update('a', node1)
_hg_update('b', node2)
d1 = run('a', no_header=True)
d2 = run('b', no_header=True)
print 'DIRECT COMPARISON'.center(80)
print '-' * 80
for key in sorted(d1):
delta = d1[key] - d2[key]
if abs(1 - d1[key] / d2[key]) < TOLERANCE or \
abs(delta) < MIN_RESOLUTION:
delta = '=='
else:
delta = '%+.4f (%+d%%)' % \
(delta, round(d2[key] / d1[key] * 100 - 100))
print '%36s %.4f %.4f %s' % \
(format_func(key), d1[key], d2[key], delta)
print '-' * 80
def run(path, no_header=False):
path = os.path.abspath(path)
wz_version, hg_tag = load_werkzeug(path)
result = {}
if not no_header:
print '=' * 80
print 'WERKZEUG INTERNAL BENCHMARK'.center(80)
print '-' * 80
print 'Path: %s' % path
print 'Version: %s' % wz_version
if hg_tag is not None:
print 'HG Tag: %s' % hg_tag
print '-' * 80
for key, value in sorted(globals().items()):
if key.startswith('time_'):
before = globals().get('before_' + key[5:])
if before:
before()
result[key] = bench(value)
after = globals().get('after_' + key[5:])
if after:
after()
print '-' * 80
return result
URL_DECODED_DATA = dict((str(x), str(x)) for x in xrange(100))
URL_ENCODED_DATA = '&'.join('%s=%s' % x for x in URL_DECODED_DATA.items())
MULTIPART_ENCODED_DATA = '\n'.join((
'--foo',
'Content-Disposition: form-data; name=foo',
'',
'this is just bar',
'--foo',
'Content-Disposition: form-data; name=bar',
'',
'blafasel',
'--foo',
'Content-Disposition: form-data; name=foo; filename=wzbench.py',
'Content-Type: text/plain',
'',
open(__file__.rstrip('c')).read(),
'--foo--'
))
MULTIDICT = None
REQUEST = None
TEST_ENV = None
LOCAL = None
LOCAL_MANAGER = None
def time_url_decode():
wz.url_decode(URL_ENCODED_DATA)
def time_url_encode():
wz.url_encode(URL_DECODED_DATA)
def time_parse_form_data_multipart():
# use a hand written env creator so that we don't bench
# from_values which is known to be slowish in 0.5.1 and higher.
# we don't want to bench two things at once.
environ = {
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=foo',
'wsgi.input': StringIO(MULTIPART_ENCODED_DATA),
'CONTENT_LENGTH': str(len(MULTIPART_ENCODED_DATA))
}
request = wz.Request(environ)
request.form
def before_multidict_lookup_hit():
global MULTIDICT
MULTIDICT = wz.MultiDict({'foo': 'bar'})
def time_multidict_lookup_hit():
MULTIDICT['foo']
def after_multidict_lookup_hit():
global MULTIDICT
MULTIDICT = None
def before_multidict_lookup_miss():
global MULTIDICT
MULTIDICT = wz.MultiDict()
def time_multidict_lookup_miss():
try:
MULTIDICT['foo']
except KeyError:
pass
def after_multidict_lookup_miss():
global MULTIDICT
MULTIDICT = None
def time_cached_property():
class Foo(object):
@wz.cached_property
def x(self):
return 42
f = Foo()
for x in xrange(60):
f.x
def before_request_form_access():
global REQUEST
data = 'foo=bar&blah=blub'
REQUEST = wz.Request({
'CONTENT_LENGTH': str(len(data)),
'wsgi.input': StringIO(data),
'REQUEST_METHOD': 'POST',
'wsgi.version': (1, 0),
'QUERY_STRING': data,
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'PATH_INFO': '/',
'SCRIPT_NAME': ''
})
def time_request_form_access():
for x in xrange(30):
REQUEST.path
REQUEST.script_root
REQUEST.args['foo']
REQUEST.form['foo']
def after_request_form_access():
global REQUEST
REQUEST = None
def time_request_from_values():
wz.Request.from_values(base_url='http://www.google.com/',
query_string='foo=bar&blah=blaz',
input_stream=StringIO(MULTIPART_ENCODED_DATA),
content_length=len(MULTIPART_ENCODED_DATA),
content_type='multipart/form-data; '
'boundary=foo', method='POST')
def before_request_shallow_init():
global TEST_ENV
TEST_ENV = wz.create_environ()
def time_request_shallow_init():
wz.Request(TEST_ENV, shallow=True)
def after_request_shallow_init():
global TEST_ENV
TEST_ENV = None
def time_response_iter_performance():
resp = wz.Response(u'Hällo Wörld ' * 1000,
mimetype='text/html')
for item in resp({'REQUEST_METHOD': 'GET'}, lambda *s: None):
pass
def time_response_iter_head_performance():
resp = wz.Response(u'Hällo Wörld ' * 1000,
mimetype='text/html')
for item in resp({'REQUEST_METHOD': 'HEAD'}, lambda *s: None):
pass
def before_local_manager_dispatch():
global LOCAL_MANAGER, LOCAL
LOCAL = wz.Local()
LOCAL_MANAGER = wz.LocalManager([LOCAL])
def time_local_manager_dispatch():
for x in xrange(10):
LOCAL.x = 42
for x in xrange(10):
LOCAL.x
def after_local_manager_dispatch():
global LOCAL_MANAGER, LOCAL
LOCAL = LOCAL_MANAGER = None
def before_html_builder():
global TABLE
TABLE = [['col 1', 'col 2', 'col 3', '4', '5', '6'] for x in range(10)]
def time_html_builder():
html_rows = []
for row in TABLE:
html_cols = [wz.html.td(col, class_='col') for col in row]
html_rows.append(wz.html.tr(class_='row', *html_cols))
table = wz.html.table(*html_rows)
def after_html_builder():
global TABLE
TABLE = None
if __name__ == '__main__':
os.chdir(os.path.dirname(__file__) or os.path.curdir)
try:
main()
except KeyboardInterrupt:
print >> sys.stderr, 'interrupted!'
| bsd-3-clause | -5,434,094,080,824,894,000 | 26.513274 | 79 | 0.551383 | false |
ovnicraft/edx-platform | common/djangoapps/course_action_state/tests/test_managers.py | 126 | 7219 | # pylint: disable=invalid-name, attribute-defined-outside-init
"""
Tests for basic common operations related to Course Action State managers
"""
from ddt import ddt, data
from django.test import TestCase
from collections import namedtuple
from opaque_keys.edx.locations import CourseLocator
from course_action_state.models import CourseRerunState
from course_action_state.managers import CourseActionStateItemNotFoundError
# Sequence of Action models to be tested with ddt.
COURSE_ACTION_STATES = (CourseRerunState, )
class TestCourseActionStateManagerBase(TestCase):
"""
Base class for testing Course Action State Managers.
"""
def setUp(self):
super(TestCourseActionStateManagerBase, self).setUp()
self.course_key = CourseLocator("test_org", "test_course_num", "test_run")
@ddt
class TestCourseActionStateManager(TestCourseActionStateManagerBase):
"""
Test class for testing the CourseActionStateManager.
"""
@data(*COURSE_ACTION_STATES)
def test_update_state_allow_not_found_is_false(self, action_class):
with self.assertRaises(CourseActionStateItemNotFoundError):
action_class.objects.update_state(self.course_key, "fake_state", allow_not_found=False)
@data(*COURSE_ACTION_STATES)
def test_update_state_allow_not_found(self, action_class):
action_class.objects.update_state(self.course_key, "initial_state", allow_not_found=True)
self.assertIsNotNone(
action_class.objects.find_first(course_key=self.course_key)
)
@data(*COURSE_ACTION_STATES)
def test_delete(self, action_class):
obj = action_class.objects.update_state(self.course_key, "initial_state", allow_not_found=True)
action_class.objects.delete(obj.id)
with self.assertRaises(CourseActionStateItemNotFoundError):
action_class.objects.find_first(course_key=self.course_key)
@ddt
class TestCourseActionUIStateManager(TestCourseActionStateManagerBase):
"""
Test class for testing the CourseActionUIStateManager.
"""
def init_course_action_states(self, action_class):
"""
Creates course action state entries with different states for the given action model class.
Creates both displayable (should_display=True) and non-displayable (should_display=False) entries.
"""
def create_course_states(starting_course_num, ending_course_num, state, should_display=True):
"""
Creates a list of course state tuples by creating unique course locators with course-numbers
from starting_course_num to ending_course_num.
"""
CourseState = namedtuple('CourseState', 'course_key, state, should_display')
return [
CourseState(CourseLocator("org", "course", "run" + str(num)), state, should_display)
for num in range(starting_course_num, ending_course_num)
]
NUM_COURSES_WITH_STATE1 = 3
NUM_COURSES_WITH_STATE2 = 3
NUM_COURSES_WITH_STATE3 = 3
NUM_COURSES_NON_DISPLAYABLE = 3
# courses with state1 and should_display=True
self.courses_with_state1 = create_course_states(
0,
NUM_COURSES_WITH_STATE1,
'state1'
)
# courses with state2 and should_display=True
self.courses_with_state2 = create_course_states(
NUM_COURSES_WITH_STATE1,
NUM_COURSES_WITH_STATE1 + NUM_COURSES_WITH_STATE2,
'state2'
)
# courses with state3 and should_display=True
self.courses_with_state3 = create_course_states(
NUM_COURSES_WITH_STATE1 + NUM_COURSES_WITH_STATE2,
NUM_COURSES_WITH_STATE1 + NUM_COURSES_WITH_STATE2 + NUM_COURSES_WITH_STATE3,
'state3'
)
# all courses with should_display=True
self.course_actions_displayable_states = (
self.courses_with_state1 + self.courses_with_state2 + self.courses_with_state3
)
# courses with state3 and should_display=False
self.courses_with_state3_non_displayable = create_course_states(
NUM_COURSES_WITH_STATE1 + NUM_COURSES_WITH_STATE2 + NUM_COURSES_WITH_STATE3,
NUM_COURSES_WITH_STATE1 + NUM_COURSES_WITH_STATE2 + NUM_COURSES_WITH_STATE3 + NUM_COURSES_NON_DISPLAYABLE,
'state3',
should_display=False,
)
# create course action states for all courses
for CourseState in self.course_actions_displayable_states + self.courses_with_state3_non_displayable:
action_class.objects.update_state(
CourseState.course_key,
CourseState.state,
should_display=CourseState.should_display,
allow_not_found=True
)
def assertCourseActionStatesEqual(self, expected, found):
"""Asserts that the set of course keys in the expected state equal those that are found"""
self.assertSetEqual(
set(course_action_state.course_key for course_action_state in expected),
set(course_action_state.course_key for course_action_state in found))
@data(*COURSE_ACTION_STATES)
def test_find_all_for_display(self, action_class):
self.init_course_action_states(action_class)
self.assertCourseActionStatesEqual(
self.course_actions_displayable_states,
action_class.objects.find_all(should_display=True),
)
@data(*COURSE_ACTION_STATES)
def test_find_all_for_display_filter_exclude(self, action_class):
self.init_course_action_states(action_class)
for course_action_state, filter_state, exclude_state in (
(self.courses_with_state1, 'state1', None), # filter for state1
(self.courses_with_state2, 'state2', None), # filter for state2
(self.courses_with_state2 + self.courses_with_state3, None, 'state1'), # exclude state1
(self.courses_with_state1 + self.courses_with_state3, None, 'state2'), # exclude state2
(self.courses_with_state1, 'state1', 'state2'), # filter for state1, exclude state2
([], 'state1', 'state1'), # filter for state1, exclude state1
):
self.assertCourseActionStatesEqual(
course_action_state,
action_class.objects.find_all(
exclude_args=({'state': exclude_state} if exclude_state else None),
should_display=True,
**({'state': filter_state} if filter_state else {})
)
)
def test_kwargs_in_update_state(self):
destination_course_key = CourseLocator("org", "course", "run")
source_course_key = CourseLocator("source_org", "source_course", "source_run")
CourseRerunState.objects.update_state(
course_key=destination_course_key,
new_state='state1',
allow_not_found=True,
source_course_key=source_course_key,
)
found_action_state = CourseRerunState.objects.find_first(course_key=destination_course_key)
self.assertEquals(source_course_key, found_action_state.source_course_key)
| agpl-3.0 | -2,961,681,727,057,095,700 | 44.11875 | 118 | 0.655492 | false |
nanolearningllc/edx-platform-cypress-2 | cms/djangoapps/contentstore/courseware_index.py | 60 | 26790 | """ Code to allow module store to interface with courseware index """
from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
from datetime import timedelta
import logging
import re
from six import add_metaclass
from django.conf import settings
from django.utils.translation import ugettext as _
from django.core.urlresolvers import resolve
from contentstore.utils import course_image_url
from contentstore.course_group_config import GroupConfiguration
from course_modes.models import CourseMode
from eventtracking import tracker
from search.search_engine_base import SearchEngine
from xmodule.annotator_mixin import html_to_text
from xmodule.modulestore import ModuleStoreEnum
from xmodule.library_tools import normalize_key_for_search
# REINDEX_AGE is the default amount of time that we look back for changes
# that might have happened. If we are provided with a time at which the
# indexing is triggered, then we know it is safe to only index items
# recently changed at that time. This is the time period that represents
# how far back from the trigger point to look back in order to index
REINDEX_AGE = timedelta(0, 60) # 60 seconds
log = logging.getLogger('edx.modulestore')
def strip_html_content_to_text(html_content):
""" Gets only the textual part for html content - useful for building text to be searched """
    # Collapse runs of whitespace, HTML-encoded non-breaking spaces (&nbsp;) and '//' separators into single spaces
text_content = re.sub(r"(\s| |//)+", " ", html_to_text(html_content))
# Removing HTML CDATA
text_content = re.sub(r"<!\[CDATA\[.*\]\]>", "", text_content)
# Removing HTML comments
text_content = re.sub(r"<!--.*-->", "", text_content)
return text_content
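# Rough illustration of the intended behaviour (hypothetical input/output, relying on
# xmodule's html_to_text to strip the markup):
#   strip_html_content_to_text("<p>Hello <!--aside--> world</p>")
#   would be expected to come back as something like "Hello world".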
def indexing_is_enabled():
"""
Checks to see if the indexing feature is enabled
"""
return settings.FEATURES.get('ENABLE_COURSEWARE_INDEX', False)
class SearchIndexingError(Exception):
""" Indicates some error(s) occured during indexing """
def __init__(self, message, error_list):
super(SearchIndexingError, self).__init__(message)
self.error_list = error_list
@add_metaclass(ABCMeta)
class SearchIndexerBase(object):
"""
Base class to perform indexing for courseware or library search from different modulestores
"""
__metaclass__ = ABCMeta
INDEX_NAME = None
DOCUMENT_TYPE = None
ENABLE_INDEXING_KEY = None
INDEX_EVENT = {
'name': None,
'category': None
}
@classmethod
def indexing_is_enabled(cls):
"""
Checks to see if the indexing feature is enabled
"""
return settings.FEATURES.get(cls.ENABLE_INDEXING_KEY, False)
@classmethod
@abstractmethod
def normalize_structure_key(cls, structure_key):
""" Normalizes structure key for use in indexing """
@classmethod
@abstractmethod
def _fetch_top_level(cls, modulestore, structure_key):
""" Fetch the item from the modulestore location """
@classmethod
@abstractmethod
def _get_location_info(cls, normalized_structure_key):
""" Builds location info dictionary """
@classmethod
def _id_modifier(cls, usage_id):
""" Modifies usage_id to submit to index """
return usage_id
@classmethod
def remove_deleted_items(cls, searcher, structure_key, exclude_items):
"""
        Remove any item that is present in the search index but not in the updated list of
        indexed items. As items are found, the set of items to keep can be shortened.
"""
response = searcher.search(
doc_type=cls.DOCUMENT_TYPE,
field_dictionary=cls._get_location_info(structure_key),
exclude_dictionary={"id": list(exclude_items)}
)
result_ids = [result["data"]["id"] for result in response["results"]]
searcher.remove(cls.DOCUMENT_TYPE, result_ids)
@classmethod
def index(cls, modulestore, structure_key, triggered_at=None, reindex_age=REINDEX_AGE):
"""
Process course for indexing
Arguments:
modulestore - modulestore object to use for operations
structure_key (CourseKey|LibraryKey) - course or library identifier
triggered_at (datetime) - provides time at which indexing was triggered;
useful for index updates - only things changed recently from that date
(within REINDEX_AGE above ^^) will have their index updated, others skip
updating their index but are still walked through in order to identify
which items may need to be removed from the index
If None, then a full reindex takes place
Returns:
Number of items that have been added to the index
"""
error_list = []
searcher = SearchEngine.get_search_engine(cls.INDEX_NAME)
if not searcher:
return
structure_key = cls.normalize_structure_key(structure_key)
location_info = cls._get_location_info(structure_key)
# Wrap counter in dictionary - otherwise we seem to lose scope inside the embedded function `prepare_item_index`
indexed_count = {
"count": 0
}
# indexed_items is a list of all the items that we wish to remain in the
# index, whether or not we are planning to actually update their index.
# This is used in order to build a query to remove those items not in this
# list - those are ready to be destroyed
indexed_items = set()
# items_index is a list of all the items index dictionaries.
# it is used to collect all indexes and index them using bulk API,
# instead of per item index API call.
items_index = []
def get_item_location(item):
"""
Gets the version agnostic item location
"""
return item.location.version_agnostic().replace(branch=None)
def prepare_item_index(item, skip_index=False, groups_usage_info=None):
"""
Add this item to the items_index and indexed_items list
Arguments:
item - item to add to index, its children will be processed recursively
skip_index - simply walk the children in the tree, the content change is
older than the REINDEX_AGE window and would have been already indexed.
This should really only be passed from the recursive child calls when
this method has determined that it is safe to do so
Returns:
item_content_groups - content groups assigned to indexed item
"""
is_indexable = hasattr(item, "index_dictionary")
item_index_dictionary = item.index_dictionary() if is_indexable else None
# if it's not indexable and it does not have children, then ignore
if not item_index_dictionary and not item.has_children:
return
item_content_groups = None
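            # For content experiments (split_test), remember which experiment group each child
            # (and that child's own components) belongs to, so indexed items can later be
            # filtered by content group.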
if item.category == "split_test":
split_partition = item.get_selected_partition()
for split_test_child in item.get_children():
if split_partition:
for group in split_partition.groups:
group_id = unicode(group.id)
child_location = item.group_id_to_child.get(group_id, None)
if child_location == split_test_child.location:
groups_usage_info.update({
unicode(get_item_location(split_test_child)): [group_id],
})
for component in split_test_child.get_children():
groups_usage_info.update({
unicode(get_item_location(component)): [group_id]
})
if groups_usage_info:
item_location = get_item_location(item)
item_content_groups = groups_usage_info.get(unicode(item_location), None)
item_id = unicode(cls._id_modifier(item.scope_ids.usage_id))
indexed_items.add(item_id)
if item.has_children:
# determine if it's okay to skip adding the children herein based upon how recently any may have changed
skip_child_index = skip_index or \
(triggered_at is not None and (triggered_at - item.subtree_edited_on) > reindex_age)
children_groups_usage = []
for child_item in item.get_children():
if modulestore.has_published_version(child_item):
children_groups_usage.append(
prepare_item_index(
child_item,
skip_index=skip_child_index,
groups_usage_info=groups_usage_info
)
)
if None in children_groups_usage:
item_content_groups = None
if skip_index or not item_index_dictionary:
return
item_index = {}
# if it has something to add to the index, then add it
try:
item_index.update(location_info)
item_index.update(item_index_dictionary)
item_index['id'] = item_id
if item.start:
item_index['start_date'] = item.start
item_index['content_groups'] = item_content_groups if item_content_groups else None
item_index.update(cls.supplemental_fields(item))
items_index.append(item_index)
indexed_count["count"] += 1
return item_content_groups
except Exception as err: # pylint: disable=broad-except
# broad exception so that index operation does not fail on one item of many
log.warning('Could not index item: %s - %r', item.location, err)
error_list.append(_('Could not index item: {}').format(item.location))
try:
with modulestore.branch_setting(ModuleStoreEnum.RevisionOption.published_only):
structure = cls._fetch_top_level(modulestore, structure_key)
groups_usage_info = cls.fetch_group_usage(modulestore, structure)
# First perform any additional indexing from the structure object
cls.supplemental_index_information(modulestore, structure)
# Now index the content
for item in structure.get_children():
prepare_item_index(item, groups_usage_info=groups_usage_info)
searcher.index(cls.DOCUMENT_TYPE, items_index)
cls.remove_deleted_items(searcher, structure_key, indexed_items)
except Exception as err: # pylint: disable=broad-except
# broad exception so that index operation does not prevent the rest of the application from working
log.exception(
"Indexing error encountered, courseware index may be out of date %s - %r",
structure_key,
err
)
error_list.append(_('General indexing error occurred'))
if error_list:
raise SearchIndexingError('Error(s) present during indexing', error_list)
return indexed_count["count"]
@classmethod
def _do_reindex(cls, modulestore, structure_key):
"""
(Re)index all content within the given structure (course or library),
tracking the fact that a full reindex has taken place
"""
indexed_count = cls.index(modulestore, structure_key)
if indexed_count:
cls._track_index_request(cls.INDEX_EVENT['name'], cls.INDEX_EVENT['category'], indexed_count)
return indexed_count
@classmethod
def _track_index_request(cls, event_name, category, indexed_count):
"""Track content index requests.
Arguments:
event_name (str): Name of the event to be logged.
category (str): category of indexed items
indexed_count (int): number of indexed items
Returns:
None
"""
data = {
"indexed_count": indexed_count,
'category': category,
}
tracker.emit(
event_name,
data
)
@classmethod
def fetch_group_usage(cls, modulestore, structure): # pylint: disable=unused-argument
"""
Base implementation of fetch group usage on course/library.
"""
return None
@classmethod
def supplemental_index_information(cls, modulestore, structure):
"""
Perform any supplemental indexing given that the structure object has
already been loaded. Base implementation performs no operation.
Arguments:
modulestore - modulestore object used during the indexing operation
structure - structure object loaded during the indexing job
Returns:
None
"""
pass
@classmethod
def supplemental_fields(cls, item): # pylint: disable=unused-argument
"""
Any supplemental fields that get added to the index for the specified
item. Base implementation returns an empty dictionary
"""
return {}
class CoursewareSearchIndexer(SearchIndexerBase):
"""
Class to perform indexing for courseware search from different modulestores
"""
INDEX_NAME = "courseware_index"
DOCUMENT_TYPE = "courseware_content"
ENABLE_INDEXING_KEY = 'ENABLE_COURSEWARE_INDEX'
INDEX_EVENT = {
'name': 'edx.course.index.reindexed',
'category': 'courseware_index'
}
UNNAMED_MODULE_NAME = _("(Unnamed)")
@classmethod
def normalize_structure_key(cls, structure_key):
""" Normalizes structure key for use in indexing """
return structure_key
@classmethod
def _fetch_top_level(cls, modulestore, structure_key):
""" Fetch the item from the modulestore location """
return modulestore.get_course(structure_key, depth=None)
@classmethod
def _get_location_info(cls, normalized_structure_key):
""" Builds location info dictionary """
return {"course": unicode(normalized_structure_key), "org": normalized_structure_key.org}
@classmethod
def do_course_reindex(cls, modulestore, course_key):
"""
(Re)index all content within the given course, tracking the fact that a full reindex has taken place
"""
return cls._do_reindex(modulestore, course_key)
@classmethod
def fetch_group_usage(cls, modulestore, structure):
groups_usage_dict = {}
groups_usage_info = GroupConfiguration.get_content_groups_usage_info(modulestore, structure).items()
groups_usage_info.extend(
GroupConfiguration.get_content_groups_items_usage_info(
modulestore,
structure
).items()
)
if groups_usage_info:
for name, group in groups_usage_info:
for module in group:
view, args, kwargs = resolve(module['url']) # pylint: disable=unused-variable
usage_key_string = unicode(kwargs['usage_key_string'])
if groups_usage_dict.get(usage_key_string, None):
groups_usage_dict[usage_key_string].append(name)
else:
groups_usage_dict[usage_key_string] = [name]
return groups_usage_dict
@classmethod
def supplemental_index_information(cls, modulestore, structure):
"""
Perform additional indexing from loaded structure object
"""
CourseAboutSearchIndexer.index_about_information(modulestore, structure)
@classmethod
def supplemental_fields(cls, item):
"""
Add location path to the item object
Once we've established the path of names, the first name is the course
name, and the next 3 names are the navigable path within the edx
application. Notice that we stop at that level because a full path to
deep children would be confusing.
"""
location_path = []
parent = item
while parent is not None:
path_component_name = parent.display_name
if not path_component_name:
path_component_name = cls.UNNAMED_MODULE_NAME
location_path.append(path_component_name)
parent = parent.get_parent()
location_path.reverse()
return {
"course_name": location_path[0],
"location": location_path[1:4]
}
class LibrarySearchIndexer(SearchIndexerBase):
"""
Base class to perform indexing for library search from different modulestores
"""
INDEX_NAME = "library_index"
DOCUMENT_TYPE = "library_content"
ENABLE_INDEXING_KEY = 'ENABLE_LIBRARY_INDEX'
INDEX_EVENT = {
'name': 'edx.library.index.reindexed',
'category': 'library_index'
}
@classmethod
def normalize_structure_key(cls, structure_key):
""" Normalizes structure key for use in indexing """
return normalize_key_for_search(structure_key)
@classmethod
def _fetch_top_level(cls, modulestore, structure_key):
""" Fetch the item from the modulestore location """
return modulestore.get_library(structure_key, depth=None)
@classmethod
def _get_location_info(cls, normalized_structure_key):
""" Builds location info dictionary """
return {"library": unicode(normalized_structure_key)}
@classmethod
def _id_modifier(cls, usage_id):
""" Modifies usage_id to submit to index """
return usage_id.replace(library_key=(usage_id.library_key.replace(version_guid=None, branch=None)))
@classmethod
def do_library_reindex(cls, modulestore, library_key):
"""
(Re)index all content within the given library, tracking the fact that a full reindex has taken place
"""
return cls._do_reindex(modulestore, library_key)
class AboutInfo(object):
""" About info structure to contain
1) Property name to use
2) Where to add in the index (using flags above)
3) Where to source the properties value
"""
# Bitwise Flags for where to index the information
#
# ANALYSE - states that the property text contains content that we wish to be able to find matched within
# e.g. "joe" should yield a result for "I'd like to drink a cup of joe"
#
# PROPERTY - states that the property text should be a property of the indexed document, to be returned with the
# results: search matches will only be made on exact string matches
# e.g. "joe" will only match on "joe"
#
# We are using bitwise flags because one may want to add the property to EITHER or BOTH parts of the index
# e.g. university name is desired to be analysed, so that a search on "Oxford" will match
# property values "University of Oxford" and "Oxford Brookes University",
# but it is also a useful property, because within a (future) filtered search a user
# may have chosen to filter courses from "University of Oxford"
#
# see https://wiki.python.org/moin/BitwiseOperators for information about bitwise shift operator used below
#
ANALYSE = 1 << 0 # Add the information to the analysed content of the index
PROPERTY = 1 << 1 # Add the information as a property of the object being indexed (not analysed)
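    # Rough sketch of how the flags are meant to combine (values follow from the shifts above):
    #   AboutInfo.ANALYSE | AboutInfo.PROPERTY == 3   -> analysed for matching *and* returned as a property
    #   index_flags & AboutInfo.ANALYSE               -> non-zero when the text goes into the analysed content
    #   index_flags & AboutInfo.PROPERTY              -> non-zero when the value is stored as a document property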
def __init__(self, property_name, index_flags, source_from):
self.property_name = property_name
self.index_flags = index_flags
self.source_from = source_from
def get_value(self, **kwargs):
""" get the value for this piece of information, using the correct source """
return self.source_from(self, **kwargs)
def from_about_dictionary(self, **kwargs):
""" gets the value from the kwargs provided 'about_dictionary' """
about_dictionary = kwargs.get('about_dictionary', None)
if not about_dictionary:
raise ValueError("Context dictionary does not contain expected argument 'about_dictionary'")
return about_dictionary.get(self.property_name, None)
def from_course_property(self, **kwargs):
""" gets the value from the kwargs provided 'course' """
course = kwargs.get('course', None)
if not course:
raise ValueError("Context dictionary does not contain expected argument 'course'")
return getattr(course, self.property_name, None)
def from_course_mode(self, **kwargs):
""" fetches the available course modes from the CourseMode model """
course = kwargs.get('course', None)
if not course:
raise ValueError("Context dictionary does not contain expected argument 'course'")
return [mode.slug for mode in CourseMode.modes_for_course(course.id)]
# Source location options - either from the course or the about info
FROM_ABOUT_INFO = from_about_dictionary
FROM_COURSE_PROPERTY = from_course_property
FROM_COURSE_MODE = from_course_mode
class CourseAboutSearchIndexer(object):
"""
Class to perform indexing of about information from course object
"""
DISCOVERY_DOCUMENT_TYPE = "course_info"
INDEX_NAME = CoursewareSearchIndexer.INDEX_NAME
# List of properties to add to the index - each item in the list is an instance of AboutInfo object
ABOUT_INFORMATION_TO_INCLUDE = [
AboutInfo("advertised_start", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("announcement", AboutInfo.PROPERTY, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("start", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("end", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("effort", AboutInfo.PROPERTY, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("display_name", AboutInfo.ANALYSE, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("overview", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("title", AboutInfo.ANALYSE | AboutInfo.PROPERTY, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("university", AboutInfo.ANALYSE | AboutInfo.PROPERTY, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("number", AboutInfo.ANALYSE | AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("short_description", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("description", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("key_dates", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("video", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("course_staff_short", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("course_staff_extended", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("requirements", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("syllabus", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("textbook", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("faq", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("more_info", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("ocw_links", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("enrollment_start", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("enrollment_end", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("org", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("modes", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_MODE),
AboutInfo("language", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
]
@classmethod
def index_about_information(cls, modulestore, course):
"""
Add the given course to the course discovery index
Arguments:
modulestore - modulestore object to use for operations
course - course object from which to take properties, locate about information
"""
searcher = SearchEngine.get_search_engine(cls.INDEX_NAME)
if not searcher:
return
course_id = unicode(course.id)
course_info = {
'id': course_id,
'course': course_id,
'content': {},
'image_url': course_image_url(course),
}
# load data for all of the 'about' modules for this course into a dictionary
about_dictionary = {
item.location.name: item.data
for item in modulestore.get_items(course.id, qualifiers={"category": "about"})
}
about_context = {
"course": course,
"about_dictionary": about_dictionary,
}
for about_information in cls.ABOUT_INFORMATION_TO_INCLUDE:
# Broad exception handler so that a single bad property does not scupper the collection of others
try:
section_content = about_information.get_value(**about_context)
except: # pylint: disable=bare-except
section_content = None
log.warning(
"Course discovery could not collect property %s for course %s",
about_information.property_name,
course_id,
exc_info=True,
)
if section_content:
if about_information.index_flags & AboutInfo.ANALYSE:
analyse_content = section_content
if isinstance(section_content, basestring):
analyse_content = strip_html_content_to_text(section_content)
course_info['content'][about_information.property_name] = analyse_content
if about_information.index_flags & AboutInfo.PROPERTY:
course_info[about_information.property_name] = section_content
# Broad exception handler to protect around and report problems with indexing
try:
searcher.index(cls.DISCOVERY_DOCUMENT_TYPE, [course_info])
except: # pylint: disable=bare-except
log.exception(
"Course discovery indexing error encountered, course discovery index may be out of date %s",
course_id,
)
raise
log.debug(
"Successfully added %s course to the course discovery index",
course_id
)
| agpl-3.0 | 1,328,666,044,457,507,800 | 40.728972 | 120 | 0.628705 | false |
dongjoon-hyun/tensorflow | tensorflow/python/saved_model/utils_test.py | 62 | 5133 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SavedModel utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import utils
class UtilsTest(test.TestCase):
def testBuildTensorInfoDense(self):
x = array_ops.placeholder(dtypes.float32, 1, name="x")
x_tensor_info = utils.build_tensor_info(x)
self.assertEqual("x:0", x_tensor_info.name)
self.assertEqual(types_pb2.DT_FLOAT, x_tensor_info.dtype)
self.assertEqual(1, len(x_tensor_info.tensor_shape.dim))
self.assertEqual(1, x_tensor_info.tensor_shape.dim[0].size)
def testBuildTensorInfoSparse(self):
x = array_ops.sparse_placeholder(dtypes.float32, [42, 69], name="x")
x_tensor_info = utils.build_tensor_info(x)
self.assertEqual(x.values.name,
x_tensor_info.coo_sparse.values_tensor_name)
self.assertEqual(x.indices.name,
x_tensor_info.coo_sparse.indices_tensor_name)
self.assertEqual(x.dense_shape.name,
x_tensor_info.coo_sparse.dense_shape_tensor_name)
self.assertEqual(types_pb2.DT_FLOAT, x_tensor_info.dtype)
self.assertEqual(2, len(x_tensor_info.tensor_shape.dim))
self.assertEqual(42, x_tensor_info.tensor_shape.dim[0].size)
self.assertEqual(69, x_tensor_info.tensor_shape.dim[1].size)
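  # A SparseTensor has no single graph tensor of its own, so build_tensor_info records the
  # names of its values/indices/dense_shape component tensors in the coo_sparse message
  # rather than in the plain `name` field used for dense tensors.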
def testGetTensorFromInfoDense(self):
expected = array_ops.placeholder(dtypes.float32, 1, name="x")
tensor_info = utils.build_tensor_info(expected)
actual = utils.get_tensor_from_tensor_info(tensor_info)
self.assertIsInstance(actual, ops.Tensor)
self.assertEqual(expected.name, actual.name)
def testGetTensorFromInfoSparse(self):
expected = array_ops.sparse_placeholder(dtypes.float32, name="x")
tensor_info = utils.build_tensor_info(expected)
actual = utils.get_tensor_from_tensor_info(tensor_info)
self.assertIsInstance(actual, sparse_tensor.SparseTensor)
self.assertEqual(expected.values.name, actual.values.name)
self.assertEqual(expected.indices.name, actual.indices.name)
self.assertEqual(expected.dense_shape.name, actual.dense_shape.name)
def testGetTensorFromInfoInOtherGraph(self):
with ops.Graph().as_default() as expected_graph:
expected = array_ops.placeholder(dtypes.float32, 1, name="right")
tensor_info = utils.build_tensor_info(expected)
with ops.Graph().as_default(): # Some other graph.
array_ops.placeholder(dtypes.float32, 1, name="other")
actual = utils.get_tensor_from_tensor_info(tensor_info,
graph=expected_graph)
self.assertIsInstance(actual, ops.Tensor)
self.assertIs(actual.graph, expected_graph)
self.assertEqual(expected.name, actual.name)
def testGetTensorFromInfoInScope(self):
# Build a TensorInfo with name "bar/x:0".
with ops.Graph().as_default():
with ops.name_scope("bar"):
unscoped = array_ops.placeholder(dtypes.float32, 1, name="x")
tensor_info = utils.build_tensor_info(unscoped)
self.assertEqual("bar/x:0", tensor_info.name)
# Build a graph with node "foo/bar/x:0", akin to importing into scope foo.
with ops.Graph().as_default():
with ops.name_scope("foo"):
with ops.name_scope("bar"):
expected = array_ops.placeholder(dtypes.float32, 1, name="x")
self.assertEqual("foo/bar/x:0", expected.name)
# Test that tensor is found by prepending the import scope.
actual = utils.get_tensor_from_tensor_info(tensor_info,
import_scope="foo")
self.assertEqual(expected.name, actual.name)
def testGetTensorFromInfoRaisesErrors(self):
expected = array_ops.placeholder(dtypes.float32, 1, name="x")
tensor_info = utils.build_tensor_info(expected)
tensor_info.name = "blah:0" # Nonexistant name.
with self.assertRaises(KeyError):
utils.get_tensor_from_tensor_info(tensor_info)
tensor_info.ClearField("name") # Malformed (missing encoding).
with self.assertRaises(ValueError):
utils.get_tensor_from_tensor_info(tensor_info)
if __name__ == "__main__":
test.main()
| apache-2.0 | -1,380,299,723,406,448,400 | 45.243243 | 80 | 0.696863 | false |
gymnasium/edx-platform | lms/djangoapps/certificates/tests/tests.py | 9 | 10197 | """
Tests for the certificates models.
"""
from datetime import datetime, timedelta
from ddt import data, ddt, unpack
from pytz import UTC
from django.conf import settings
from milestones.tests.utils import MilestonesTestCaseMixin
from mock import patch
from nose.plugins.attrib import attr
from badges.tests.factories import CourseCompleteImageConfigurationFactory
from lms.djangoapps.certificates.models import (
CertificateStatuses,
GeneratedCertificate,
certificate_info_for_user,
certificate_status_for_student
)
from lms.djangoapps.certificates.tests.factories import GeneratedCertificateFactory
from student.models import CourseEnrollment
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from util.milestones_helpers import milestones_achieved_by_user, set_prerequisite_courses
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@attr(shard=1)
@ddt
class CertificatesModelTest(ModuleStoreTestCase, MilestonesTestCaseMixin):
"""
Tests for the GeneratedCertificate model
"""
def setUp(self):
super(CertificatesModelTest, self).setUp()
today = datetime.now(UTC)
self.instructor_paced_course = CourseFactory.create(
org='edx', number='instructor', display_name='Instructor Paced Course',
start=today - timedelta(days=30),
end=today - timedelta(days=2),
certificate_available_date=today - timedelta(days=1),
self_paced=False
)
self.self_paced_course = CourseFactory.create(
org='edx', number='self',
display_name='Self Paced Course', self_paced=True
)
def test_certificate_status_for_student(self):
student = UserFactory()
course = CourseFactory.create(org='edx', number='verified', display_name='Verified Course')
certificate_status = certificate_status_for_student(student, course.id)
self.assertEqual(certificate_status['status'], CertificateStatuses.unavailable)
self.assertEqual(certificate_status['mode'], GeneratedCertificate.MODES.honor)
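    # In the ddt data sets below, `output` appears to be the triple returned by
    # certificate_info_for_user: [eligible for a certificate, certificate delivered,
    # certificate type], with 'N/A' when no certificate has been generated.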
@unpack
@data(
{'allow_certificate': False, 'whitelisted': False, 'grade': None, 'output': ['N', 'N', 'N/A']},
{'allow_certificate': True, 'whitelisted': True, 'grade': None, 'output': ['Y', 'N', 'N/A']},
{'allow_certificate': True, 'whitelisted': False, 'grade': 0.9, 'output': ['N', 'N', 'N/A']},
{'allow_certificate': False, 'whitelisted': True, 'grade': 0.8, 'output': ['N', 'N', 'N/A']},
{'allow_certificate': False, 'whitelisted': None, 'grade': 0.8, 'output': ['N', 'N', 'N/A']}
)
def test_certificate_info_for_user(self, allow_certificate, whitelisted, grade, output):
"""
Verify that certificate_info_for_user works.
"""
student = UserFactory()
student.profile.allow_certificate = allow_certificate
student.profile.save()
# for instructor paced course
certificate_info = certificate_info_for_user(
student, self.instructor_paced_course.id, grade,
whitelisted, user_certificate=None
)
self.assertEqual(certificate_info, output)
# for self paced course
certificate_info = certificate_info_for_user(
student, self.self_paced_course.id, grade,
whitelisted, user_certificate=None
)
self.assertEqual(certificate_info, output)
@unpack
@data(
{'allow_certificate': False, 'whitelisted': False, 'grade': None, 'output': ['N', 'Y', 'honor']},
{'allow_certificate': True, 'whitelisted': True, 'grade': None, 'output': ['Y', 'Y', 'honor']},
{'allow_certificate': True, 'whitelisted': False, 'grade': 0.9, 'output': ['Y', 'Y', 'honor']},
{'allow_certificate': False, 'whitelisted': True, 'grade': 0.8, 'output': ['N', 'Y', 'honor']},
{'allow_certificate': False, 'whitelisted': None, 'grade': 0.8, 'output': ['N', 'Y', 'honor']},
{'allow_certificate': True, 'whitelisted': None, 'grade': None, 'output': ['Y', 'Y', 'honor']},
{'allow_certificate': False, 'whitelisted': True, 'grade': None, 'output': ['N', 'Y', 'honor']}
)
def test_certificate_info_for_user_when_grade_changes(self, allow_certificate, whitelisted, grade, output):
"""
        Verify that certificate_info_for_user works as expected in the scenario where problem grading
        changes after certificates have already been generated. In that case `Certificate delivered`
        should not depend on the student's current certificate eligibility, since eligibility can
        change over time.
"""
student = UserFactory()
student.profile.allow_certificate = allow_certificate
student.profile.save()
certificate1 = GeneratedCertificateFactory.create(
user=student,
course_id=self.instructor_paced_course.id,
status=CertificateStatuses.downloadable,
mode='honor'
)
certificate2 = GeneratedCertificateFactory.create(
user=student,
course_id=self.self_paced_course.id,
status=CertificateStatuses.downloadable,
mode='honor'
)
# for instructor paced course
certificate_info = certificate_info_for_user(
student, self.instructor_paced_course.id, grade,
whitelisted, certificate1
)
self.assertEqual(certificate_info, output)
# for self paced course
certificate_info = certificate_info_for_user(
student, self.self_paced_course.id, grade,
whitelisted, certificate2
)
self.assertEqual(certificate_info, output)
@unpack
@data(
{'allow_certificate': True, 'whitelisted': False, 'grade': 0.8, 'mode': 'audit', 'output': ['N', 'N', 'N/A']},
{'allow_certificate': True, 'whitelisted': True, 'grade': 0.8, 'mode': 'audit', 'output': ['Y', 'N', 'N/A']},
{'allow_certificate': True, 'whitelisted': False, 'grade': 0.8, 'mode': 'verified', 'output': ['Y', 'N', 'N/A']}
)
def test_certificate_info_for_user_with_course_modes(self, allow_certificate, whitelisted, grade, mode, output):
"""
Verify that certificate_info_for_user works with course modes.
"""
user = UserFactory.create()
user.profile.allow_certificate = allow_certificate
user.profile.save()
_ = CourseEnrollment.enroll(user, self.instructor_paced_course.id, mode)
certificate_info = certificate_info_for_user(
user, self.instructor_paced_course.id, grade,
whitelisted, user_certificate=None
)
self.assertEqual(certificate_info, output)
def test_course_ids_with_certs_for_user(self):
# Create one user with certs and one without
student_no_certs = UserFactory()
student_with_certs = UserFactory()
student_with_certs.profile.allow_certificate = True
student_with_certs.profile.save()
# Set up a couple of courses
course_1 = CourseFactory.create()
course_2 = CourseFactory.create()
# Generate certificates
GeneratedCertificateFactory.create(
user=student_with_certs,
course_id=course_1.id,
status=CertificateStatuses.downloadable,
mode='honor'
)
GeneratedCertificateFactory.create(
user=student_with_certs,
course_id=course_2.id,
status=CertificateStatuses.downloadable,
mode='honor'
)
# User with no certs should return an empty set.
self.assertSetEqual(
GeneratedCertificate.course_ids_with_certs_for_user(student_no_certs),
set()
)
# User with certs should return a set with the two course_ids
self.assertSetEqual(
GeneratedCertificate.course_ids_with_certs_for_user(student_with_certs),
{course_1.id, course_2.id}
)
@patch.dict(settings.FEATURES, {'ENABLE_PREREQUISITE_COURSES': True})
def test_course_milestone_collected(self):
student = UserFactory()
course = CourseFactory.create(org='edx', number='998', display_name='Test Course')
pre_requisite_course = CourseFactory.create(org='edx', number='999', display_name='Pre requisite Course')
# set pre-requisite course
set_prerequisite_courses(course.id, [unicode(pre_requisite_course.id)])
# get milestones collected by user before completing the pre-requisite course
completed_milestones = milestones_achieved_by_user(student, unicode(pre_requisite_course.id))
self.assertEqual(len(completed_milestones), 0)
GeneratedCertificateFactory.create(
user=student,
course_id=pre_requisite_course.id,
status=CertificateStatuses.generating,
mode='verified'
)
# get milestones collected by user after user has completed the pre-requisite course
completed_milestones = milestones_achieved_by_user(student, unicode(pre_requisite_course.id))
self.assertEqual(len(completed_milestones), 1)
self.assertEqual(completed_milestones[0]['namespace'], unicode(pre_requisite_course.id))
@patch.dict(settings.FEATURES, {'ENABLE_OPENBADGES': True})
@patch('badges.backends.badgr.BadgrBackend', spec=True)
def test_badge_callback(self, handler):
student = UserFactory()
course = CourseFactory.create(org='edx', number='998', display_name='Test Course', issue_badges=True)
CourseCompleteImageConfigurationFactory()
CourseEnrollmentFactory(user=student, course_id=course.location.course_key, mode='honor')
cert = GeneratedCertificateFactory.create(
user=student,
course_id=course.id,
status=CertificateStatuses.generating,
mode='verified'
)
cert.status = CertificateStatuses.downloadable
cert.save()
self.assertTrue(handler.return_value.award.called)
| agpl-3.0 | -7,107,257,180,835,421,000 | 43.334783 | 120 | 0.64872 | false |
oihane/odoo | addons/product/report/__init__.py | 452 | 1080 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import product_pricelist
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 6,273,127,101,776,185,000 | 44 | 79 | 0.612037 | false |
msmolens/VTK | ThirdParty/ZopeInterface/zope/interface/tests/test_interface.py | 30 | 72591 | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test Interface implementation
"""
import unittest
_marker = object()
class _SilencePy3Deprecations(unittest.TestCase):
# silence deprecation warnings under py3
def failUnless(self, expr):
# St00pid speling.
return self.assertTrue(expr)
def failIf(self, expr):
# St00pid speling.
return self.assertFalse(expr)
class Test_invariant(unittest.TestCase):
def test_w_single(self):
from zope.interface.interface import invariant
from zope.interface.interface import TAGGED_DATA
def _check(*args, **kw):
pass
class Foo(object):
invariant(_check)
self.assertEqual(getattr(Foo, TAGGED_DATA, None),
{'invariants': [_check]})
def test_w_multiple(self):
from zope.interface.interface import invariant
from zope.interface.interface import TAGGED_DATA
def _check(*args, **kw):
pass
def _another_check(*args, **kw):
pass
class Foo(object):
invariant(_check)
invariant(_another_check)
self.assertEqual(getattr(Foo, TAGGED_DATA, None),
{'invariants': [_check, _another_check]})
class Test_taggedValue(unittest.TestCase):
def test_w_single(self):
from zope.interface.interface import taggedValue
from zope.interface.interface import TAGGED_DATA
class Foo(object):
taggedValue('bar', ['baz'])
self.assertEqual(getattr(Foo, TAGGED_DATA, None),
{'bar': ['baz']})
def test_w_multiple(self):
from zope.interface.interface import taggedValue
from zope.interface.interface import TAGGED_DATA
class Foo(object):
taggedValue('bar', ['baz'])
taggedValue('qux', 'spam')
self.assertEqual(getattr(Foo, TAGGED_DATA, None),
{'bar': ['baz'], 'qux': 'spam'})
def test_w_multiple_overwriting(self):
from zope.interface.interface import taggedValue
from zope.interface.interface import TAGGED_DATA
class Foo(object):
taggedValue('bar', ['baz'])
taggedValue('qux', 'spam')
taggedValue('bar', 'frob')
self.assertEqual(getattr(Foo, TAGGED_DATA, None),
{'bar': 'frob', 'qux': 'spam'})
class ElementTests(unittest.TestCase):
DEFAULT_NAME = 'AnElement'
def _getTargetClass(self):
from zope.interface.interface import Element
return Element
def _makeOne(self, name=None, __doc__=_marker):
if name is None:
name = self.DEFAULT_NAME
if __doc__ is _marker:
return self._getTargetClass()(name)
return self._getTargetClass()(name, __doc__)
def test_ctor_defaults(self):
element = self._makeOne()
self.assertEqual(element.__name__, self.DEFAULT_NAME)
self.assertEqual(element.getName(), self.DEFAULT_NAME)
self.assertEqual(element.__doc__, '')
self.assertEqual(element.getDoc(), '')
self.assertEqual(list(element.getTaggedValueTags()), [])
def test_ctor_no_doc_space_in_name(self):
element = self._makeOne('An Element')
self.assertEqual(element.__name__, None)
self.assertEqual(element.__doc__, 'An Element')
def test_getTaggedValue_miss(self):
element = self._makeOne()
self.assertRaises(KeyError, element.getTaggedValue, 'nonesuch')
def test_queryTaggedValue_miss(self):
element = self._makeOne()
self.assertEqual(element.queryTaggedValue('nonesuch'), None)
def test_queryTaggedValue_miss_w_default(self):
element = self._makeOne()
self.assertEqual(element.queryTaggedValue('nonesuch', 'bar'), 'bar')
def test_setTaggedValue(self):
element = self._makeOne()
element.setTaggedValue('foo', 'bar')
self.assertEqual(list(element.getTaggedValueTags()), ['foo'])
self.assertEqual(element.getTaggedValue('foo'), 'bar')
self.assertEqual(element.queryTaggedValue('foo'), 'bar')
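# The specification/interface tests below lean on two helpers that are presumably defined
# further down in this module: _Monkey, a small context manager that temporarily patches
# attributes on zope.interface.interface, and DummyDependent, a minimal change-subscriber stub.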
class SpecificationBasePyTests(_SilencePy3Deprecations):
def _getTargetClass(self):
from zope.interface.interface import SpecificationBasePy
return SpecificationBasePy
def _makeOne(self):
return self._getTargetClass()()
def test_providedBy_miss(self):
from zope.interface import interface
from zope.interface.declarations import _empty
sb = self._makeOne()
def _providedBy(obj):
return _empty
with _Monkey(interface, providedBy=_providedBy):
self.failIf(sb.providedBy(object()))
def test_providedBy_hit(self):
from zope.interface import interface
sb = self._makeOne()
class _Decl(object):
_implied = {sb: {},}
def _providedBy(obj):
return _Decl()
with _Monkey(interface, providedBy=_providedBy):
self.failUnless(sb.providedBy(object()))
def test_implementedBy_miss(self):
from zope.interface import interface
from zope.interface.declarations import _empty
sb = self._makeOne()
def _implementedBy(obj):
return _empty
with _Monkey(interface, implementedBy=_implementedBy):
self.failIf(sb.implementedBy(object()))
def test_implementedBy_hit(self):
from zope.interface import interface
sb = self._makeOne()
class _Decl(object):
_implied = {sb: {},}
def _implementedBy(obj):
return _Decl()
with _Monkey(interface, implementedBy=_implementedBy):
self.failUnless(sb.implementedBy(object()))
def test_isOrExtends_miss(self):
sb = self._makeOne()
sb._implied = {} # not defined by SpecificationBasePy
self.failIf(sb.isOrExtends(object()))
def test_isOrExtends_hit(self):
sb = self._makeOne()
testing = object()
sb._implied = {testing: {}} # not defined by SpecificationBasePy
        self.failUnless(sb.isOrExtends(testing))
def test___call___miss(self):
sb = self._makeOne()
sb._implied = {} # not defined by SpecificationBasePy
        self.failIf(sb(object()))
def test___call___hit(self):
sb = self._makeOne()
testing = object()
sb._implied = {testing: {}} # not defined by SpecificationBasePy
self.failUnless(sb(testing))
class InterfaceBasePyTests(_SilencePy3Deprecations):
def _getTargetClass(self):
from zope.interface.interface import InterfaceBasePy
return InterfaceBasePy
def _makeOne(self, object_should_provide):
class IB(self._getTargetClass()):
def _call_conform(self, conform):
return conform(self)
def providedBy(self, obj):
return object_should_provide
return IB()
def test___call___w___conform___returning_value(self):
ib = self._makeOne(False)
conformed = object()
class _Adapted(object):
def __conform__(self, iface):
return conformed
self.failUnless(ib(_Adapted()) is conformed)
def test___call___w___conform___miss_ob_provides(self):
ib = self._makeOne(True)
class _Adapted(object):
def __conform__(self, iface):
return None
adapted = _Adapted()
self.failUnless(ib(adapted) is adapted)
def test___call___wo___conform___ob_no_provides_w_alternate(self):
ib = self._makeOne(False)
adapted = object()
alternate = object()
self.failUnless(ib(adapted, alternate) is alternate)
def test___call___w___conform___ob_no_provides_wo_alternate(self):
ib = self._makeOne(False)
adapted = object()
self.assertRaises(TypeError, ib, adapted)
def test___adapt___ob_provides(self):
ib = self._makeOne(True)
adapted = object()
self.failUnless(ib.__adapt__(adapted) is adapted)
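    # adapter_hooks are consulted in registration order and the first hook returning a
    # non-None value wins; the next test exercises this by registering a recording "miss"
    # hook ahead of a "hit" hook.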
def test___adapt___ob_no_provides_uses_hooks(self):
from zope.interface import interface
ib = self._makeOne(False)
adapted = object()
_missed = []
def _hook_miss(iface, obj):
_missed.append((iface, obj))
return None
def _hook_hit(iface, obj):
return obj
with _Monkey(interface, adapter_hooks=[_hook_miss, _hook_hit]):
self.failUnless(ib.__adapt__(adapted) is adapted)
self.assertEqual(_missed, [(ib, adapted)])
class SpecificationTests(_SilencePy3Deprecations):
def _getTargetClass(self):
from zope.interface.interface import Specification
return Specification
def _makeOne(self, bases=_marker):
if bases is _marker:
return self._getTargetClass()()
return self._getTargetClass()(bases)
def test_ctor(self):
from zope.interface.interface import Interface
spec = self._makeOne()
self.assertEqual(spec.__bases__, ())
self.assertEqual(len(spec._implied), 2)
self.failUnless(spec in spec._implied)
self.failUnless(Interface in spec._implied)
self.assertEqual(len(spec.dependents), 0)
def test_subscribe_first_time(self):
spec = self._makeOne()
dep = DummyDependent()
spec.subscribe(dep)
self.assertEqual(len(spec.dependents), 1)
self.assertEqual(spec.dependents[dep], 1)
def test_subscribe_again(self):
spec = self._makeOne()
dep = DummyDependent()
spec.subscribe(dep)
spec.subscribe(dep)
self.assertEqual(spec.dependents[dep], 2)
def test_unsubscribe_miss(self):
spec = self._makeOne()
dep = DummyDependent()
self.assertRaises(KeyError, spec.unsubscribe, dep)
def test_unsubscribe(self):
spec = self._makeOne()
dep = DummyDependent()
spec.subscribe(dep)
spec.subscribe(dep)
spec.unsubscribe(dep)
self.assertEqual(spec.dependents[dep], 1)
spec.unsubscribe(dep)
self.failIf(dep in spec.dependents)
def test___setBases_subscribes_bases_and_notifies_dependents(self):
from zope.interface.interface import Interface
spec = self._makeOne()
dep = DummyDependent()
spec.subscribe(dep)
class I(Interface):
pass
class J(Interface):
pass
spec.__bases__ = (I,)
self.assertEqual(dep._changed, [spec])
self.assertEqual(I.dependents[spec], 1)
spec.__bases__ = (J,)
self.assertEqual(I.dependents.get(spec), None)
self.assertEqual(J.dependents[spec], 1)
def test_changed_clears_volatiles_and_implied(self):
from zope.interface.interface import Interface
class I(Interface):
pass
spec = self._makeOne()
spec._v_attrs = 'Foo'
spec._implied[I] = ()
spec.changed(spec)
self.failUnless(getattr(spec, '_v_attrs', self) is self)
self.failIf(I in spec._implied)
class InterfaceClassTests(_SilencePy3Deprecations):
def _getTargetClass(self):
from zope.interface.interface import InterfaceClass
return InterfaceClass
def _makeOne(self, name='ITest', bases=(), attrs=None, __doc__=None,
__module__=None):
return self._getTargetClass()(name, bases, attrs, __doc__, __module__)
def test_ctor_defaults(self):
klass = self._getTargetClass()
inst = klass('ITesting')
self.assertEqual(inst.__name__, 'ITesting')
self.assertEqual(inst.__doc__, '')
self.assertEqual(inst.__bases__, ())
self.assertEqual(inst.getBases(), ())
def test_ctor_bad_bases(self):
klass = self._getTargetClass()
self.assertRaises(TypeError, klass, 'ITesting', (object(),))
def test_ctor_w_attrs_attrib_methods(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
klass = self._getTargetClass()
inst = klass('ITesting', attrs=ATTRS)
self.assertEqual(inst.__name__, 'ITesting')
self.assertEqual(inst.__doc__, '')
self.assertEqual(inst.__bases__, ())
self.assertEqual(inst.names(), ATTRS.keys())
def test_ctor_attrs_w___locals__(self):
ATTRS = {'__locals__': {}}
klass = self._getTargetClass()
inst = klass('ITesting', attrs=ATTRS)
self.assertEqual(inst.__name__, 'ITesting')
self.assertEqual(inst.__doc__, '')
self.assertEqual(inst.__bases__, ())
self.assertEqual(inst.names(), ATTRS.keys())
def test_ctor_attrs_w__decorator_non_return(self):
from zope.interface.interface import _decorator_non_return
ATTRS = {'dropme': _decorator_non_return}
klass = self._getTargetClass()
inst = klass('ITesting', attrs=ATTRS)
self.assertEqual(inst.__name__, 'ITesting')
self.assertEqual(inst.__doc__, '')
self.assertEqual(inst.__bases__, ())
self.assertEqual(list(inst.names()), [])
    def test_ctor_attrs_w_invalid_attr_type(self):
from zope.interface.exceptions import InvalidInterface
ATTRS = {'invalid': object()}
klass = self._getTargetClass()
self.assertRaises(InvalidInterface, klass, 'ITesting', attrs=ATTRS)
def test_interfaces(self):
iface = self._makeOne()
self.assertEqual(list(iface.interfaces()), [iface])
def test_getBases(self):
iface = self._makeOne()
sub = self._makeOne('ISub', bases=(iface,))
self.assertEqual(sub.getBases(), (iface,))
def test_isEqualOrExtendedBy_identity(self):
iface = self._makeOne()
self.failUnless(iface.isEqualOrExtendedBy(iface))
def test_isEqualOrExtendedBy_subiface(self):
iface = self._makeOne()
sub = self._makeOne('ISub', bases=(iface,))
self.failUnless(iface.isEqualOrExtendedBy(sub))
self.failIf(sub.isEqualOrExtendedBy(iface))
def test_isEqualOrExtendedBy_unrelated(self):
one = self._makeOne('One')
another = self._makeOne('Another')
self.failIf(one.isEqualOrExtendedBy(another))
self.failIf(another.isEqualOrExtendedBy(one))
def test_names_w_all_False_ignores_bases(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
BASE_ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
DERIVED_ATTRS = {'baz': Attribute('Baz', ''),
}
base = self._makeOne('IBase', attrs=BASE_ATTRS)
derived = self._makeOne('IDerived', bases=(base,), attrs=DERIVED_ATTRS)
self.assertEqual(sorted(derived.names(all=False)), ['baz'])
def test_names_w_all_True_no_bases(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
one = self._makeOne(attrs=ATTRS)
self.assertEqual(sorted(one.names(all=True)), ['bar', 'foo'])
def test_names_w_all_True_w_bases_simple(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
BASE_ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
DERIVED_ATTRS = {'baz': Attribute('Baz', ''),
}
base = self._makeOne('IBase', attrs=BASE_ATTRS)
derived = self._makeOne('IDerived', bases=(base,), attrs=DERIVED_ATTRS)
self.assertEqual(sorted(derived.names(all=True)), ['bar', 'baz', 'foo'])
def test_names_w_all_True_bases_w_same_names(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
def _foo():
"""DOCSTRING"""
BASE_ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
DERIVED_ATTRS = {'foo': fromFunction(_foo),
'baz': Attribute('Baz', ''),
}
base = self._makeOne('IBase', attrs=BASE_ATTRS)
derived = self._makeOne('IDerived', bases=(base,), attrs=DERIVED_ATTRS)
self.assertEqual(sorted(derived.names(all=True)), ['bar', 'baz', 'foo'])
def test___iter__(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
def _foo():
"""DOCSTRING"""
BASE_ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
DERIVED_ATTRS = {'foo': fromFunction(_foo),
'baz': Attribute('Baz', ''),
}
base = self._makeOne('IBase', attrs=BASE_ATTRS)
derived = self._makeOne('IDerived', bases=(base,), attrs=DERIVED_ATTRS)
self.assertEqual(sorted(derived), ['bar', 'baz', 'foo'])
def test_namesAndDescriptions_w_all_False_ignores_bases(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
BASE_ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
DERIVED_ATTRS = {'baz': Attribute('Baz', ''),
}
base = self._makeOne('IBase', attrs=BASE_ATTRS)
derived = self._makeOne('IDerived', bases=(base,), attrs=DERIVED_ATTRS)
self.assertEqual(sorted(derived.namesAndDescriptions(all=False)),
[('baz', DERIVED_ATTRS['baz']),
])
def test_namesAndDescriptions_w_all_True_no_bases(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
one = self._makeOne(attrs=ATTRS)
        self.assertEqual(sorted(one.namesAndDescriptions(all=True)),
[('bar', ATTRS['bar']),
('foo', ATTRS['foo']),
])
def test_namesAndDescriptions_w_all_True_simple(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
BASE_ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
DERIVED_ATTRS = {'baz': Attribute('Baz', ''),
}
base = self._makeOne('IBase', attrs=BASE_ATTRS)
derived = self._makeOne('IDerived', bases=(base,), attrs=DERIVED_ATTRS)
self.assertEqual(sorted(derived.namesAndDescriptions(all=True)),
[('bar', BASE_ATTRS['bar']),
('baz', DERIVED_ATTRS['baz']),
('foo', BASE_ATTRS['foo']),
])
def test_namesAndDescriptions_w_all_True_bases_w_same_names(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
def _foo():
"""DOCSTRING"""
BASE_ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
DERIVED_ATTRS = {'foo': fromFunction(_foo),
'baz': Attribute('Baz', ''),
}
base = self._makeOne('IBase', attrs=BASE_ATTRS)
derived = self._makeOne('IDerived', bases=(base,), attrs=DERIVED_ATTRS)
self.assertEqual(sorted(derived.namesAndDescriptions(all=True)),
[('bar', BASE_ATTRS['bar']),
('baz', DERIVED_ATTRS['baz']),
('foo', DERIVED_ATTRS['foo']),
])
def test_getDescriptionFor_miss(self):
one = self._makeOne()
self.assertRaises(KeyError, one.getDescriptionFor, 'nonesuch')
def test_getDescriptionFor_hit(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
one = self._makeOne(attrs=ATTRS)
self.assertEqual(one.getDescriptionFor('foo'), ATTRS['foo'])
self.assertEqual(one.getDescriptionFor('bar'), ATTRS['bar'])
def test___getitem___miss(self):
one = self._makeOne()
def _test():
return one['nonesuch']
self.assertRaises(KeyError, _test)
def test___getitem___hit(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
one = self._makeOne(attrs=ATTRS)
self.assertEqual(one['foo'], ATTRS['foo'])
self.assertEqual(one['bar'], ATTRS['bar'])
def test___contains___miss(self):
one = self._makeOne()
self.failIf('nonesuch' in one)
def test___contains___hit(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
one = self._makeOne(attrs=ATTRS)
self.failUnless('foo' in one)
self.failUnless('bar' in one)
def test_direct_miss(self):
one = self._makeOne()
self.assertEqual(one.direct('nonesuch'), None)
def test_direct_hit_local_miss_bases(self):
from zope.interface.interface import Attribute
from zope.interface.interface import fromFunction
def _bar():
"""DOCSTRING"""
def _foo():
"""DOCSTRING"""
BASE_ATTRS = {'foo': Attribute('Foo', ''),
'bar': fromFunction(_bar),
}
DERIVED_ATTRS = {'foo': fromFunction(_foo),
'baz': Attribute('Baz', ''),
}
base = self._makeOne('IBase', attrs=BASE_ATTRS)
derived = self._makeOne('IDerived', bases=(base,), attrs=DERIVED_ATTRS)
self.assertEqual(derived.direct('foo'), DERIVED_ATTRS['foo'])
self.assertEqual(derived.direct('baz'), DERIVED_ATTRS['baz'])
self.assertEqual(derived.direct('bar'), None)
def test_queryDescriptionFor_miss(self):
iface = self._makeOne()
self.assertEqual(iface.queryDescriptionFor('nonesuch'), None)
def test_queryDescriptionFor_hit(self):
from zope.interface import Attribute
ATTRS = {'attr': Attribute('Title', 'Description')}
iface = self._makeOne(attrs=ATTRS)
self.assertEqual(iface.queryDescriptionFor('attr'), ATTRS['attr'])
#TODO (or not: 'deferred' looks like a fossil to me.
#def test_deferred_cache_hit(self):
#def test_deferred_cache_miss(self):
#def test_deferred_cache_miss_w_bases(self):
def test_validateInvariants_pass(self):
_called_with = []
def _passable(*args, **kw):
_called_with.append((args, kw))
return True
iface = self._makeOne()
obj = object()
iface.setTaggedValue('invariants', [_passable])
self.assertEqual(iface.validateInvariants(obj), None)
self.assertEqual(_called_with, [((obj,), {})])
def test_validateInvariants_fail_wo_errors_passed(self):
from zope.interface.exceptions import Invalid
_passable_called_with = []
def _passable(*args, **kw):
_passable_called_with.append((args, kw))
return True
_fail_called_with = []
def _fail(*args, **kw):
_fail_called_with.append((args, kw))
raise Invalid
iface = self._makeOne()
obj = object()
iface.setTaggedValue('invariants', [_passable, _fail])
self.assertRaises(Invalid, iface.validateInvariants, obj)
self.assertEqual(_passable_called_with, [((obj,), {})])
self.assertEqual(_fail_called_with, [((obj,), {})])
def test_validateInvariants_fail_w_errors_passed(self):
from zope.interface.exceptions import Invalid
_errors = []
_fail_called_with = []
def _fail(*args, **kw):
_fail_called_with.append((args, kw))
raise Invalid
iface = self._makeOne()
obj = object()
iface.setTaggedValue('invariants', [_fail])
self.assertRaises(Invalid, iface.validateInvariants, obj, _errors)
self.assertEqual(_fail_called_with, [((obj,), {})])
self.assertEqual(len(_errors), 1)
self.failUnless(isinstance(_errors[0], Invalid))
def test_validateInvariants_fail_in_base_wo_errors_passed(self):
from zope.interface.exceptions import Invalid
_passable_called_with = []
def _passable(*args, **kw):
_passable_called_with.append((args, kw))
return True
_fail_called_with = []
def _fail(*args, **kw):
_fail_called_with.append((args, kw))
raise Invalid
base = self._makeOne('IBase')
derived = self._makeOne('IDerived', (base,))
obj = object()
base.setTaggedValue('invariants', [_fail])
derived.setTaggedValue('invariants', [_passable])
self.assertRaises(Invalid, derived.validateInvariants, obj)
self.assertEqual(_passable_called_with, [((obj,), {})])
self.assertEqual(_fail_called_with, [((obj,), {})])
#TODO
def test_validateInvariants_fail_in_base_w_errors_passed(self):
from zope.interface.exceptions import Invalid
_errors = []
_passable_called_with = []
def _passable(*args, **kw):
_passable_called_with.append((args, kw))
return True
_fail_called_with = []
def _fail(*args, **kw):
_fail_called_with.append((args, kw))
raise Invalid
base = self._makeOne('IBase')
derived = self._makeOne('IDerived', (base,))
obj = object()
base.setTaggedValue('invariants', [_fail])
derived.setTaggedValue('invariants', [_passable])
self.assertRaises(Invalid, derived.validateInvariants, obj, _errors)
self.assertEqual(_passable_called_with, [((obj,), {})])
self.assertEqual(_fail_called_with, [((obj,), {})])
self.assertEqual(len(_errors), 1)
self.failUnless(isinstance(_errors[0], Invalid))
def test___reduce__(self):
iface = self._makeOne('PickleMe')
self.assertEqual(iface.__reduce__(), 'PickleMe')
def test___hash___normal(self):
iface = self._makeOne('HashMe')
self.assertEqual(hash(iface),
hash((('HashMe',
'zope.interface.tests.test_interface'))))
def test___hash___missing_required_attrs(self):
import warnings
try:
from warnings import catch_warnings
except ImportError: # Python 2.5
return
class Derived(self._getTargetClass()):
def __init__(self):
pass # Don't call base class.
derived = Derived()
with catch_warnings(record=True) as warned:
warnings.simplefilter('always') # see LP #825249
self.assertEqual(hash(derived), 1)
self.assertEqual(len(warned), 1)
self.failUnless(warned[0].category is UserWarning)
self.assertEqual(str(warned[0].message),
'Hashing uninitialized InterfaceClass instance')
def test_comparison_with_None(self):
iface = self._makeOne()
self.failUnless(iface < None)
self.failUnless(iface <= None)
self.failIf(iface == None)
self.failUnless(iface != None)
self.failIf(iface >= None)
self.failIf(iface > None)
self.failIf(None < iface)
self.failIf(None <= iface)
self.failIf(None == iface)
self.failUnless(None != iface)
self.failUnless(None >= iface)
self.failUnless(None > iface)
def test_comparison_with_same_instance(self):
iface = self._makeOne()
self.failIf(iface < iface)
self.failUnless(iface <= iface)
self.failUnless(iface == iface)
self.failIf(iface != iface)
self.failUnless(iface >= iface)
self.failIf(iface > iface)
def test_comparison_with_same_named_instance_in_other_module(self):
one = self._makeOne('IName', __module__='zope.interface.tests.one')
other = self._makeOne('IName', __module__='zope.interface.tests.other')
self.failUnless(one < other)
self.failIf(other < one)
self.failUnless(one <= other)
self.failIf(other <= one)
self.failIf(one == other)
self.failIf(other == one)
self.failUnless(one != other)
self.failUnless(other != one)
self.failIf(one >= other)
self.failUnless(other >= one)
self.failIf(one > other)
self.failUnless(other > one)
class InterfaceTests(_SilencePy3Deprecations):
def test_attributes_link_to_interface(self):
from zope.interface import Interface
from zope.interface import Attribute
class I1(Interface):
attr = Attribute("My attr")
self.failUnless(I1['attr'].interface is I1)
def test_methods_link_to_interface(self):
from zope.interface import Interface
class I1(Interface):
def method(foo, bar, bingo):
pass
self.failUnless(I1['method'].interface is I1)
def test_classImplements_simple(self):
from zope.interface import Interface
from zope.interface import implementedBy
from zope.interface import providedBy
class ICurrent(Interface):
def method1(a, b):
pass
def method2(a, b):
pass
class IOther(Interface):
pass
class Current(object):
__implemented__ = ICurrent
def method1(self, a, b):
return 1
def method2(self, a, b):
return 2
current = Current()
self.failUnless(ICurrent.implementedBy(Current))
self.failIf(IOther.implementedBy(Current))
self.failUnless(ICurrent in implementedBy(Current))
self.failIf(IOther in implementedBy(Current))
self.failUnless(ICurrent in providedBy(current))
self.failIf(IOther in providedBy(current))
def test_classImplements_base_not_derived(self):
from zope.interface import Interface
from zope.interface import implementedBy
from zope.interface import providedBy
class IBase(Interface):
def method():
pass
class IDerived(IBase):
pass
class Current():
__implemented__ = IBase
def method(self):
pass
current = Current()
self.failUnless(IBase.implementedBy(Current))
self.failIf(IDerived.implementedBy(Current))
self.failUnless(IBase in implementedBy(Current))
self.failIf(IDerived in implementedBy(Current))
self.failUnless(IBase in providedBy(current))
self.failIf(IDerived in providedBy(current))
def test_classImplements_base_and_derived(self):
from zope.interface import Interface
from zope.interface import implementedBy
from zope.interface import providedBy
class IBase(Interface):
def method():
pass
class IDerived(IBase):
pass
class Current(object):
__implemented__ = IDerived
def method(self):
pass
current = Current()
self.failUnless(IBase.implementedBy(Current))
self.failUnless(IDerived.implementedBy(Current))
self.failIf(IBase in implementedBy(Current))
self.failUnless(IBase in implementedBy(Current).flattened())
self.failUnless(IDerived in implementedBy(Current))
self.failIf(IBase in providedBy(current))
self.failUnless(IBase in providedBy(current).flattened())
self.failUnless(IDerived in providedBy(current))
def test_classImplements_multiple(self):
from zope.interface import Interface
from zope.interface import implementedBy
from zope.interface import providedBy
class ILeft(Interface):
def method():
pass
class IRight(ILeft):
pass
class Left(object):
__implemented__ = ILeft
def method(self):
pass
class Right(object):
__implemented__ = IRight
class Ambi(Left, Right):
pass
ambi = Ambi()
self.failUnless(ILeft.implementedBy(Ambi))
self.failUnless(IRight.implementedBy(Ambi))
self.failUnless(ILeft in implementedBy(Ambi))
self.failUnless(IRight in implementedBy(Ambi))
self.failUnless(ILeft in providedBy(ambi))
self.failUnless(IRight in providedBy(ambi))
def test_classImplements_multiple_w_explict_implements(self):
from zope.interface import Interface
from zope.interface import implementedBy
from zope.interface import providedBy
class ILeft(Interface):
def method():
pass
class IRight(ILeft):
pass
class IOther(Interface):
pass
class Left():
__implemented__ = ILeft
def method(self):
pass
class Right(object):
__implemented__ = IRight
class Other(object):
__implemented__ = IOther
class Mixed(Left, Right):
__implemented__ = Left.__implemented__, Other.__implemented__
mixed = Mixed()
self.failUnless(ILeft.implementedBy(Mixed))
self.failIf(IRight.implementedBy(Mixed))
self.failUnless(IOther.implementedBy(Mixed))
self.failUnless(ILeft in implementedBy(Mixed))
self.failIf(IRight in implementedBy(Mixed))
self.failUnless(IOther in implementedBy(Mixed))
self.failUnless(ILeft in providedBy(mixed))
self.failIf(IRight in providedBy(mixed))
self.failUnless(IOther in providedBy(mixed))
def test_interface_deferred_class_method_broken(self):
from zope.interface import Interface
from zope.interface.exceptions import BrokenImplementation
class IDeferring(Interface):
def method():
pass
class Deferring(IDeferring.deferred()):
__implemented__ = IDeferring
deferring = Deferring()
self.assertRaises(BrokenImplementation, deferring.method)
def testInterfaceExtendsInterface(self):
from zope.interface import Interface
new = Interface.__class__
FunInterface = new('FunInterface')
BarInterface = new('BarInterface', [FunInterface])
BobInterface = new('BobInterface')
BazInterface = new('BazInterface', [BobInterface, BarInterface])
self.failUnless(BazInterface.extends(BobInterface))
self.failUnless(BazInterface.extends(BarInterface))
self.failUnless(BazInterface.extends(FunInterface))
self.failIf(BobInterface.extends(FunInterface))
self.failIf(BobInterface.extends(BarInterface))
self.failUnless(BarInterface.extends(FunInterface))
self.failIf(BarInterface.extends(BazInterface))
def test_verifyClass(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface.verify import verifyClass
from zope.interface._compat import _u
class ICheckMe(Interface):
attr = Attribute(_u('My attr'))
def method():
pass
class CheckMe(object):
__implemented__ = ICheckMe
attr = 'value'
def method(self):
pass
self.failUnless(verifyClass(ICheckMe, CheckMe))
def test_verifyObject(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface.verify import verifyObject
from zope.interface._compat import _u
class ICheckMe(Interface):
attr = Attribute(_u('My attr'))
def method():
pass
class CheckMe(object):
__implemented__ = ICheckMe
attr = 'value'
def method(self):
pass
check_me = CheckMe()
self.failUnless(verifyObject(ICheckMe, check_me))
def test_interface_object_provides_Interface(self):
from zope.interface import Interface
class AnInterface(Interface):
pass
self.failUnless(Interface.providedBy(AnInterface))
def test_names_simple(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface._compat import _u
class ISimple(Interface):
attr = Attribute(_u('My attr'))
def method():
pass
self.assertEqual(sorted(ISimple.names()), ['attr', 'method'])
def test_names_derived(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface._compat import _u
class IBase(Interface):
attr = Attribute(_u('My attr'))
def method():
pass
class IDerived(IBase):
attr2 = Attribute(_u('My attr2'))
def method():
pass
def method2():
pass
self.assertEqual(sorted(IDerived.names()),
['attr2', 'method', 'method2'])
self.assertEqual(sorted(IDerived.names(all=True)),
['attr', 'attr2', 'method', 'method2'])
def test_namesAndDescriptions_simple(self):
from zope.interface import Attribute
from zope.interface.interface import Method
from zope.interface import Interface
from zope.interface._compat import _u
class ISimple(Interface):
attr = Attribute(_u('My attr'))
def method():
"My method"
name_values = sorted(ISimple.namesAndDescriptions())
self.assertEqual(len(name_values), 2)
self.assertEqual(name_values[0][0], 'attr')
self.failUnless(isinstance(name_values[0][1], Attribute))
self.assertEqual(name_values[0][1].__name__, 'attr')
self.assertEqual(name_values[0][1].__doc__, 'My attr')
self.assertEqual(name_values[1][0], 'method')
self.failUnless(isinstance(name_values[1][1], Method))
self.assertEqual(name_values[1][1].__name__, 'method')
self.assertEqual(name_values[1][1].__doc__, 'My method')
def test_namesAndDescriptions_derived(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface.interface import Method
from zope.interface._compat import _u
class IBase(Interface):
attr = Attribute(_u('My attr'))
def method():
"My method"
class IDerived(IBase):
attr2 = Attribute(_u('My attr2'))
def method():
"My method, overridden"
def method2():
"My method2"
name_values = sorted(IDerived.namesAndDescriptions())
self.assertEqual(len(name_values), 3)
self.assertEqual(name_values[0][0], 'attr2')
self.failUnless(isinstance(name_values[0][1], Attribute))
self.assertEqual(name_values[0][1].__name__, 'attr2')
self.assertEqual(name_values[0][1].__doc__, 'My attr2')
self.assertEqual(name_values[1][0], 'method')
self.failUnless(isinstance(name_values[1][1], Method))
self.assertEqual(name_values[1][1].__name__, 'method')
self.assertEqual(name_values[1][1].__doc__, 'My method, overridden')
self.assertEqual(name_values[2][0], 'method2')
self.failUnless(isinstance(name_values[2][1], Method))
self.assertEqual(name_values[2][1].__name__, 'method2')
self.assertEqual(name_values[2][1].__doc__, 'My method2')
name_values = sorted(IDerived.namesAndDescriptions(all=True))
self.assertEqual(len(name_values), 4)
self.assertEqual(name_values[0][0], 'attr')
self.failUnless(isinstance(name_values[0][1], Attribute))
self.assertEqual(name_values[0][1].__name__, 'attr')
self.assertEqual(name_values[0][1].__doc__, 'My attr')
self.assertEqual(name_values[1][0], 'attr2')
self.failUnless(isinstance(name_values[1][1], Attribute))
self.assertEqual(name_values[1][1].__name__, 'attr2')
self.assertEqual(name_values[1][1].__doc__, 'My attr2')
self.assertEqual(name_values[2][0], 'method')
self.failUnless(isinstance(name_values[2][1], Method))
self.assertEqual(name_values[2][1].__name__, 'method')
self.assertEqual(name_values[2][1].__doc__, 'My method, overridden')
self.assertEqual(name_values[3][0], 'method2')
self.failUnless(isinstance(name_values[3][1], Method))
self.assertEqual(name_values[3][1].__name__, 'method2')
self.assertEqual(name_values[3][1].__doc__, 'My method2')
def test_getDescriptionFor_nonesuch_no_default(self):
from zope.interface import Interface
class IEmpty(Interface):
pass
self.assertRaises(KeyError, IEmpty.getDescriptionFor, 'nonesuch')
def test_getDescriptionFor_simple(self):
from zope.interface import Attribute
from zope.interface.interface import Method
from zope.interface import Interface
from zope.interface._compat import _u
class ISimple(Interface):
attr = Attribute(_u('My attr'))
def method():
"My method"
a_desc = ISimple.getDescriptionFor('attr')
self.failUnless(isinstance(a_desc, Attribute))
self.assertEqual(a_desc.__name__, 'attr')
self.assertEqual(a_desc.__doc__, 'My attr')
m_desc = ISimple.getDescriptionFor('method')
self.failUnless(isinstance(m_desc, Method))
self.assertEqual(m_desc.__name__, 'method')
self.assertEqual(m_desc.__doc__, 'My method')
def test_getDescriptionFor_derived(self):
from zope.interface import Attribute
from zope.interface.interface import Method
from zope.interface import Interface
from zope.interface._compat import _u
class IBase(Interface):
attr = Attribute(_u('My attr'))
def method():
"My method"
class IDerived(IBase):
attr2 = Attribute(_u('My attr2'))
def method():
"My method, overridden"
def method2():
"My method2"
a_desc = IDerived.getDescriptionFor('attr')
self.failUnless(isinstance(a_desc, Attribute))
self.assertEqual(a_desc.__name__, 'attr')
self.assertEqual(a_desc.__doc__, 'My attr')
m_desc = IDerived.getDescriptionFor('method')
self.failUnless(isinstance(m_desc, Method))
self.assertEqual(m_desc.__name__, 'method')
self.assertEqual(m_desc.__doc__, 'My method, overridden')
a2_desc = IDerived.getDescriptionFor('attr2')
self.failUnless(isinstance(a2_desc, Attribute))
self.assertEqual(a2_desc.__name__, 'attr2')
self.assertEqual(a2_desc.__doc__, 'My attr2')
m2_desc = IDerived.getDescriptionFor('method2')
self.failUnless(isinstance(m2_desc, Method))
self.assertEqual(m2_desc.__name__, 'method2')
self.assertEqual(m2_desc.__doc__, 'My method2')
def test___getitem__nonesuch(self):
from zope.interface import Interface
class IEmpty(Interface):
pass
self.assertRaises(KeyError, IEmpty.__getitem__, 'nonesuch')
def test___getitem__simple(self):
from zope.interface import Attribute
from zope.interface.interface import Method
from zope.interface import Interface
from zope.interface._compat import _u
class ISimple(Interface):
attr = Attribute(_u('My attr'))
def method():
"My method"
a_desc = ISimple['attr']
self.failUnless(isinstance(a_desc, Attribute))
self.assertEqual(a_desc.__name__, 'attr')
self.assertEqual(a_desc.__doc__, 'My attr')
m_desc = ISimple['method']
self.failUnless(isinstance(m_desc, Method))
self.assertEqual(m_desc.__name__, 'method')
self.assertEqual(m_desc.__doc__, 'My method')
def test___getitem___derived(self):
from zope.interface import Attribute
from zope.interface.interface import Method
from zope.interface import Interface
from zope.interface._compat import _u
class IBase(Interface):
attr = Attribute(_u('My attr'))
def method():
"My method"
class IDerived(IBase):
attr2 = Attribute(_u('My attr2'))
def method():
"My method, overridden"
def method2():
"My method2"
a_desc = IDerived['attr']
self.failUnless(isinstance(a_desc, Attribute))
self.assertEqual(a_desc.__name__, 'attr')
self.assertEqual(a_desc.__doc__, 'My attr')
m_desc = IDerived['method']
self.failUnless(isinstance(m_desc, Method))
self.assertEqual(m_desc.__name__, 'method')
self.assertEqual(m_desc.__doc__, 'My method, overridden')
a2_desc = IDerived['attr2']
self.failUnless(isinstance(a2_desc, Attribute))
self.assertEqual(a2_desc.__name__, 'attr2')
self.assertEqual(a2_desc.__doc__, 'My attr2')
m2_desc = IDerived['method2']
self.failUnless(isinstance(m2_desc, Method))
self.assertEqual(m2_desc.__name__, 'method2')
self.assertEqual(m2_desc.__doc__, 'My method2')
def test___contains__nonesuch(self):
from zope.interface import Interface
class IEmpty(Interface):
pass
self.failIf('nonesuch' in IEmpty)
def test___contains__simple(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface._compat import _u
class ISimple(Interface):
attr = Attribute(_u('My attr'))
def method():
"My method"
self.failUnless('attr' in ISimple)
self.failUnless('method' in ISimple)
def test___contains__derived(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface._compat import _u
class IBase(Interface):
attr = Attribute(_u('My attr'))
def method():
"My method"
class IDerived(IBase):
attr2 = Attribute(_u('My attr2'))
def method():
"My method, overridden"
def method2():
"My method2"
self.failUnless('attr' in IDerived)
self.failUnless('method' in IDerived)
self.failUnless('attr2' in IDerived)
self.failUnless('method2' in IDerived)
def test___iter__empty(self):
from zope.interface import Interface
class IEmpty(Interface):
pass
self.assertEqual(list(IEmpty), [])
def test___iter__simple(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface._compat import _u
class ISimple(Interface):
attr = Attribute(_u('My attr'))
def method():
"My method"
self.assertEqual(sorted(list(ISimple)), ['attr', 'method'])
def test___iter__derived(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface._compat import _u
class IBase(Interface):
attr = Attribute(_u('My attr'))
def method():
"My method"
class IDerived(IBase):
attr2 = Attribute(_u('My attr2'))
def method():
"My method, overridden"
def method2():
"My method2"
self.assertEqual(sorted(list(IDerived)),
['attr', 'attr2', 'method', 'method2'])
def test_function_attributes_become_tagged_values(self):
from zope.interface import Interface
class ITagMe(Interface):
def method():
pass
method.optional = 1
method = ITagMe['method']
self.assertEqual(method.getTaggedValue('optional'), 1)
def test___doc___non_element(self):
from zope.interface import Interface
class IHaveADocString(Interface):
"xxx"
self.assertEqual(IHaveADocString.__doc__, "xxx")
self.assertEqual(list(IHaveADocString), [])
def test___doc___as_element(self):
from zope.interface import Attribute
from zope.interface import Interface
class IHaveADocString(Interface):
"xxx"
__doc__ = Attribute('the doc')
self.assertEqual(IHaveADocString.__doc__, "")
self.assertEqual(list(IHaveADocString), ['__doc__'])
def _errorsEqual(self, has_invariant, error_len, error_msgs, iface):
from zope.interface.exceptions import Invalid
self.assertRaises(Invalid, iface.validateInvariants, has_invariant)
e = []
try:
iface.validateInvariants(has_invariant, e)
except Invalid as error:
self.assertEqual(error.args[0], e)
else:
            self.fail('validateInvariants should always raise Invalid')
self.assertEqual(len(e), error_len)
msgs = [error.args[0] for error in e]
msgs.sort()
for msg in msgs:
self.assertEqual(msg, error_msgs.pop(0))
def test_invariant_simple(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface import directlyProvides
from zope.interface import invariant
class IInvariant(Interface):
foo = Attribute('foo')
bar = Attribute('bar; must eval to Boolean True if foo does')
invariant(_ifFooThenBar)
class HasInvariant(object):
pass
# set up
has_invariant = HasInvariant()
directlyProvides(has_invariant, IInvariant)
# the tests
self.assertEqual(IInvariant.getTaggedValue('invariants'),
[_ifFooThenBar])
self.assertEqual(IInvariant.validateInvariants(has_invariant), None)
has_invariant.bar = 27
self.assertEqual(IInvariant.validateInvariants(has_invariant), None)
has_invariant.foo = 42
self.assertEqual(IInvariant.validateInvariants(has_invariant), None)
del has_invariant.bar
self._errorsEqual(has_invariant, 1, ['If Foo, then Bar!'],
IInvariant)
def test_invariant_nested(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface import directlyProvides
from zope.interface import invariant
class IInvariant(Interface):
foo = Attribute('foo')
bar = Attribute('bar; must eval to Boolean True if foo does')
invariant(_ifFooThenBar)
class ISubInvariant(IInvariant):
invariant(_barGreaterThanFoo)
class HasInvariant(object):
pass
# nested interfaces with invariants:
self.assertEqual(ISubInvariant.getTaggedValue('invariants'),
[_barGreaterThanFoo])
has_invariant = HasInvariant()
directlyProvides(has_invariant, ISubInvariant)
has_invariant.foo = 42
# even though the interface has changed, we should still only have one
# error.
self._errorsEqual(has_invariant, 1, ['If Foo, then Bar!'],
ISubInvariant)
        # however, if we set foo to 2 and bar to 1 (so bar is not greater
        # than foo), we'll get the new error
has_invariant.foo = 2
has_invariant.bar = 1
self._errorsEqual(has_invariant, 1,
['Please, Boo MUST be greater than Foo!'],
ISubInvariant)
        # and if we set foo to a positive number and bar to 0, we'll
        # get both errors!
has_invariant.foo = 1
has_invariant.bar = 0
self._errorsEqual(has_invariant, 2,
['If Foo, then Bar!',
'Please, Boo MUST be greater than Foo!'],
ISubInvariant)
# for a happy ending, we'll make the invariants happy
has_invariant.foo = 1
has_invariant.bar = 2
self.assertEqual(IInvariant.validateInvariants(has_invariant), None)
def test_invariant_mutandis(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface import directlyProvides
from zope.interface import invariant
class IInvariant(Interface):
foo = Attribute('foo')
bar = Attribute('bar; must eval to Boolean True if foo does')
invariant(_ifFooThenBar)
class HasInvariant(object):
pass
# now we'll do two invariants on the same interface,
# just to make sure that a small
# multi-invariant interface is at least minimally tested.
has_invariant = HasInvariant()
directlyProvides(has_invariant, IInvariant)
has_invariant.foo = 42
# if you really need to mutate, then this would be the way to do it.
# Probably a bad idea, though. :-)
old_invariants = IInvariant.getTaggedValue('invariants')
invariants = old_invariants[:]
invariants.append(_barGreaterThanFoo)
IInvariant.setTaggedValue('invariants', invariants)
# even though the interface has changed, we should still only have one
# error.
self._errorsEqual(has_invariant, 1, ['If Foo, then Bar!'],
IInvariant)
        # however, if we set foo to 2 and bar to 1 (so bar is not greater
        # than foo), we'll get the new error
has_invariant.foo = 2
has_invariant.bar = 1
self._errorsEqual(has_invariant, 1,
['Please, Boo MUST be greater than Foo!'], IInvariant)
        # and if we set foo to a positive number and bar to 0, we'll
        # get both errors!
has_invariant.foo = 1
has_invariant.bar = 0
self._errorsEqual(has_invariant, 2,
['If Foo, then Bar!',
'Please, Boo MUST be greater than Foo!'],
IInvariant)
# for another happy ending, we'll make the invariants happy again
has_invariant.foo = 1
has_invariant.bar = 2
self.assertEqual(IInvariant.validateInvariants(has_invariant), None)
# clean up
IInvariant.setTaggedValue('invariants', old_invariants)
def test___doc___element(self):
from zope.interface import Interface
from zope.interface import Attribute
class I(Interface):
"xxx"
self.assertEqual(I.__doc__, "xxx")
self.assertEqual(list(I), [])
class I(Interface):
"xxx"
__doc__ = Attribute('the doc')
self.assertEqual(I.__doc__, "")
self.assertEqual(list(I), ['__doc__'])
def testIssue228(self):
# Test for http://collector.zope.org/Zope3-dev/228
# Old style classes don't have a '__class__' attribute
import sys
if sys.version[0] < '3':
# No old style classes in Python 3, so the test becomes moot.
from zope.interface import Interface
class I(Interface):
"xxx"
class OldStyle:
__providedBy__ = None
self.assertRaises(AttributeError, I.providedBy, OldStyle)
def test_invariant_as_decorator(self):
from zope.interface import Interface
from zope.interface import Attribute
from zope.interface import implementer
from zope.interface import invariant
from zope.interface.exceptions import Invalid
class IRange(Interface):
min = Attribute("Lower bound")
max = Attribute("Upper bound")
@invariant
def range_invariant(ob):
if ob.max < ob.min:
raise Invalid('max < min')
@implementer(IRange)
class Range(object):
def __init__(self, min, max):
self.min, self.max = min, max
IRange.validateInvariants(Range(1,2))
IRange.validateInvariants(Range(1,1))
try:
IRange.validateInvariants(Range(2,1))
except Invalid as e:
self.assertEqual(str(e), 'max < min')
def test_taggedValue(self):
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface import taggedValue
class ITagged(Interface):
foo = Attribute('foo')
bar = Attribute('bar; must eval to Boolean True if foo does')
taggedValue('qux', 'Spam')
class HasInvariant(object):
pass
self.assertEqual(ITagged.getTaggedValue('qux'), 'Spam')
self.failUnless('qux' in ITagged.getTaggedValueTags())
def test_description_cache_management(self):
# See https://bugs.launchpad.net/zope.interface/+bug/185974
# There was a bug where the cache used by Specification.get() was not
# cleared when the bases were changed.
from zope.interface import Interface
from zope.interface import Attribute
class I1(Interface):
a = Attribute('a')
class I2(I1):
pass
class I3(I2):
pass
self.failUnless(I3.get('a') is I1.get('a'))
I2.__bases__ = (Interface,)
self.failUnless(I3.get('a') is None)
def test___call___defers_to___conform___(self):
from zope.interface import Interface
from zope.interface import implementer
class I(Interface):
pass
@implementer(I)
class C(object):
def __conform__(self, proto):
return 0
self.assertEqual(I(C()), 0)
def test___call___object_implements(self):
from zope.interface import Interface
from zope.interface import implementer
class I(Interface):
pass
@implementer(I)
class C(object):
pass
c = C()
self.failUnless(I(c) is c)
def test___call___miss_wo_alternate(self):
from zope.interface import Interface
class I(Interface):
pass
class C(object):
pass
c = C()
self.assertRaises(TypeError, I, c)
def test___call___miss_w_alternate(self):
from zope.interface import Interface
class I(Interface):
pass
class C(object):
pass
c = C()
self.failUnless(I(c, self) is self)
def test___call___w_adapter_hook(self):
from zope.interface import Interface
from zope.interface.interface import adapter_hooks
old_hooks = adapter_hooks[:]
def _miss(iface, obj):
pass
def _hit(iface, obj):
return self
class I(Interface):
pass
class C(object):
pass
c = C()
old_adapter_hooks = adapter_hooks[:]
adapter_hooks[:] = [_miss, _hit]
try:
self.failUnless(I(c) is self)
finally:
adapter_hooks[:] = old_adapter_hooks
class AttributeTests(ElementTests):
DEFAULT_NAME = 'TestAttribute'
def _getTargetClass(self):
from zope.interface.interface import Attribute
return Attribute
class MethodTests(AttributeTests):
DEFAULT_NAME = 'TestMethod'
def _getTargetClass(self):
from zope.interface.interface import Method
return Method
def test_optional_as_property(self):
method = self._makeOne()
self.assertEqual(method.optional, {})
method.optional = {'foo': 'bar'}
self.assertEqual(method.optional, {'foo': 'bar'})
del method.optional
self.assertEqual(method.optional, {})
def test___call___raises_BrokenImplementation(self):
from zope.interface.exceptions import BrokenImplementation
method = self._makeOne()
try:
method()
except BrokenImplementation as e:
self.assertEqual(e.interface, None)
self.assertEqual(e.name, self.DEFAULT_NAME)
else:
self.fail('__call__ should raise BrokenImplementation')
def test_getSignatureInfo_bare(self):
method = self._makeOne()
info = method.getSignatureInfo()
self.assertEqual(list(info['positional']), [])
self.assertEqual(list(info['required']), [])
self.assertEqual(info['optional'], {})
self.assertEqual(info['varargs'], None)
self.assertEqual(info['kwargs'], None)
def test_getSignatureString_bare(self):
method = self._makeOne()
self.assertEqual(method.getSignatureString(), '()')
def test_getSignatureString_w_only_required(self):
method = self._makeOne()
method.positional = method.required = ['foo']
self.assertEqual(method.getSignatureString(), '(foo)')
def test_getSignatureString_w_optional(self):
method = self._makeOne()
method.positional = method.required = ['foo']
method.optional = {'foo': 'bar'}
self.assertEqual(method.getSignatureString(), "(foo='bar')")
def test_getSignatureString_w_varargs(self):
method = self._makeOne()
method.varargs = 'args'
self.assertEqual(method.getSignatureString(), "(*args)")
def test_getSignatureString_w_kwargs(self):
method = self._makeOne()
method.kwargs = 'kw'
self.assertEqual(method.getSignatureString(), "(**kw)")
class Test_fromFunction(unittest.TestCase):
def _callFUT(self, *args, **kw):
from zope.interface.interface import fromFunction
return fromFunction(*args, **kw)
def test_bare(self):
def _func():
"DOCSTRING"
method = self._callFUT(_func)
self.assertEqual(method.getName(), '_func')
self.assertEqual(method.getDoc(), 'DOCSTRING')
self.assertEqual(method.interface, None)
self.assertEqual(list(method.getTaggedValueTags()), [])
info = method.getSignatureInfo()
self.assertEqual(list(info['positional']), [])
self.assertEqual(list(info['required']), [])
self.assertEqual(info['optional'], {})
self.assertEqual(info['varargs'], None)
self.assertEqual(info['kwargs'], None)
def test_w_interface(self):
from zope.interface.interface import InterfaceClass
class IFoo(InterfaceClass):
pass
def _func():
"DOCSTRING"
method = self._callFUT(_func, interface=IFoo)
self.assertEqual(method.interface, IFoo)
def test_w_name(self):
def _func():
"DOCSTRING"
method = self._callFUT(_func, name='anotherName')
self.assertEqual(method.getName(), 'anotherName')
def test_w_only_required(self):
def _func(foo):
"DOCSTRING"
method = self._callFUT(_func)
info = method.getSignatureInfo()
self.assertEqual(list(info['positional']), ['foo'])
self.assertEqual(list(info['required']), ['foo'])
self.assertEqual(info['optional'], {})
self.assertEqual(info['varargs'], None)
self.assertEqual(info['kwargs'], None)
def test_w_optional(self):
def _func(foo='bar'):
"DOCSTRING"
method = self._callFUT(_func)
info = method.getSignatureInfo()
self.assertEqual(list(info['positional']), ['foo'])
self.assertEqual(list(info['required']), [])
self.assertEqual(info['optional'], {'foo': 'bar'})
self.assertEqual(info['varargs'], None)
self.assertEqual(info['kwargs'], None)
def test_w_optional_self(self):
# XXX This is a weird case, trying to cover the following code in
# FUT::
#
# nr = na-len(defaults)
# if nr < 0:
# defaults=defaults[-nr:]
# nr = 0
def _func(self='bar'):
"DOCSTRING"
method = self._callFUT(_func, imlevel=1)
info = method.getSignatureInfo()
self.assertEqual(list(info['positional']), [])
self.assertEqual(list(info['required']), [])
self.assertEqual(info['optional'], {})
self.assertEqual(info['varargs'], None)
self.assertEqual(info['kwargs'], None)
def test_w_varargs(self):
def _func(*args):
"DOCSTRING"
method = self._callFUT(_func)
info = method.getSignatureInfo()
self.assertEqual(list(info['positional']), [])
self.assertEqual(list(info['required']), [])
self.assertEqual(info['optional'], {})
self.assertEqual(info['varargs'], 'args')
self.assertEqual(info['kwargs'], None)
def test_w_kwargs(self):
def _func(**kw):
"DOCSTRING"
method = self._callFUT(_func)
info = method.getSignatureInfo()
self.assertEqual(list(info['positional']), [])
self.assertEqual(list(info['required']), [])
self.assertEqual(info['optional'], {})
self.assertEqual(info['varargs'], None)
self.assertEqual(info['kwargs'], 'kw')
def test_full_spectrum(self):
def _func(foo, bar='baz', *args, **kw):
"DOCSTRING"
method = self._callFUT(_func)
info = method.getSignatureInfo()
self.assertEqual(list(info['positional']), ['foo', 'bar'])
self.assertEqual(list(info['required']), ['foo'])
self.assertEqual(info['optional'], {'bar': 'baz'})
self.assertEqual(info['varargs'], 'args')
self.assertEqual(info['kwargs'], 'kw')
class Test_fromMethod(unittest.TestCase):
def _callFUT(self, *args, **kw):
from zope.interface.interface import fromMethod
return fromMethod(*args, **kw)
def test_no_args(self):
class Foo(object):
def bar(self):
"DOCSTRING"
method = self._callFUT(Foo.bar)
self.assertEqual(method.getName(), 'bar')
self.assertEqual(method.getDoc(), 'DOCSTRING')
self.assertEqual(method.interface, None)
self.assertEqual(list(method.getTaggedValueTags()), [])
info = method.getSignatureInfo()
self.assertEqual(list(info['positional']), [])
self.assertEqual(list(info['required']), [])
self.assertEqual(info['optional'], {})
self.assertEqual(info['varargs'], None)
self.assertEqual(info['kwargs'], None)
def test_full_spectrum(self):
class Foo(object):
def bar(self, foo, bar='baz', *args, **kw):
"DOCSTRING"
method = self._callFUT(Foo.bar)
info = method.getSignatureInfo()
self.assertEqual(list(info['positional']), ['foo', 'bar'])
self.assertEqual(list(info['required']), ['foo'])
self.assertEqual(info['optional'], {'bar': 'baz'})
self.assertEqual(info['varargs'], 'args')
self.assertEqual(info['kwargs'], 'kw')
def test_w_non_method(self):
def foo():
"DOCSTRING"
method = self._callFUT(foo)
self.assertEqual(method.getName(), 'foo')
self.assertEqual(method.getDoc(), 'DOCSTRING')
self.assertEqual(method.interface, None)
self.assertEqual(list(method.getTaggedValueTags()), [])
info = method.getSignatureInfo()
self.assertEqual(list(info['positional']), [])
self.assertEqual(list(info['required']), [])
self.assertEqual(info['optional'], {})
self.assertEqual(info['varargs'], None)
self.assertEqual(info['kwargs'], None)
class DummyDependent(object):
def __init__(self):
self._changed = []
def changed(self, originally_changed):
self._changed.append(originally_changed)
def _barGreaterThanFoo(obj):
from zope.interface.exceptions import Invalid
foo = getattr(obj, 'foo', None)
bar = getattr(obj, 'bar', None)
if foo is not None and isinstance(foo, type(bar)):
# type checking should be handled elsewhere (like, say,
# schema); these invariants should be intra-interface
# constraints. This is a hacky way to do it, maybe, but you
# get the idea
if not bar > foo:
raise Invalid('Please, Boo MUST be greater than Foo!')
def _ifFooThenBar(obj):
from zope.interface.exceptions import Invalid
if getattr(obj, 'foo', None) and not getattr(obj, 'bar', None):
raise Invalid('If Foo, then Bar!')
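# These two helpers are the invariant callables used by the invariant tests
# above; for example, an object with foo=1 and bar=0 trips both of them
# (foo is set but bar is false, and bar is not greater than foo).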
class _Monkey(object):
# context-manager for replacing module names in the scope of a test.
def __init__(self, module, **kw):
self.module = module
self.to_restore = dict([(key, getattr(module, key)) for key in kw])
for key, value in kw.items():
setattr(module, key, value)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
for key, value in self.to_restore.items():
setattr(self.module, key, value)
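# Typical use is as a context manager, e.g. ``with _Monkey(mod, attr=stub):``
# (``mod``, ``attr`` and ``stub`` are placeholders here): the named attributes
# are replaced for the duration of the block and restored on exit.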
def test_suite():
import doctest
return unittest.TestSuite((
unittest.makeSuite(ElementTests),
unittest.makeSuite(SpecificationBasePyTests),
unittest.makeSuite(InterfaceBasePyTests),
unittest.makeSuite(SpecificationTests),
unittest.makeSuite(InterfaceTests),
unittest.makeSuite(AttributeTests),
unittest.makeSuite(MethodTests),
unittest.makeSuite(Test_fromFunction),
#unittest.makeSuite(Test_fromMethod),
doctest.DocTestSuite(),
doctest.DocTestSuite("zope.interface.interface"),
))
| bsd-3-clause | -5,660,409,157,293,757,000 | 33.749162 | 80 | 0.58656 | false |
menardorama/ReadyNAS-Add-ons | headphones-1.0.0/debian/headphones/apps/headphones/lib/requests/auth.py | 120 | 6669 | # -*- coding: utf-8 -*-
"""
requests.auth
~~~~~~~~~~~~~
This module contains the authentication handlers for Requests.
"""
import os
import re
import time
import hashlib
from base64 import b64encode
from .compat import urlparse, str
from .cookies import extract_cookies_to_jar
from .utils import parse_dict_header, to_native_string
from .status_codes import codes
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
def _basic_auth_str(username, password):
"""Returns a Basic Auth string."""
authstr = 'Basic ' + to_native_string(
b64encode(('%s:%s' % (username, password)).encode('latin1')).strip()
)
return authstr
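# For illustration, with made-up credentials: _basic_auth_str('user', 'pass')
# returns 'Basic dXNlcjpwYXNz', i.e. base64 of 'user:pass' behind the scheme.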
class AuthBase(object):
"""Base class that all auth implementations derive from"""
def __call__(self, r):
raise NotImplementedError('Auth hooks must be callable.')
class HTTPBasicAuth(AuthBase):
"""Attaches HTTP Basic Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
def __call__(self, r):
r.headers['Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPProxyAuth(HTTPBasicAuth):
"""Attaches HTTP Proxy Authentication to a given Request object."""
def __call__(self, r):
r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPDigestAuth(AuthBase):
"""Attaches HTTP Digest Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
self.last_nonce = ''
self.nonce_count = 0
self.chal = {}
self.pos = None
self.num_401_calls = 1
def build_digest_header(self, method, url):
realm = self.chal['realm']
nonce = self.chal['nonce']
qop = self.chal.get('qop')
algorithm = self.chal.get('algorithm')
opaque = self.chal.get('opaque')
if algorithm is None:
_algorithm = 'MD5'
else:
            _algorithm = algorithm.upper()
        # default so an unsupported algorithm falls through to the
        # `hash_utf8 is None` check below instead of raising UnboundLocalError
        hash_utf8 = None
        # lambdas assume digest modules are imported at the top level
if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
def md5_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.md5(x).hexdigest()
hash_utf8 = md5_utf8
elif _algorithm == 'SHA':
def sha_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.sha1(x).hexdigest()
hash_utf8 = sha_utf8
KD = lambda s, d: hash_utf8("%s:%s" % (s, d))
if hash_utf8 is None:
return None
# XXX not implemented yet
entdig = None
p_parsed = urlparse(url)
path = p_parsed.path
if p_parsed.query:
path += '?' + p_parsed.query
A1 = '%s:%s:%s' % (self.username, realm, self.password)
A2 = '%s:%s' % (method, path)
HA1 = hash_utf8(A1)
HA2 = hash_utf8(A2)
if nonce == self.last_nonce:
self.nonce_count += 1
else:
self.nonce_count = 1
ncvalue = '%08x' % self.nonce_count
s = str(self.nonce_count).encode('utf-8')
s += nonce.encode('utf-8')
s += time.ctime().encode('utf-8')
s += os.urandom(8)
cnonce = (hashlib.sha1(s).hexdigest()[:16])
noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, HA2)
if _algorithm == 'MD5-SESS':
HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))
if qop is None:
respdig = KD(HA1, "%s:%s" % (nonce, HA2))
elif qop == 'auth' or 'auth' in qop.split(','):
respdig = KD(HA1, noncebit)
else:
# XXX handle auth-int.
return None
self.last_nonce = nonce
# XXX should the partial digests be encoded too?
base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
'response="%s"' % (self.username, realm, nonce, path, respdig)
if opaque:
base += ', opaque="%s"' % opaque
if algorithm:
base += ', algorithm="%s"' % algorithm
if entdig:
base += ', digest="%s"' % entdig
if qop:
base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)
return 'Digest %s' % (base)
def handle_redirect(self, r, **kwargs):
"""Reset num_401_calls counter on redirects."""
if r.is_redirect:
self.num_401_calls = 1
def handle_401(self, r, **kwargs):
"""Takes the given response and tries digest-auth, if needed."""
if self.pos is not None:
# Rewind the file position indicator of the body to where
# it was to resend the request.
r.request.body.seek(self.pos)
num_401_calls = getattr(self, 'num_401_calls', 1)
s_auth = r.headers.get('www-authenticate', '')
if 'digest' in s_auth.lower() and num_401_calls < 2:
self.num_401_calls += 1
pat = re.compile(r'digest ', flags=re.IGNORECASE)
self.chal = parse_dict_header(pat.sub('', s_auth, count=1))
# Consume content and release the original connection
# to allow our new request to reuse the same one.
r.content
r.raw.release_conn()
prep = r.request.copy()
extract_cookies_to_jar(prep._cookies, r.request, r.raw)
prep.prepare_cookies(prep._cookies)
prep.headers['Authorization'] = self.build_digest_header(
prep.method, prep.url)
_r = r.connection.send(prep, **kwargs)
_r.history.append(r)
_r.request = prep
return _r
self.num_401_calls = 1
return r
def __call__(self, r):
# If we have a saved nonce, skip the 401
if self.last_nonce:
r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
try:
self.pos = r.body.tell()
except AttributeError:
# In the case of HTTPDigestAuth being reused and the body of
# the previous request was a file-like object, pos has the
# file position of the previous body. Ensure it's set to
# None.
self.pos = None
r.register_hook('response', self.handle_401)
r.register_hook('response', self.handle_redirect)
return r
| gpl-2.0 | 3,736,381,122,403,699,700 | 30.909091 | 88 | 0.556605 | false |
unatv2/unatv2 | qa/rpc-tests/listtransactions.py | 145 | 6081 | #!/usr/bin/env python
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listtransactions API
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
import json
import shutil
import subprocess
import tempfile
import traceback
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def check_array_result(object_array, to_match, expected):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
"""
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0:
raise AssertionError("No objects matched %s"%(str(to_match)))
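# For illustration: check_array_result(txs, {"txid": txid}, {"category": "send"})
# passes if at least one entry has that txid and every entry with that txid
# has category "send"; otherwise it raises AssertionError.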
def run_test(nodes):
# Simple send, 0 to 1:
txid = nodes[0].sendtoaddress(nodes[1].getnewaddress(), 0.1)
sync_mempools(nodes)
check_array_result(nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
check_array_result(nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
# mine a block, confirmations should change:
nodes[0].setgenerate(True, 1)
sync_blocks(nodes)
check_array_result(nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
check_array_result(nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
# send-to-self:
txid = nodes[0].sendtoaddress(nodes[0].getnewaddress(), 0.2)
check_array_result(nodes[0].listtransactions(),
{"txid":txid, "category":"send"},
{"amount":Decimal("-0.2")})
check_array_result(nodes[0].listtransactions(),
{"txid":txid, "category":"receive"},
{"amount":Decimal("0.2")})
# sendmany from node1: twice to self, twice to node2:
send_to = { nodes[0].getnewaddress() : 0.11, nodes[1].getnewaddress() : 0.22,
nodes[0].getaccountaddress("from1") : 0.33, nodes[1].getaccountaddress("toself") : 0.44 }
txid = nodes[1].sendmany("", send_to)
sync_mempools(nodes)
check_array_result(nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.11")},
{"txid":txid} )
check_array_result(nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.11")},
{"txid":txid} )
check_array_result(nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.22")},
{"txid":txid} )
check_array_result(nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.22")},
{"txid":txid} )
check_array_result(nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.33")},
{"txid":txid} )
check_array_result(nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.33")},
{"txid":txid, "account" : "from1"} )
check_array_result(nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.44")},
{"txid":txid, "account" : ""} )
check_array_result(nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.44")},
{"txid":txid, "account" : "toself"} )
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave bitcoinds and test.* datadir on exit or error")
parser.add_option("--srcdir", dest="srcdir", default="../../src",
help="Source directory containing bitcoind/bitcoin-cli (default: %default%)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
(options, args) = parser.parse_args()
os.environ['PATH'] = options.srcdir+":"+os.environ['PATH']
check_json_precision()
success = False
nodes = []
try:
print("Initializing test directory "+options.tmpdir)
if not os.path.isdir(options.tmpdir):
os.makedirs(options.tmpdir)
initialize_chain(options.tmpdir)
nodes = start_nodes(2, options.tmpdir)
connect_nodes(nodes[1], 0)
sync_blocks(nodes)
run_test(nodes)
success = True
except AssertionError as e:
print("Assertion failed: "+e.message)
except Exception as e:
print("Unexpected exception caught during testing: "+str(e))
traceback.print_tb(sys.exc_info()[2])
if not options.nocleanup:
print("Cleaning up")
stop_nodes(nodes)
wait_bitcoinds()
shutil.rmtree(options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
if __name__ == '__main__':
main()
| mit | 515,628,013,720,518,500 | 37.980769 | 105 | 0.569314 | false |
saketkc/statsmodels | tools/backport_pr.py | 30 | 5263 | #!/usr/bin/env python
"""
Backport pull requests to a particular branch.
Usage: backport_pr.py branch [PR]
e.g.:
python tools/backport_pr.py 0.13.1 123
to backport PR #123 onto branch 0.13.1
or
python tools/backport_pr.py 1.x
to see what PRs are marked for backport that have yet to be applied.
Copied from IPython 9e82bc5
https://github.com/ipython/ipython/blob/master/tools/backport_pr.py
"""
from __future__ import print_function
import os
import re
import sys
from subprocess import Popen, PIPE, check_call, check_output
from urllib import urlopen
from gh_api import (
get_issues_list,
get_pull_request,
get_pull_request_files,
is_pull_request,
get_milestone_id,
)
from pandas import Series
def find_rejects(root='.'):
for dirname, dirs, files in os.walk(root):
for fname in files:
if fname.endswith('.rej'):
yield os.path.join(dirname, fname)
def get_current_branch():
branches = check_output(['git', 'branch'])
for branch in branches.splitlines():
if branch.startswith('*'):
return branch[1:].strip()
def backport_pr(branch, num, project='statsmodels/statsmodels'):
current_branch = get_current_branch()
if branch != current_branch:
check_call(['git', 'checkout', branch])
check_call(['git', 'pull'])
pr = get_pull_request(project, num, auth=True)
files = get_pull_request_files(project, num, auth=True)
patch_url = pr['patch_url']
title = pr['title']
description = pr['body']
fname = "PR%i.patch" % num
if os.path.exists(fname):
print("using patch from {fname}".format(**locals()))
with open(fname) as f:
patch = f.read()
else:
req = urlopen(patch_url)
patch = req.read()
msg = "Backport PR #%i: %s" % (num, title) + '\n\n' + description
check = Popen(['git', 'apply', '--check', '--verbose'], stdin=PIPE)
a,b = check.communicate(patch)
if check.returncode:
print("patch did not apply, saving to {fname}".format(**locals()))
print("edit {fname} until `cat {fname} | git apply --check` succeeds".format(**locals()))
print("then run tools/backport_pr.py {num} again".format(**locals()))
if not os.path.exists(fname):
with open(fname, 'wb') as f:
f.write(patch)
return 1
p = Popen(['git', 'apply'], stdin=PIPE)
a,b = p.communicate(patch)
filenames = [ f['filename'] for f in files ]
check_call(['git', 'add'] + filenames)
check_call(['git', 'commit', '-m', msg])
print("PR #%i applied, with msg:" % num)
print()
print(msg)
print()
if branch != current_branch:
check_call(['git', 'checkout', current_branch])
return 0
backport_re = re.compile(r"[Bb]ackport.*?(\d+)")
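# e.g. a commit subject of the form produced above, "Backport PR #123: some fix"
# (hypothetical number/title), is matched with '123' as the captured group.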
def already_backported(branch, since_tag=None):
"""return set of PRs that have been backported already"""
if since_tag is None:
since_tag = check_output(['git','describe', branch, '--abbrev=0']).decode('utf8').strip()
cmd = ['git', 'log', '%s..%s' % (since_tag, branch), '--oneline']
lines = check_output(cmd).decode('utf8')
return set(int(num) for num in backport_re.findall(lines))
def should_backport(labels=None, milestone=None):
"""return set of PRs marked for backport"""
if labels is None and milestone is None:
raise ValueError("Specify one of labels or milestone.")
elif labels is not None and milestone is not None:
raise ValueError("Specify only one of labels or milestone.")
if labels is not None:
issues = get_issues_list("statsmodels/statsmodels",
labels=labels,
state='closed',
auth=True,
)
else:
milestone_id = get_milestone_id("statsmodels/statsmodels", milestone,
auth=True)
issues = get_issues_list("statsmodels/statsmodels",
milestone=milestone_id,
state='closed',
auth=True,
)
should_backport = []
merged_dates = []
for issue in issues:
if not is_pull_request(issue):
continue
pr = get_pull_request("statsmodels/statsmodels", issue['number'],
auth=True)
if not pr['merged']:
print ("Marked PR closed without merge: %i" % pr['number'])
continue
if pr['number'] not in should_backport:
merged_dates.append(pr['merged_at'])
should_backport.append(pr['number'])
return Series(merged_dates, index=should_backport)
if __name__ == '__main__':
if len(sys.argv) < 2:
print(__doc__)
sys.exit(1)
if len(sys.argv) < 3:
branch = sys.argv[1]
already = already_backported(branch)
#NOTE: change this to the label you've used for marking a backport
should = should_backport(milestone="0.5.1")
print ("The following PRs should be backported:")
to_backport = []
if already:
should = should.ix[set(should.index).difference(already)]
should.sort()
for pr, date in should.iteritems():
print (pr)
sys.exit(0)
sys.exit(backport_pr(sys.argv[1], int(sys.argv[2])))
| bsd-3-clause | -6,453,828,736,326,143,000 | 29.777778 | 97 | 0.598708 | false |
PulsePod/evepod | lib/python2.7/site-packages/pip/vendor/html5lib/filters/whitespace.py | 1730 | 1142 | from __future__ import absolute_import, division, unicode_literals
import re
from . import _base
from ..constants import rcdataElements, spaceCharacters
spaceCharacters = "".join(spaceCharacters)
SPACES_REGEX = re.compile("[%s]+" % spaceCharacters)
class Filter(_base.Filter):
spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements))
def __iter__(self):
preserve = 0
for token in _base.Filter.__iter__(self):
type = token["type"]
if type == "StartTag" \
and (preserve or token["name"] in self.spacePreserveElements):
preserve += 1
elif type == "EndTag" and preserve:
preserve -= 1
elif not preserve and type == "SpaceCharacters" and token["data"]:
# Test on token["data"] above to not introduce spaces where there were not
token["data"] = " "
elif not preserve and type == "Characters":
token["data"] = collapse_spaces(token["data"])
yield token
def collapse_spaces(text):
return SPACES_REGEX.sub(' ', text)
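# For example (illustrative only): collapse_spaces(u"a \t b\n c") returns u"a b c";
# the Filter above applies the same collapsing to character tokens, except inside
# <pre>, <textarea> and RCDATA elements, where whitespace is preserved.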
| apache-2.0 | 7,045,932,464,951,442,000 | 29.052632 | 90 | 0.590193 | false |
sergev/mraa | examples/python/rgblcd.py | 43 | 1470 | #!/usr/bin/env python
# Author: Brendan Le Foll <[email protected]>
# Copyright (c) 2014 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
import mraa
# This example will change the LCD backlight on the Grove-LCD RGB backlight
# to a nice shade of purple
x = mraa.I2c(0)
x.address(0x62)
# initialise device
x.writeReg(0, 0)
x.writeReg(1, 0)
# send RGB color data
x.writeReg(0x08, 0xAA)
x.writeReg(0x04, 255)
x.writeReg(0x02, 255)
| mit | -5,626,878,122,901,954,000 | 36.692308 | 75 | 0.765986 | false |
pypa/setuptools | setuptools/command/setopt.py | 6 | 5051 | from distutils.util import convert_path
from distutils import log
from distutils.errors import DistutilsOptionError
import distutils
import os
import configparser
from setuptools import Command
__all__ = ['config_file', 'edit_config', 'option_base', 'setopt']
def config_file(kind="local"):
"""Get the filename of the distutils, local, global, or per-user config
`kind` must be one of "local", "global", or "user"
"""
if kind == 'local':
return 'setup.cfg'
if kind == 'global':
return os.path.join(
os.path.dirname(distutils.__file__), 'distutils.cfg'
)
if kind == 'user':
dot = os.name == 'posix' and '.' or ''
return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot))
raise ValueError(
"config_file() type must be 'local', 'global', or 'user'", kind
)
def edit_config(filename, settings, dry_run=False):
"""Edit a configuration file to include `settings`
`settings` is a dictionary of dictionaries or ``None`` values, keyed by
command/section name. A ``None`` value means to delete the entire section,
while a dictionary lists settings to be changed or deleted in that section.
A setting of ``None`` means to delete that setting.
"""
log.debug("Reading configuration from %s", filename)
opts = configparser.RawConfigParser()
opts.read([filename])
for section, options in settings.items():
if options is None:
log.info("Deleting section [%s] from %s", section, filename)
opts.remove_section(section)
else:
if not opts.has_section(section):
log.debug("Adding new section [%s] to %s", section, filename)
opts.add_section(section)
for option, value in options.items():
if value is None:
log.debug(
"Deleting %s.%s from %s",
section, option, filename
)
opts.remove_option(section, option)
if not opts.options(section):
log.info("Deleting empty [%s] section from %s",
section, filename)
opts.remove_section(section)
else:
log.debug(
"Setting %s.%s to %r in %s",
section, option, value, filename
)
opts.set(section, option, value)
log.info("Writing %s", filename)
if not dry_run:
with open(filename, 'w') as f:
opts.write(f)
class option_base(Command):
"""Abstract base class for commands that mess with config files"""
user_options = [
('global-config', 'g',
"save options to the site-wide distutils.cfg file"),
('user-config', 'u',
"save options to the current user's pydistutils.cfg file"),
('filename=', 'f',
"configuration file to use (default=setup.cfg)"),
]
boolean_options = [
'global-config', 'user-config',
]
def initialize_options(self):
self.global_config = None
self.user_config = None
self.filename = None
def finalize_options(self):
filenames = []
if self.global_config:
filenames.append(config_file('global'))
if self.user_config:
filenames.append(config_file('user'))
if self.filename is not None:
filenames.append(self.filename)
if not filenames:
filenames.append(config_file('local'))
if len(filenames) > 1:
raise DistutilsOptionError(
"Must specify only one configuration file option",
filenames
)
self.filename, = filenames
class setopt(option_base):
"""Save command-line options to a file"""
description = "set an option in setup.cfg or another config file"
user_options = [
('command=', 'c', 'command to set an option for'),
('option=', 'o', 'option to set'),
('set-value=', 's', 'value of the option'),
('remove', 'r', 'remove (unset) the value'),
] + option_base.user_options
boolean_options = option_base.boolean_options + ['remove']
def initialize_options(self):
option_base.initialize_options(self)
self.command = None
self.option = None
self.set_value = None
self.remove = None
def finalize_options(self):
option_base.finalize_options(self)
if self.command is None or self.option is None:
raise DistutilsOptionError("Must specify --command *and* --option")
if self.set_value is None and not self.remove:
raise DistutilsOptionError("Must specify --set-value or --remove")
def run(self):
edit_config(
self.filename, {
self.command: {self.option.replace('-', '_'): self.set_value}
},
self.dry_run
)
| mit | -9,051,294,473,781,537,000 | 33.128378 | 79 | 0.567214 | false |
gonesurfing/Quisk_rpi_remote | hiqsdr/quisk_hardware.py | 1 | 13927 | # This is a sample hardware file for UDP control. Use this file for my 2010 transceiver
# described in QEX and for the improved version HiQSDR. To turn on the extended
# features in HiQSDR, update your FPGA firmware to version 1.1 or later and use use_rx_udp = 2.
from __future__ import print_function
import struct, socket, math, traceback
import _quisk as QS
from quisk_hardware_model import Hardware as BaseHardware
DEBUG = 0
class Hardware(BaseHardware):
def __init__(self, app, conf):
BaseHardware.__init__(self, app, conf)
self.use_sidetone = 1
self.got_udp_status = '' # status from UDP receiver
# want_udp_status is a 14-byte string with numbers in little-endian order:
# [0:2] 'St'
# [2:6] Rx tune phase
# [6:10] Tx tune phase
# [10] Tx output level 0 to 255
# [11] Tx control bits:
# 0x01 Enable CW transmit
# 0x02 Enable all other transmit
# 0x04 Use the HiQSDR extended IO pins not present in the 2010 QEX ver 1.0
# 0x08 The key is down (software key)
# [12] Rx control bits
# Second stage decimation less one, 1-39, six bits
# [13] zero or firmware version number
# The above is used for firmware version 1.0.
# Version 1.1 adds eight more bytes for the HiQSDR control ports:
# [14] X1 connector: Preselect pins 69, 68, 65, 64; Preamp pin 63, Tx LED pin 57
# [15] Attenuator pins 84, 83, 82, 81, 80
# [16] More bits: AntSwitch pin 41 is 0x01
# [17:22] The remaining five bytes are sent as zero.
# Version 1.2 uses the same format as 1.1, but adds the "Qs" command (see below).
# Version 1.3 adds features needed by the new quisk_vna.py program:
# [17] This one byte must be zero
# [18:20] This is vna_count, the number of VNA data points; or zero for normal operation
# [20:22] These two bytes must be zero
# The "Qs" command is a two-byte UDP packet sent to the control port. It returns the hardware status
# as the above string, except that the string starts with "Qs" instead of "St". Do not send the "Qs" command
# from Quisk, as it interferes with the "St" command. The "Qs" command is meant to be used from an
# external program, such as HamLib or a logging program.
# When vna_count != 0, we are in VNA mode. The start frequency is rx_phase, and for each point tx_phase is added
# to advance the frequency. A zero sample is added to mark the blocks. The samples are I and Q averaged at DC.
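# Illustrative sketch only (not part of the original driver): a reply to the
# "St"/"Qs" command could be decoded with the same little-endian layout, e.g.
#   rx_phase, tx_phase = struct.unpack("<LL", reply[2:10])
#   tx_level, tx_ctrl, rx_ctrl, version = struct.unpack("<4B", reply[10:14])
# where the field meanings are described in the comment block above.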
self.rx_phase = 0
self.tx_phase = 0
self.tx_level = 0
self.tx_control = 0
self.rx_control = 0
self.vna_count = 0 # VNA scan count; MUST be zero for non-VNA operation
self.index = 0
self.mode = None
self.band = None
self.rf_gain = 0
self.HiQSDR_Connector_X1 = 0
self.HiQSDR_Attenuator = 0
self.HiQSDR_Bits = 0
if conf.use_rx_udp == 2: # Set to 2 for the HiQSDR
self.rf_gain_labels = ('RF 0 dB', 'RF +10', 'RF -10', 'RF -20', 'RF -30')
self.antenna_labels = ('Ant 1', 'Ant 2')
self.firmware_version = None # firmware version is initially unknown
self.rx_udp_socket = None
self.vfo_frequency = 0 # current vfo frequency
self.tx_frequency = 0
self.decimations = [] # supported decimation rates
for dec in (40, 20, 10, 8, 5, 4, 2):
self.decimations.append(dec * 64)
if self.conf.fft_size_multiplier == 0:
self.conf.fft_size_multiplier = 6 # Set size needed by VarDecim
def open(self):
# Create the proper broadcast address for rx_udp_ip.
nm = self.conf.rx_udp_ip_netmask.split('.')
ip = self.conf.rx_udp_ip.split('.')
nm = map(int, nm)
ip = map(int, ip)
bc = ''
for i in range(4):
x = (ip[i] | ~ nm[i]) & 0xFF
bc = bc + str(x) + '.'
self.broadcast_addr = bc[:-1]
# This socket is used for the Simple Network Discovery Protocol by AE4JY
self.socket_sndp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket_sndp.setblocking(0)
self.socket_sndp.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.sndp_request = chr(56) + chr(0) + chr(0x5A) + chr(0xA5) + chr(0) * 52
self.sndp_active = self.conf.sndp_active
# conf.rx_udp_port is used for returning ADC samples
# conf.rx_udp_port + 1 is used for control
self.rx_udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.rx_udp_socket.setblocking(0)
self.rx_udp_socket.connect((self.conf.rx_udp_ip, self.conf.rx_udp_port + 1))
return QS.open_rx_udp(self.conf.rx_udp_ip, self.conf.rx_udp_port)
def close(self):
if self.rx_udp_socket:
self.rx_udp_socket.close()
self.rx_udp_socket = None
def ReturnFrequency(self): # Return the current tuning and VFO frequency
return None, None # frequencies have not changed
def ReturnVfoFloat(self): # Return the accurate VFO as a float
return float(self.rx_phase) * self.conf.rx_udp_clock / 2.0**32
def ChangeFrequency(self, tx_freq, vfo_freq, source='', band='', event=None):
if vfo_freq != self.vfo_frequency:
self.vfo_frequency = vfo_freq
self.rx_phase = int(float(vfo_freq) / self.conf.rx_udp_clock * 2.0**32 + 0.5) & 0xFFFFFFFF
if tx_freq and tx_freq > 0:
self.tx_frequency = tx_freq
tx = tx_freq
self.tx_phase = int(float(tx) / self.conf.rx_udp_clock * 2.0**32 + 0.5) & 0xFFFFFFFF
self.NewUdpStatus()
return tx_freq, vfo_freq
def ChangeMode(self, mode):
# mode is a string: "USB", "AM", etc.
self.mode = mode
self.tx_control &= ~0x03 # Erase last two bits
if self.vna_count:
pass
elif mode in ("CWL", "CWU"):
self.tx_control |= 0x01
elif mode in ("USB", "LSB", "AM", "FM"):
self.tx_control |= 0x02
elif mode[0:4] == 'DGT-':
self.tx_control |= 0x02
elif mode[0:3] == 'IMD':
self.tx_control |= 0x02
self.SetTxLevel()
def ChangeBand(self, band):
# band is a string: "60", "40", "WWV", etc.
self.band = band
self.HiQSDR_Connector_X1 &= ~0x0F # Mask in the last four bits
self.HiQSDR_Connector_X1 |= self.conf.HiQSDR_BandDict.get(band, 0) & 0x0F
self.SetTxLevel()
def SetTxLevel(self):
# As tx_level varies from 50 to 200, the output level changes from 263 to 752 mV
# So 0 to 255 is 100 to 931, or 1.0 to 9.31; v = 1.0 + 0.0326 * level
if not self.vna_count:
try:
self.tx_level = self.conf.tx_level[self.band]
except KeyError:
self.tx_level = self.conf.tx_level[None] # The default
if self.mode[0:4] == 'DGT-':
reduc = self.application.digital_tx_level
else:
reduc = self.application.tx_level
if reduc < 100: # reduce power by a percentage
level = 1.0 + self.tx_level * 0.0326
level *= math.sqrt(reduc / 100.0) # Convert from a power to an amplitude
self.tx_level = int((level - 1.0) / 0.0326 + 0.5)
if self.tx_level < 0:
self.tx_level = 0
self.NewUdpStatus()
def OnButtonRfGain(self, event):
# The HiQSDR attenuator is five bits: 2, 4, 8, 10, 20 dB
btn = event.GetEventObject()
n = btn.index
self.HiQSDR_Connector_X1 &= ~0x10 # Mask in the preamp bit
if n == 0: # 0dB
self.HiQSDR_Attenuator = 0
self.rf_gain = 0
elif n == 1: # +10
self.HiQSDR_Attenuator = 0
self.HiQSDR_Connector_X1 |= 0x10
self.rf_gain = 10
elif n == 2: # -10
self.HiQSDR_Attenuator = 0x08
self.rf_gain = -10
elif n == 3: # -20
self.HiQSDR_Attenuator = 0x10
self.rf_gain = -20
elif n == 4: # -30
self.HiQSDR_Attenuator = 0x18
self.rf_gain = -30
else:
self.HiQSDR_Attenuator = 0
self.rf_gain = 0
print ('Unknown RfGain')
self.NewUdpStatus()
def OnButtonPTT(self, event):
# This feature requires firmware version 1.1 or higher
if self.firmware_version:
btn = event.GetEventObject()
if btn.GetValue(): # Turn the software key bit on or off
self.tx_control |= 0x08
else:
self.tx_control &= ~0x08
self.NewUdpStatus(True) # Prompt update for PTT
def OnButtonAntenna(self, event):
# This feature requires extended IO
btn = event.GetEventObject()
if btn.index:
self.HiQSDR_Bits |= 0x01
else:
self.HiQSDR_Bits &= ~0x01
self.NewUdpStatus()
def HeartBeat(self):
if self.sndp_active: # AE4JY Simple Network Discovery Protocol - attempt to set the FPGA IP address
try:
self.socket_sndp.sendto(self.sndp_request, (self.broadcast_addr, 48321))
data = self.socket_sndp.recv(1024)
# print(repr(data))
except:
# traceback.print_exc()
pass
else:
if len(data) == 56 and data[5:14] == 'HiQSDR-v1':
ip = self.conf.rx_udp_ip.split('.')
t = (data[0:4] + chr(2) + data[5:37] + chr(int(ip[3])) + chr(int(ip[2])) + chr(int(ip[1])) + chr(int(ip[0]))
+ chr(0) * 12 + chr(self.conf.rx_udp_port & 0xFF) + chr(self.conf.rx_udp_port >> 8) + chr(0))
# print(repr(t))
self.socket_sndp.sendto(t, (self.broadcast_addr, 48321))
try: # receive the old status if any
data = self.rx_udp_socket.recv(1024)
if DEBUG:
self.PrintStatus(' got ', data)
except:
pass
else:
if data[0:2] == 'St':
self.got_udp_status = data
if self.firmware_version is None: # get the firmware version
if self.want_udp_status[0:13] != self.got_udp_status[0:13]:
try:
self.rx_udp_socket.send(self.want_udp_status)
if DEBUG:
self.PrintStatus('Start', self.want_udp_status)
except:
pass
else: # We got a correct response.
self.firmware_version = ord(self.got_udp_status[13]) # Firmware version is returned here
if DEBUG:
print ('Got version', self.firmware_version)
if self.firmware_version > 0 and self.conf.use_rx_udp == 2:
self.tx_control |= 0x04 # Use extra control bytes
self.sndp_active = False
self.NewUdpStatus()
else:
if self.want_udp_status != self.got_udp_status:
if DEBUG:
self.PrintStatus('Have ', self.got_udp_status)
self.PrintStatus(' send', self.want_udp_status)
try:
self.rx_udp_socket.send(self.want_udp_status)
except:
pass
elif DEBUG:
self.rx_udp_socket.send('Qs')
def PrintStatus(self, msg, string):
print (msg, ' ', end=' ')
print (string[0:2], end=' ')
for c in string[2:]:
print ("%2X" % ord(c), end=' ')
print ()
def GetFirmwareVersion(self):
return self.firmware_version
def OnSpot(self, level):
pass
def OnBtnFDX(self, is_fdx): # Status of FDX button, 0 or 1
if is_fdx:
self.HiQSDR_Connector_X1 |= 0x20 # Mask in the FDX bit
else:
self.HiQSDR_Connector_X1 &= ~0x20
self.NewUdpStatus()
def VarDecimGetChoices(self): # return text labels for the control
clock = self.conf.rx_udp_clock
l = [] # a list of sample rates
for dec in self.decimations:
l.append(str(int(float(clock) / dec / 1e3 + 0.5)))
return l
def VarDecimGetLabel(self): # return a text label for the control
return "Sample rate ksps"
def VarDecimGetIndex(self): # return the current index
return self.index
def VarDecimSet(self, index=None): # set decimation, return sample rate
if index is None: # initial call to set decimation before the call to open()
rate = self.application.vardecim_set # May be None or from different hardware
try:
dec = int(float(self.conf.rx_udp_clock // rate + 0.5))
self.index = self.decimations.index(dec)
except:
try:
self.index = self.decimations.index(self.conf.rx_udp_decimation)
except:
self.index = 0
else:
self.index = index
dec = self.decimations[self.index]
self.rx_control = dec // 64 - 1 # Second stage decimation less one
self.NewUdpStatus()
return int(float(self.conf.rx_udp_clock) / dec + 0.5)
def VarDecimRange(self):
return (48000, 960000)
def NewUdpStatus(self, do_tx=False):
s = "St"
s = s + struct.pack("<L", self.rx_phase)
s = s + struct.pack("<L", self.tx_phase)
s = s + chr(self.tx_level) + chr(self.tx_control)
s = s + chr(self.rx_control)
if self.firmware_version: # Add the version
s = s + chr(self.firmware_version) # The firmware version will be returned
if self.tx_control & 0x04: # Use extra HiQSDR control bytes
s = s + chr(self.HiQSDR_Connector_X1)
s = s + chr(self.HiQSDR_Attenuator)
s = s + chr(self.HiQSDR_Bits)
s = s + chr(0)
else:
s = s + chr(0) * 4
s = s + struct.pack("<H", self.vna_count)
s = s + chr(0) * 2
else: # firmware version 0 or None
s = s + chr(0) # assume version 0
self.want_udp_status = s
if do_tx:
try:
self.rx_udp_socket.send(s)
except:
pass
def SetVNA(self, key_down=None, vna_start=None, vna_stop=None, vna_count=None, do_tx=False):
if key_down is None:
pass
elif key_down:
self.tx_control |= 0x08
else:
self.tx_control &= ~0x08
if vna_count is not None:
self.vna_count = vna_count # Number of scan points
if vna_start is not None: # Set the start and stop frequencies. The tx_phase is the frequency delta.
self.rx_phase = int(float(vna_start) / self.conf.rx_udp_clock * 2.0**32 + 0.5) & 0xFFFFFFFF
self.tx_phase = int(float(vna_stop - vna_start) / self.vna_count / self.conf.rx_udp_clock * 2.0**32 + 0.5) & 0xFFFFFFFF
self.tx_control &= ~0x03 # Erase last two bits
self.rx_control = 40 - 1
self.tx_level = 255
self.NewUdpStatus(do_tx)
start = int(float(self.rx_phase) * self.conf.rx_udp_clock / 2.0**32 + 0.5)
stop = int(start + float(self.tx_phase) * self.vna_count * self.conf.rx_udp_clock / 2.0**32 + 0.5)
return start, stop # return the start and stop frequencies after integer rounding
| gpl-2.0 | -1,035,308,506,490,065,200 | 39.841642 | 125 | 0.625476 | false |
dmoliveira/networkx | networkx/generators/community.py | 30 | 11963 | """Generators for classes of graphs used in studying social networks."""
import itertools
import math
import random
import networkx as nx
# Copyright(C) 2011 by
# Ben Edwards <[email protected]>
# Aric Hagberg <[email protected]>
# All rights reserved.
# BSD license.
__author__ = """\n""".join(['Ben Edwards ([email protected])',
'Aric Hagberg ([email protected])'])
__all__ = ['caveman_graph', 'connected_caveman_graph',
'relaxed_caveman_graph', 'random_partition_graph',
'planted_partition_graph', 'gaussian_random_partition_graph']
def caveman_graph(l, k):
"""Returns a caveman graph of ``l`` cliques of size ``k``.
Parameters
----------
l : int
Number of cliques
k : int
Size of cliques
Returns
-------
G : NetworkX Graph
caveman graph
Notes
-----
This returns an undirected graph, it can be converted to a directed
graph using :func:`nx.to_directed`, or a multigraph using
``nx.MultiGraph(nx.caveman_graph(l, k))``. Only the undirected version is
described in [1]_ and it is unclear which of the directed
generalizations is most useful.
Examples
--------
>>> G = nx.caveman_graph(3, 3)
See also
--------
connected_caveman_graph
References
----------
.. [1] Watts, D. J. 'Networks, Dynamics, and the Small-World Phenomenon.'
Amer. J. Soc. 105, 493-527, 1999.
"""
# l disjoint cliques of size k
G = nx.empty_graph(l*k)
G.name = "caveman_graph(%s,%s)" % (l*k, k)
if k > 1:
for start in range(0, l*k, k):
edges = itertools.combinations(range(start, start+k), 2)
G.add_edges_from(edges)
return G
def connected_caveman_graph(l, k):
"""Returns a connected caveman graph of ``l`` cliques of size ``k``.
The connected caveman graph is formed by creating ``n`` cliques of size
``k``, then a single edge in each clique is rewired to a node in an
adjacent clique.
Parameters
----------
l : int
number of cliques
k : int
size of cliques
Returns
-------
G : NetworkX Graph
connected caveman graph
Notes
-----
This returns an undirected graph, it can be converted to a directed
graph using :func:`nx.to_directed`, or a multigraph using
``nx.MultiGraph(nx.caveman_graph(l, k))``. Only the undirected version is
described in [1]_ and it is unclear which of the directed
generalizations is most useful.
Examples
--------
>>> G = nx.connected_caveman_graph(3, 3)
References
----------
.. [1] Watts, D. J. 'Networks, Dynamics, and the Small-World Phenomenon.'
Amer. J. Soc. 105, 493-527, 1999.
"""
G = nx.caveman_graph(l, k)
G.name = "connected_caveman_graph(%s,%s)" % (l, k)
for start in range(0, l*k, k):
G.remove_edge(start, start+1)
G.add_edge(start, (start-1) % (l*k))
return G
def relaxed_caveman_graph(l, k, p, seed=None):
"""Return a relaxed caveman graph.
A relaxed caveman graph starts with ``l`` cliques of size ``k``. Edges are
then randomly rewired with probability ``p`` to link different cliques.
Parameters
----------
l : int
Number of groups
k : int
Size of cliques
p : float
Probability of rewiring each edge.
seed : int,optional
Seed for random number generator(default=None)
Returns
-------
G : NetworkX Graph
Relaxed Caveman Graph
Raises
------
NetworkXError:
If p is not in [0,1]
Examples
--------
>>> G = nx.relaxed_caveman_graph(2, 3, 0.1, seed=42)
References
----------
.. [1] Santo Fortunato, Community Detection in Graphs,
Physics Reports Volume 486, Issues 3-5, February 2010, Pages 75-174.
http://arxiv.org/abs/0906.0612
"""
if not seed is None:
random.seed(seed)
G = nx.caveman_graph(l, k)
nodes = G.nodes()
G.name = "relaxed_caveman_graph (%s,%s,%s)" % (l, k, p)
for (u, v) in G.edges():
if random.random() < p: # rewire the edge
x = random.choice(nodes)
if G.has_edge(u, x):
continue
G.remove_edge(u, v)
G.add_edge(u, x)
return G
def random_partition_graph(sizes, p_in, p_out, seed=None, directed=False):
"""Return the random partition graph with a partition of sizes.
A partition graph is a graph of communities with sizes defined by
s in sizes. Nodes in the same group are connected with probability
p_in and nodes of different groups are connected with probability
p_out.
Parameters
----------
sizes : list of ints
Sizes of groups
p_in : float
probability of edges within groups
p_out : float
probability of edges between groups
directed : boolean optional, default=False
Whether to create a directed graph
seed : int optional, default None
A seed for the random number generator
Returns
-------
G : NetworkX Graph or DiGraph
random partition graph of size sum(gs)
Raises
------
NetworkXError
If p_in or p_out is not in [0,1]
Examples
--------
>>> G = nx.random_partition_graph([10,10,10],.25,.01)
>>> len(G)
30
>>> partition = G.graph['partition']
>>> len(partition)
3
Notes
-----
This is a generalization of the planted-l-partition described in
[1]_. It allows for the creation of groups of any size.
The partition is stored as a graph attribute 'partition'.
References
----------
.. [1] Santo Fortunato 'Community Detection in Graphs' Physics Reports
Volume 486, Issue 3-5 p. 75-174. http://arxiv.org/abs/0906.0612
"""
# Use geometric method for O(n+m) complexity algorithm
# partition=nx.community_sets(nx.get_node_attributes(G,'affiliation'))
if not seed is None:
random.seed(seed)
if not 0.0 <= p_in <= 1.0:
raise nx.NetworkXError("p_in must be in [0,1]")
if not 0.0 <= p_out <= 1.0:
raise nx.NetworkXError("p_out must be in [0,1]")
if directed:
G = nx.DiGraph()
else:
G = nx.Graph()
G.graph['partition'] = []
n = sum(sizes)
G.add_nodes_from(range(n))
# start with len(sizes) groups of gnp random graphs with parameter p_in
# graphs are unioned together with node labels starting at
# 0, sizes[0], sizes[0]+sizes[1], ...
next_group = {} # maps node key (int) to first node in next group
start = 0
group = 0
for n in sizes:
edges = ((u+start, v+start)
for u, v in
nx.fast_gnp_random_graph(n, p_in, directed=directed).edges())
G.add_edges_from(edges)
next_group.update(dict.fromkeys(range(start, start+n), start+n))
G.graph['partition'].append(set(range(start, start+n)))
group += 1
start += n
# handle edge cases
if p_out == 0:
return G
if p_out == 1:
for n in next_group:
targets = range(next_group[n], len(G))
G.add_edges_from(zip([n]*len(targets), targets))
if directed:
G.add_edges_from(zip(targets, [n]*len(targets)))
return G
# connect each node in group randomly with the nodes not in group
# use geometric method like fast_gnp_random_graph()
lp = math.log(1.0 - p_out)
n = len(G)
if directed:
for u in range(n):
v = 0
while v < n:
lr = math.log(1.0 - random.random())
v += int(lr/lp)
# skip over nodes in the same group as v, including self loops
if next_group.get(v, n) == next_group[u]:
v = next_group[u]
if v < n:
G.add_edge(u, v)
v += 1
else:
for u in range(n-1):
v = next_group[u] # start with next node not in this group
while v < n:
lr = math.log(1.0 - random.random())
v += int(lr/lp)
if v < n:
G.add_edge(u, v)
v += 1
return G
def planted_partition_graph(l, k, p_in, p_out, seed=None, directed=False):
"""Return the planted l-partition graph.
This model partitions a graph with n=l*k vertices in
l groups with k vertices each. Vertices of the same
group are linked with a probability p_in, and vertices
of different groups are linked with probability p_out.
Parameters
----------
l : int
Number of groups
k : int
Number of vertices in each group
p_in : float
probability of connecting vertices within a group
p_out : float
probability of connecting vertices between groups
seed : int,optional
Seed for random number generator(default=None)
directed : bool,optional (default=False)
If True return a directed graph
Returns
-------
G : NetworkX Graph or DiGraph
planted l-partition graph
Raises
------
NetworkXError:
If p_in,p_out are not in [0,1] or
Examples
--------
>>> G = nx.planted_partition_graph(4, 3, 0.5, 0.1,seed=42)
See Also
--------
random_partition_graph
References
----------
.. [1] A. Condon, R.M. Karp, Algorithms for graph partitioning
on the planted partition model,
Random Struct. Algor. 18 (2001) 116-140.
.. [2] Santo Fortunato 'Community Detection in Graphs' Physics Reports
Volume 486, Issue 3-5 p. 75-174. http://arxiv.org/abs/0906.0612
"""
return random_partition_graph([k]*l, p_in, p_out, seed, directed)
def gaussian_random_partition_graph(n, s, v, p_in, p_out, directed=False,
seed=None):
"""Generate a Gaussian random partition graph.
A Gaussian random partition graph is created by creating k partitions
each with a size drawn from a normal distribution with mean s and variance
s/v. Nodes are connected within clusters with probability p_in and
between clusters with probability p_out[1]
Parameters
----------
n : int
Number of nodes in the graph
s : float
Mean cluster size
v : float
Shape parameter. The variance of cluster size distribution is s/v.
p_in : float
Probability of intra cluster connection.
p_out : float
Probability of inter cluster connection.
directed : boolean, optional default=False
Whether to create a directed graph or not
seed : int
Seed value for random number generator
Returns
-------
G : NetworkX Graph or DiGraph
gaussian random partition graph
Raises
------
NetworkXError
If s is > n
If p_in or p_out is not in [0,1]
Notes
-----
Note the number of partitions is dependent on s,v and n, and that the
last partition may be considerably smaller, as it is sized to simply
fill out the nodes [1]
See Also
--------
random_partition_graph
Examples
--------
>>> G = nx.gaussian_random_partition_graph(100,10,10,.25,.1)
>>> len(G)
100
References
----------
.. [1] Ulrik Brandes, Marco Gaertler, Dorothea Wagner,
Experiments on Graph Clustering Algorithms,
In the proceedings of the 11th Europ. Symp. Algorithms, 2003.
"""
if s > n:
raise nx.NetworkXError("s must be <= n")
assigned = 0
sizes = []
while True:
size = int(random.normalvariate(s, float(s) / v + 0.5))
if size < 1: # how to handle 0 or negative sizes?
continue
if assigned + size >= n:
sizes.append(n-assigned)
break
assigned += size
sizes.append(size)
return random_partition_graph(sizes, p_in, p_out, seed, directed)
| bsd-3-clause | 5,867,875,322,336,766,000 | 28.321078 | 79 | 0.584302 | false |
hjarmstrong/Odme-plusplus | 3rd/build/tools/build/v2/test/TestCmd.py | 44 | 20065 | """
TestCmd.py: a testing framework for commands and scripts.
The TestCmd module provides a framework for portable automated testing of
executable commands and scripts (in any language, not just Python), especially
commands and scripts that require file system interaction.
In addition to running tests and evaluating conditions, the TestCmd module
manages and cleans up one or more temporary workspace directories, and provides
methods for creating files and directories in those workspace directories from
in-line data, here-documents), allowing tests to be completely self-contained.
A TestCmd environment object is created via the usual invocation:
test = TestCmd()
The TestCmd module provides pass_test(), fail_test(), and no_result() unbound
methods that report test results for use with the Aegis change management
system. These methods terminate the test immediately, reporting PASSED, FAILED
or NO RESULT respectively and exiting with status 0 (success), 1 or 2
respectively. This allows for a distinction between an actual failed test and a
test that could not be properly evaluated because of an external condition (such
as a full file system or incorrect permissions).
"""
# Copyright 2000 Steven Knight
# This module is free software, and you may redistribute it and/or modify
# it under the same terms as Python itself, so long as this copyright message
# and disclaimer are retained in their original form.
#
# IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
# SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
# THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
# THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
# AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
# Copyright 2002-2003 Vladimir Prus.
# Copyright 2002-2003 Dave Abrahams.
# Copyright 2006 Rene Rivera.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
from string import join, split
__author__ = "Steven Knight <[email protected]>"
__revision__ = "TestCmd.py 0.D002 2001/08/31 14:56:12 software"
__version__ = "0.02"
from types import *
import os
import os.path
import re
import shutil
import stat
import subprocess
import sys
import tempfile
import traceback
tempfile.template = 'testcmd.'
_Cleanup = []
def _clean():
global _Cleanup
list = _Cleanup[:]
_Cleanup = []
list.reverse()
for test in list:
test.cleanup()
sys.exitfunc = _clean
def caller(tblist, skip):
string = ""
arr = []
for file, line, name, text in tblist:
if file[-10:] == "TestCmd.py":
break
arr = [(file, line, name, text)] + arr
atfrom = "at"
for file, line, name, text in arr[skip:]:
if name == "?":
name = ""
else:
name = " (" + name + ")"
string = string + ("%s line %d of %s%s\n" % (atfrom, line, file, name))
atfrom = "\tfrom"
return string
def fail_test(self=None, condition=True, function=None, skip=0):
"""Cause the test to fail.
By default, the fail_test() method reports that the test FAILED and exits
with a status of 1. If a condition argument is supplied, the test fails
only if the condition is true.
"""
if not condition:
return
if not function is None:
function()
of = ""
desc = ""
sep = " "
if not self is None:
if self.program:
of = " of " + join(self.program, " ")
sep = "\n\t"
if self.description:
desc = " [" + self.description + "]"
sep = "\n\t"
at = caller(traceback.extract_stack(), skip)
sys.stderr.write("FAILED test" + of + desc + sep + at + """
in directory: """ + os.getcwd() )
sys.exit(1)
def no_result(self=None, condition=True, function=None, skip=0):
"""Causes a test to exit with no valid result.
By default, the no_result() method reports NO RESULT for the test and
exits with a status of 2. If a condition argument is supplied, the test
fails only if the condition is true.
"""
if not condition:
return
if not function is None:
function()
of = ""
desc = ""
sep = " "
if not self is None:
if self.program:
of = " of " + self.program
sep = "\n\t"
if self.description:
desc = " [" + self.description + "]"
sep = "\n\t"
at = caller(traceback.extract_stack(), skip)
sys.stderr.write("NO RESULT for test" + of + desc + sep + at)
sys.exit(2)
def pass_test(self=None, condition=True, function=None):
"""Causes a test to pass.
By default, the pass_test() method reports PASSED for the test and exits
with a status of 0. If a condition argument is supplied, the test passes
only if the condition is true.
"""
if not condition:
return
if not function is None:
function()
sys.stderr.write("PASSED\n")
sys.exit(0)
def match_exact(lines=None, matches=None):
"""
Returns whether the given lists or strings containing lines separated
using newline characters contain exactly the same data.
"""
if not type(lines) is ListType:
lines = split(lines, "\n")
if not type(matches) is ListType:
matches = split(matches, "\n")
if len(lines) != len(matches):
return
for i in range(len(lines)):
if lines[i] != matches[i]:
return
return 1
def match_re(lines=None, res=None):
"""
The given lists or strings contain lines separated by newline characters.
This function matches those lines one by one, interpreting the lines in the
res parameter as regular expressions.
"""
if not type(lines) is ListType:
lines = split(lines, "\n")
if not type(res) is ListType:
res = split(res, "\n")
if len(lines) != len(res):
return
for i in range(len(lines)):
if not re.compile("^" + res[i] + "$").search(lines[i]):
return
return 1
class TestCmd:
def __init__(self, description=None, program=None, workdir=None,
subdir=None, verbose=False, match=None, inpath=None):
self._cwd = os.getcwd()
self.description_set(description)
self.program_set(program, inpath)
self.verbose_set(verbose)
if match is None:
self.match_func = match_re
else:
self.match_func = match
self._dirlist = []
self._preserve = {'pass_test': 0, 'fail_test': 0, 'no_result': 0}
env = os.environ.get('PRESERVE')
if env:
self._preserve['pass_test'] = env
self._preserve['fail_test'] = env
self._preserve['no_result'] = env
else:
env = os.environ.get('PRESERVE_PASS')
if env is not None:
self._preserve['pass_test'] = env
env = os.environ.get('PRESERVE_FAIL')
if env is not None:
self._preserve['fail_test'] = env
env = os.environ.get('PRESERVE_NO_RESULT')
if env is not None:
self._preserve['no_result'] = env
self._stdout = []
self._stderr = []
self.status = None
self.condition = 'no_result'
self.workdir_set(workdir)
self.subdir(subdir)
def __del__(self):
self.cleanup()
def __repr__(self):
return "%x" % id(self)
def cleanup(self, condition=None):
"""
Removes any temporary working directories for the specified TestCmd
environment. If the environment variable PRESERVE was set when the
TestCmd environment was created, temporary working directories are not
removed. If any of the environment variables PRESERVE_PASS,
PRESERVE_FAIL or PRESERVE_NO_RESULT were set when the TestCmd
environment was created, then temporary working directories are not
removed if the test passed, failed or had no result, respectively.
Temporary working directories are also preserved for conditions
specified via the preserve method.
Typically, this method is not called directly, but is used when the
script exits to clean up temporary working directories as appropriate
for the exit status.
"""
if not self._dirlist:
return
if condition is None:
condition = self.condition
if self._preserve[condition]:
for dir in self._dirlist:
print("Preserved directory %s" % dir)
else:
list = self._dirlist[:]
list.reverse()
for dir in list:
self.writable(dir, 1)
shutil.rmtree(dir, ignore_errors=1)
self._dirlist = []
self.workdir = None
os.chdir(self._cwd)
try:
global _Cleanup
_Cleanup.remove(self)
except (AttributeError, ValueError):
pass
def description_set(self, description):
"""Set the description of the functionality being tested."""
self.description = description
def fail_test(self, condition=True, function=None, skip=0):
"""Cause the test to fail."""
if not condition:
return
self.condition = 'fail_test'
fail_test(self = self,
condition = condition,
function = function,
skip = skip)
def match(self, lines, matches):
"""Compare actual and expected file contents."""
return self.match_func(lines, matches)
def match_exact(self, lines, matches):
"""Compare actual and expected file content exactly."""
return match_exact(lines, matches)
def match_re(self, lines, res):
"""Compare file content with a regular expression."""
return match_re(lines, res)
def no_result(self, condition=True, function=None, skip=0):
"""Report that the test could not be run."""
if not condition:
return
self.condition = 'no_result'
no_result(self = self,
condition = condition,
function = function,
skip = skip)
def pass_test(self, condition=True, function=None):
"""Cause the test to pass."""
if not condition:
return
self.condition = 'pass_test'
pass_test(self, condition, function)
def preserve(self, *conditions):
"""
Arrange for the temporary working directories for the specified
TestCmd environment to be preserved for one or more conditions. If no
conditions are specified, arranges for the temporary working
directories to be preserved for all conditions.
"""
if conditions is ():
conditions = ('pass_test', 'fail_test', 'no_result')
for cond in conditions:
self._preserve[cond] = 1
def program_set(self, program, inpath):
"""Set the executable program or script to be tested."""
if not inpath and program and not os.path.isabs(program[0]):
program[0] = os.path.join(self._cwd, program[0])
self.program = program
def read(self, file, mode='rb'):
"""
Reads and returns the contents of the specified file name. The file
name may be a list, in which case the elements are concatenated with
the os.path.join() method. The file is assumed to be under the
temporary working directory unless it is an absolute path name. The I/O
mode for the file may be specified and must begin with an 'r'. The
default is 'rb' (binary read).
"""
if type(file) is ListType:
file = apply(os.path.join, tuple(file))
if not os.path.isabs(file):
file = os.path.join(self.workdir, file)
if mode[0] != 'r':
raise ValueError, "mode must begin with 'r'"
return open(file, mode).read()
def run(self, program=None, arguments=None, chdir=None, stdin=None,
universal_newlines=True):
"""
Runs a test of the program or script for the test environment.
Standard output and error output are saved for future retrieval via the
stdout() and stderr() methods.
'universal_newlines' parameter controls how the child process
input/output streams are opened as defined for the same named Python
subprocess.Popen constructor parameter.
"""
if chdir:
if not os.path.isabs(chdir):
chdir = os.path.join(self.workpath(chdir))
if self.verbose:
sys.stderr.write("chdir(" + chdir + ")\n")
else:
chdir = self.workdir
cmd = []
if program and program[0]:
if program[0] != self.program[0] and not os.path.isabs(program[0]):
program[0] = os.path.join(self._cwd, program[0])
cmd += program
else:
cmd += self.program
if arguments:
cmd += arguments.split(" ")
if self.verbose:
sys.stderr.write(join(cmd, " ") + "\n")
p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=chdir,
universal_newlines=universal_newlines)
if stdin:
if type(stdin) is ListType:
stdin = join(stdin, '')
# subprocess.Popen has no 'tochild' attribute; pass the input to
# communicate() so it is written to the child's stdin.
out, err = p.communicate(stdin)
else:
out, err = p.communicate()
self._stdout.append(out)
self._stderr.append(err)
self.status = p.returncode
if self.verbose:
sys.stdout.write(self._stdout[-1])
sys.stderr.write(self._stderr[-1])
def stderr(self, run=None):
"""
Returns the error output from the specified run number. If there is
no specified run number, then returns the error output of the last run.
If the run number is less than zero, then returns the error output from
that many runs back from the current run.
"""
if not run:
run = len(self._stderr)
elif run < 0:
run = len(self._stderr) + run
run -= 1
if run < 0:
return ''
return self._stderr[run]
def stdout(self, run=None):
"""
Returns the standard output from the specified run number. If there
is no specified run number, then returns the standard output of the
last run. If the run number is less than zero, then returns the
standard output from that many runs back from the current run.
"""
if not run:
run = len(self._stdout)
elif run < 0:
run = len(self._stdout) + run
run -= 1
if run < 0:
return ''
return self._stdout[run]
def subdir(self, *subdirs):
"""
Create new subdirectories under the temporary working directory, one
for each argument. An argument may be a list, in which case the list
elements are concatenated using the os.path.join() method.
Subdirectories multiple levels deep must be created using a separate
argument for each level:
test.subdir('sub', ['sub', 'dir'], ['sub', 'dir', 'ectory'])
Returns the number of subdirectories actually created.
"""
count = 0
for sub in subdirs:
if sub is None:
continue
if type(sub) is ListType:
sub = apply(os.path.join, tuple(sub))
new = os.path.join(self.workdir, sub)
try:
os.mkdir(new)
except:
pass
else:
count += 1
return count
def unlink(self, file):
"""
Unlinks the specified file name. The file name may be a list, in
which case the elements are concatenated using the os.path.join()
method. The file is assumed to be under the temporary working directory
unless it is an absolute path name.
"""
if type(file) is ListType:
file = apply(os.path.join, tuple(file))
if not os.path.isabs(file):
file = os.path.join(self.workdir, file)
os.unlink(file)
def verbose_set(self, verbose):
"""Set the verbose level."""
self.verbose = verbose
def workdir_set(self, path):
"""
Creates a temporary working directory with the specified path name.
If the path is a null string (''), a unique directory name is created.
"""
if os.path.isabs(path):
self.workdir = path
else:
if path != None:
if path == '':
path = tempfile.mktemp()
if path != None:
os.mkdir(path)
self._dirlist.append(path)
global _Cleanup
try:
_Cleanup.index(self)
except ValueError:
_Cleanup.append(self)
# We would like to set self.workdir like this:
# self.workdir = path
# But symlinks in the path will report things differently from
# os.getcwd(), so chdir there and back to fetch the canonical
# path.
cwd = os.getcwd()
os.chdir(path)
self.workdir = os.getcwd()
os.chdir(cwd)
else:
self.workdir = None
def workpath(self, *args):
"""
Returns the absolute path name to a subdirectory or file within the
current temporary working directory. Concatenates the temporary working
directory name with the specified arguments using os.path.join().
"""
return apply(os.path.join, (self.workdir,) + tuple(args))
def writable(self, top, write):
"""
Make the specified directory tree writable (write == 1) or not
(write == None).
"""
def _walk_chmod(arg, dirname, names):
st = os.stat(dirname)
os.chmod(dirname, arg(st[stat.ST_MODE]))
for name in names:
fullname = os.path.join(dirname, name)
st = os.stat(fullname)
os.chmod(fullname, arg(st[stat.ST_MODE]))
_mode_writable = lambda mode: stat.S_IMODE(mode|0200)
_mode_non_writable = lambda mode: stat.S_IMODE(mode&~0200)
if write:
f = _mode_writable
else:
f = _mode_non_writable
try:
os.path.walk(top, _walk_chmod, f)
except:
pass # Ignore any problems changing modes.
def write(self, file, content, mode='wb'):
"""
Writes the specified content text (second argument) to the specified
file name (first argument). The file name may be a list, in which case
the elements are concatenated using the os.path.join() method. The file
is created under the temporary working directory. Any subdirectories in
the path must already exist. The I/O mode for the file may be specified
and must begin with a 'w'. The default is 'wb' (binary write).
"""
if type(file) is ListType:
file = apply(os.path.join, tuple(file))
if not os.path.isabs(file):
file = os.path.join(self.workdir, file)
if mode[0] != 'w':
raise ValueError, "mode must begin with 'w'"
open(file, mode).write(content)
| mit | 875,181,952,193,361,700 | 33.066214 | 80 | 0.59088 | false |
staranjeet/fjord | vendor/packages/translate-toolkit/translate/search/indexing/CommonIndexer.py | 3 | 25337 | # -*- coding: utf-8 -*-
#
# Copyright 2008 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
"""
base class for interfaces to indexing engines for pootle
"""
import os
import translate.lang.data
def is_available():
"""Check if this indexing engine interface is usable.
This function must exist in every module that contains indexing engine
interfaces.
:return: is this interface usable?
:rtype: bool
"""
return False
class CommonDatabase(object):
"""Base class for indexing support.
Any real implementation must override most methods of this class.
"""
field_analyzers = {}
"""mapping of field names and analyzers - see
:meth:`~.CommonDatabase.set_field_analyzers`"""
ANALYZER_EXACT = 0
"""exact matching: the query string must equal the whole term string"""
ANALYZER_PARTIAL = 1 << 1
"""partial matching: a document matches, even if the query string only
matches the beginning of the term value."""
ANALYZER_TOKENIZE = 1 << 2
"""tokenize terms and queries automatically"""
ANALYZER_DEFAULT = ANALYZER_TOKENIZE | ANALYZER_PARTIAL
"""the default analyzer to be used if nothing is configured"""
QUERY_TYPE = None
"""override this with the query class of the implementation"""
INDEX_DIRECTORY_NAME = None
"""override this with a string to be used as the name of the indexing
directory/file in the filesystem
"""
def __init__(self, basedir, analyzer=None, create_allowed=True):
"""initialize or open an indexing database
Any derived class must override ``__init__``.
Any implementation can rely on the "self.location" attribute to be set
by the ``__init__`` function of the super class.
:raise ValueError: the given location exists, but the database type
is incompatible (e.g. created by a different
indexing engine)
:raise OSError: the database failed to initialize
:param basedir: the parent directory of the database
:type basedir: str
:param analyzer: bitwise combination of possible analyzer flags
to be used as the default analyzer for this
database. Leave it empty to use the system
default analyzer (``self.ANALYZER_DEFAULT``).
see :attr:`CommonDatabase.ANALYZER_TOKENIZE`,
:attr:`CommonDatabase.ANALYZER_PARTIAL`, ...
:type analyzer: int
:param create_allowed: create the database, if necessary.
:type create_allowed: bool
"""
# just do some checks
if self.QUERY_TYPE is None:
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'QUERY_TYPE' is undefined")
if self.INDEX_DIRECTORY_NAME is None:
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'INDEX_DIRECTORY_NAME' is undefined")
self.location = os.path.join(basedir, self.INDEX_DIRECTORY_NAME)
if (not create_allowed) and (not os.path.exists(self.location)):
raise OSError("Indexer: the database does not exist - and I am" \
+ " not configured to create it.")
if analyzer is None:
self.analyzer = self.ANALYZER_DEFAULT
else:
self.analyzer = analyzer
self.field_analyzers = {}
def flush(self, optimize=False):
"""Flush the content of the database - to force changes to be written
to disk.
Some databases also support index optimization.
:param optimize: should the index be optimized if possible?
:type optimize: bool
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'flush' is missing")
def make_query(self, args, require_all=True, analyzer=None):
"""Create simple queries (strings or field searches) or
combine multiple queries (AND/OR).
To specify rules for field searches, you may want to take a look at
:meth:`~.CommonDatabase.set_field_analyzers`. The parameter
'match_text_partial' can override the previously defined
default setting.
:param args: queries or search string or description of field query
examples::
[xapian.Query("foo"), xapian.Query("bar")]
xapian.Query("foo")
"bar"
{"foo": "bar", "foobar": "foo"}
:type args: list of queries | single query | str | dict
:param require_all: boolean operator
(True -> AND (default) / False -> OR)
:type require_all: boolean
:param analyzer: (only applicable for 'dict' or 'str')
Define query options (partial matching, exact
matching, tokenizing, ...) as bitwise
combinations of *CommonIndexer.ANALYZER_???*.
This can override previously defined field
analyzer settings.
If analyzer is ``None`` (default), then the
configured analyzer for the field is used.
:type analyzer: int
:return: the combined query
:rtype: query type of the specific implementation
"""
# turn a dict into a list if necessary
if isinstance(args, dict):
args = args.items()
# turn 'args' into a list if necessary
if not isinstance(args, list):
args = [args]
# combine all given queries
result = []
for query in args:
# just add precompiled queries
if isinstance(query, self.QUERY_TYPE):
result.append(self._create_query_for_query(query))
# create field/value queries out of a tuple
elif isinstance(query, tuple):
field, value = query
# perform unicode normalization
field = translate.lang.data.normalize(unicode(field))
value = translate.lang.data.normalize(unicode(value))
# check for the chosen match type
if analyzer is None:
analyzer = self.get_field_analyzers(field)
result.append(self._create_query_for_field(field, value,
analyzer=analyzer))
# parse plaintext queries
elif isinstance(query, basestring):
if analyzer is None:
analyzer = self.analyzer
# perform unicode normalization
query = translate.lang.data.normalize(unicode(query))
result.append(self._create_query_for_string(query,
require_all=require_all, analyzer=analyzer))
else:
# other types of queries are not supported
raise ValueError("Unable to handle query type: %s" \
% str(type(query)))
# return the combined query
return self._create_query_combined(result, require_all)
def _create_query_for_query(self, query):
"""Generate a query based on an existing query object.
Basically this function should just create a copy of the original.
:param query: the original query object
:type query: ``xapian.Query``
:return: the resulting query object
:rtype: ``xapian.Query`` | ``PyLucene.Query``
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_create_query_for_query' is missing")
def _create_query_for_string(self, text, require_all=True,
analyzer=None):
"""Generate a query for a plain term of a string query.
Basically this function parses the string and returns the resulting
query.
:param text: the query string
:type text: str
:param require_all: boolean operator
(True -> AND (default) / False -> OR)
:type require_all: bool
:param analyzer: Define query options (partial matching, exact matching,
tokenizing, ...) as bitwise combinations of
*CommonIndexer.ANALYZER_???*.
This can override previously defined field
analyzer settings.
If analyzer is None (default), then the configured
analyzer for the field is used.
:type analyzer: int
:return: resulting query object
:rtype: xapian.Query | PyLucene.Query
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_create_query_for_string' is missing")
def _create_query_for_field(self, field, value, analyzer=None):
"""Generate a field query.
This functions creates a field->value query.
:param field: the fieldname to be used
:type field: str
:param value: the wanted value of the field
:type value: str
:param analyzer: Define query options (partial matching, exact matching,
tokenizing, ...) as bitwise combinations of
*CommonIndexer.ANALYZER_???*.
This can override previously defined field
analyzer settings.
If analyzer is None (default), then the configured
analyzer for the field is used.
:type analyzer: int
:return: resulting query object
:rtype: ``xapian.Query`` | ``PyLucene.Query``
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_create_query_for_field' is missing")
def _create_query_combined(self, queries, require_all=True):
"""generate a combined query
:param queries: list of the original queries
:type queries: list of xapian.Query
:param require_all: boolean operator
(True -> AND (default) / False -> OR)
:type require_all: bool
:return: the resulting combined query object
:rtype: ``xapian.Query`` | ``PyLucene.Query``
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_create_query_combined' is missing")
def index_document(self, data):
"""Add the given data to the database.
:param data: the data to be indexed.
A dictionary will be treated as ``fieldname:value``
combinations.
If the fieldname is None then the value will be
interpreted as a plain term or as a list of plain terms.
Lists of terms are indexed separately.
Lists of strings are treated as plain terms.
:type data: dict | list of str
"""
doc = self._create_empty_document()
if isinstance(data, dict):
data = data.items()
# add all data
for dataset in data:
if isinstance(dataset, tuple):
# the dataset tuple consists of '(key, value)'
key, value = dataset
if key is None:
if isinstance(value, list):
terms = value[:]
elif isinstance(value, basestring):
terms = [value]
else:
raise ValueError("Invalid data type to be indexed: %s" \
% str(type(data)))
for one_term in terms:
self._add_plain_term(doc, self._decode(one_term),
(self.ANALYZER_DEFAULT & self.ANALYZER_TOKENIZE > 0))
else:
analyze_settings = self.get_field_analyzers(key)
# handle multiple terms
if not isinstance(value, list):
value = [value]
for one_term in value:
self._add_field_term(doc, key, self._decode(one_term),
(analyze_settings & self.ANALYZER_TOKENIZE > 0))
elif isinstance(dataset, basestring):
self._add_plain_term(doc, self._decode(dataset),
(self.ANALYZER_DEFAULT & self.ANALYZER_TOKENIZE > 0))
else:
raise ValueError("Invalid data type to be indexed: %s" \
% str(type(data)))
self._add_document_to_index(doc)
def _create_empty_document(self):
"""Create an empty document to be filled and added to the index later.
:return: the new document object
:rtype: ``xapian.Document`` | ``PyLucene.Document``
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_create_empty_document' is missing")
def _add_plain_term(self, document, term, tokenize=True):
"""Add a term to a document.
:param document: the document to be changed
:type document: ``xapian.Document`` | ``PyLucene.Document``
:param term: a single term to be added
:type term: str
:param tokenize: should the term be tokenized automatically
:type tokenize: bool
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_add_plain_term' is missing")
def _add_field_term(self, document, field, term, tokenize=True):
"""Add a field term to a document.
:param document: the document to be changed
:type document: ``xapian.Document`` | ``PyLucene.Document``
:param field: name of the field
:type field: str
:param term: term to be associated to the field
:type term: str
:param tokenize: should the term be tokenized automatically
:type tokenize: bool
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_add_field_term' is missing")
def _add_document_to_index(self, document):
"""Add a prepared document to the index database.
:param document: the document to be added
:type document: xapian.Document | PyLucene.Document
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_add_document_to_index' is missing")
def begin_transaction(self):
"""begin a transaction
You can group multiple modifications of a database as a transaction.
        This prevents time-consuming database flushing and helps if you want
        a changeset to be committed either completely or not at all.
No changes will be written to disk until 'commit_transaction'.
'cancel_transaction' can be used to revert an ongoing transaction.
Database types that do not support transactions may silently ignore it.
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'begin_transaction' is missing")
def cancel_transaction(self):
"""cancel an ongoing transaction
        See 'begin_transaction' for details.
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'cancel_transaction' is missing")
def commit_transaction(self):
"""Submit the currently ongoing transaction and write changes to disk.
        See 'begin_transaction' for details.
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'commit_transaction' is missing")
def get_query_result(self, query):
"""return an object containing the results of a query
:param query: a pre-compiled query
:type query: a query object of the real implementation
:return: an object that allows access to the results
:rtype: subclass of CommonEnquire
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'get_query_result' is missing")
def delete_document_by_id(self, docid):
"""Delete a specified document.
:param docid: the document ID to be deleted
:type docid: int
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'delete_document_by_id' is missing")
def search(self, query, fieldnames):
"""Return a list of the contents of specified fields for all
matches of a query.
:param query: the query to be issued
:type query: a query object of the real implementation
:param fieldnames: the name(s) of a field of the document content
:type fieldnames: string | list of strings
:return: a list of dicts containing the specified field(s)
:rtype: list of dicts
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'search' is missing")
def delete_doc(self, ident):
"""Delete the documents returned by a query.
:param ident: [list of] document IDs | dict describing a query | query
:type ident: int | list of tuples | dict | list of dicts |
query (e.g. xapian.Query) | list of queries
"""
# turn a doc-ID into a list of doc-IDs
if isinstance(ident, list):
# it is already a list
ident_list = ident
else:
ident_list = [ident]
if len(ident_list) == 0:
# no matching items
return 0
if isinstance(ident_list[0], int) or isinstance(ident_list[0], long):
# create a list of IDs of all successfully removed documents
success_delete = [match for match in ident_list
if self.delete_document_by_id(match)]
return len(success_delete)
if isinstance(ident_list[0], dict):
# something like: { "msgid": "foobar" }
# assemble all queries
query = self.make_query([self.make_query(query_dict,
require_all=True) for query_dict in ident_list],
require_all=True)
elif isinstance(ident_list[0], object):
# assume a query object (with 'AND')
query = self.make_query(ident_list, require_all=True)
else:
# invalid element type in list (not necessarily caught in the
# lines above)
raise TypeError("description of documents to-be-deleted is not " \
+ "supported: list of %s" % type(ident_list[0]))
# we successfully created a query - now iterate through the result
# no documents deleted so far ...
remove_list = []
# delete all resulting documents step by step
def add_docid_to_list(match):
"""Collect every document ID."""
remove_list.append(match["docid"])
self._walk_matches(query, add_docid_to_list)
return self.delete_doc(remove_list)
def _walk_matches(self, query, function, arg_for_function=None):
"""Use this function if you want to do something with every single match
of a query.
Example::
self._walk_matches(query, function_for_match, arg_for_func)
*function_for_match* expects only one argument: the matched object
:param query: a query object of the real implementation
:type query: xapian.Query | PyLucene.Query
:param function: the function to execute with every match
:type function: function
:param arg_for_function: an optional argument for the function
:type arg_for_function: anything
"""
# execute the query
enquire = self.get_query_result(query)
# start with the first element
start = 0
# do the loop at least once
size, avail = (0, 1)
# how many results per 'get_matches'?
steps = 2
while start < avail:
(size, avail, matches) = enquire.get_matches(start, steps)
for match in matches:
if arg_for_function is None:
function(match)
else:
function(match, arg_for_function)
start += size
def set_field_analyzers(self, field_analyzers):
"""Set the analyzers for different fields of the database documents.
All bitwise combinations of *CommonIndexer.ANALYZER_???* are possible.
:param field_analyzers: mapping of field names and analyzers
:type field_analyzers: dict containing field names and analyzers
:raise TypeError: invalid values in *field_analyzers*
"""
for field, analyzer in field_analyzers.items():
            # check for invalid input types
if not isinstance(field, (str, unicode)):
raise TypeError("field name must be a string")
if not isinstance(analyzer, int):
raise TypeError("the analyzer must be a whole number (int)")
# map the analyzer to the field name
self.field_analyzers[field] = analyzer
def get_field_analyzers(self, fieldnames=None):
"""Return the analyzer that was mapped to a specific field.
See :meth:`~.CommonDatabase.set_field_analyzers` for details.
:param fieldnames: the analyzer of this field (or all/multiple fields)
is requested; leave empty (or *None*) to
request all fields.
:type fieldnames: str | list of str | None
:return: The analyzer setting of the field - see
*CommonDatabase.ANALYZER_???* or a dict of field names
and analyzers
:rtype: int | dict
"""
# all field analyzers are requested
if fieldnames is None:
# return a copy
return dict(self.field_analyzers)
# one field is requested
if isinstance(fieldnames, (str, unicode)):
if fieldnames in self.field_analyzers:
return self.field_analyzers[fieldnames]
else:
return self.analyzer
# a list of fields is requested
if isinstance(fieldnames, list):
result = {}
for field in fieldnames:
result[field] = self.get_field_analyzers(field)
return result
return self.analyzer
def _decode(self, text):
"""Decode the string from utf-8 or charmap perform
unicode normalization."""
if isinstance(text, str):
try:
result = unicode(text.decode("UTF-8"))
            except UnicodeDecodeError, e:
result = unicode(text.decode("charmap"))
elif not isinstance(text, unicode):
result = unicode(text)
else:
result = text
# perform unicode normalization
return translate.lang.data.normalize(result)
class CommonEnquire(object):
"""An enquire object contains the information about the result of a request.
"""
def __init__(self, enquire):
"""Intialization of a wrapper around enquires of different backends
:param enquire: a previous enquire
:type enquire: xapian.Enquire | pylucene-enquire
"""
self.enquire = enquire
def get_matches(self, start, number):
"""Return a specified number of qualified matches of a previous query.
:param start: index of the first match to return (starting from zero)
:type start: int
:param number: the number of matching entries to return
:type number: int
:return: a set of matching entries and some statistics
:rtype: tuple of (returned number, available number, matches)
"matches" is a dictionary of::
["rank", "percent", "document", "docid"]
"""
raise NotImplementedError("Incomplete indexing implementation: " \
+ "'get_matches' for the 'Enquire' class is missing")
def get_matches_count(self):
"""Return the estimated number of matches.
Use :meth:`translate.search.indexing.CommonIndexer.search`
        to retrieve the exact number of matches.
:return: The estimated number of matches
:rtype: int
"""
(returned, estimate_count, matches) = self.get_matches(0, 1)
return estimate_count
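# Illustrative usage sketch (not from the original module): ``database`` is
# assumed to be an instance of a concrete backend subclass (e.g. a Xapian- or
# PyLucene-based implementation) that fills in the abstract methods above;
# the field names and values below are made-up examples.
def _example_indexer_usage(database):
    # tokenize the "msgid" field when indexing and querying
    database.set_field_analyzers({"msgid": database.ANALYZER_TOKENIZE})
    # group the modifications into one transaction
    database.begin_transaction()
    database.index_document({"msgid": "hello world", None: ["greeting"]})
    database.commit_transaction()
    # build a field query and fetch the stored "msgid" values of all matches
    query = database.make_query({"msgid": "hello"}, require_all=True)
    return database.search(query, ["msgid"])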
| bsd-3-clause | -4,645,252,010,590,716,000 | 39.932149 | 85 | 0.588507 | false |
dmeulen/home-assistant | tests/conftest.py | 8 | 1542 | """Setup some common test helper things."""
import functools
import logging
import pytest
import requests_mock as _requests_mock
from homeassistant import util
from homeassistant.util import location
from .common import async_test_home_assistant
from .test_util.aiohttp import mock_aiohttp_client
logging.basicConfig()
logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
def test_real(func):
"""Force a function to require a keyword _test_real to be passed in."""
@functools.wraps(func)
def guard_func(*args, **kwargs):
real = kwargs.pop('_test_real', None)
if not real:
raise Exception('Forgot to mock or pass "_test_real=True" to %s',
func.__name__)
return func(*args, **kwargs)
return guard_func
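# Illustrative sketch (not from the original file): a helper showing how a
# caller would deliberately opt back in to the real network lookup guarded
# below; calling it performs an actual request.
def _example_real_location_lookup():
    return location.detect_location_info(_test_real=True)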
# Guard a few functions that would make network connections
location.detect_location_info = test_real(location.detect_location_info)
location.elevation = test_real(location.elevation)
util.get_local_ip = lambda: '127.0.0.1'
@pytest.fixture
def hass(loop):
"""Fixture to provide a test instance of HASS."""
hass = loop.run_until_complete(async_test_home_assistant(loop))
yield hass
loop.run_until_complete(hass.async_stop())
@pytest.fixture
def requests_mock():
"""Fixture to provide a requests mocker."""
with _requests_mock.mock() as m:
yield m
@pytest.fixture
def aioclient_mock():
"""Fixture to mock aioclient calls."""
with mock_aiohttp_client() as mock_session:
yield mock_session
| mit | 3,167,877,439,778,532,000 | 24.7 | 77 | 0.69131 | false |
thezawad/flexx | flexx/ui/_formlayout.py | 20 | 8438 | """
Example:
.. UIExample:: 200
from flexx import ui
class Example(ui.Widget):
def init(self):
with ui.FormLayout():
ui.Label(text='Pet name:')
self.b1 = ui.LineEdit()
ui.Label(text='Pet Age:')
self.b2 = ui.LineEdit()
ui.Label(text="Pet's Favorite color:")
self.b3 = ui.LineEdit()
ui.Widget(flex=1)
"""
from .. import react
from . import Widget, Layout
class BaseTableLayout(Layout):
""" Abstract base class for layouts that use an HTML table.
    Layouts that use this approach perform rather poorly when resizing.
    This is not much of a problem for a leaf layout, but we do not
    recommend nesting such layouts inside each other.
"""
CSS = """
/* Clear any styling on this table (rendered_html is an IPython thing) */
.flx-basetablelayout, .flx-basetablelayout td, .flx-basetablelayout tr,
.rendered_html .flx-basetablelayout {
border: 0px;
padding: initial;
margin: initial;
background: initial;
}
/* Behave well inside hbox/vbox,
we assume no layouts to be nested inside a table layout */
.flx-hbox > .flx-basetablelayout {
width: auto;
}
.flx-vbox > .flx-basetablelayout {
height: auto;
}
/* In flexed cells, occupy the full space */
td.vflex > .flx-widget {
height: 100%;
}
td.hflex > .flx-widget {
width: 100%;
}
"""
class JS:
def _apply_table_layout(self):
table = self.node
AUTOFLEX = 729 # magic number unlikely to occur in practice
# Get table dimensions
nrows = len(table.children)
ncols = 0
for i in range(len(table.children)):
row = table.children[i]
ncols = max(ncols, len(row.children))
if ncols == 0 and nrows == 0:
return
# Collect flexes
vflexes = []
hflexes = []
for i in range(nrows):
row = table.children[i]
for j in range(ncols):
col = row.children[j]
if (col is undefined) or (len(col.children) == 0):
continue
vflexes[i] = max(vflexes[i] or 0, col.children[0].vflex or 0)
hflexes[j] = max(hflexes[j] or 0, col.children[0].hflex or 0)
# What is the cumulative "flex-value"?
cum_vflex = vflexes.reduce(lambda pv, cv: pv + cv, 0)
cum_hflex = hflexes.reduce(lambda pv, cv: pv + cv, 0)
# If no flexes are given; assign each equal
if (cum_vflex == 0):
for i in range(len(vflexes)):
vflexes[i] = AUTOFLEX
cum_vflex = len(vflexes) * AUTOFLEX
if (cum_hflex == 0):
for i in range(len(hflexes)):
hflexes[i] = AUTOFLEX
cum_hflex = len(hflexes) * AUTOFLEX
# Assign css class and height/weight to cells
for i in range(nrows):
row = table.children[i]
row.vflex = vflexes[i] or 0 # Store for use during resizing
for j in range(ncols):
col = row.children[j];
if (col is undefined) or (col.children.length is 0):
continue
self._apply_cell_layout(row, col, vflexes[i], hflexes[j], cum_vflex, cum_hflex)
@react.connect('actual_size')
def _adapt_to_size_change(self, size):
""" This function adapts the height (in percent) of the flexible rows
of a layout. This is needed because the percent-height applies to the
total height of the table. This function is called whenever the
table resizes, and adjusts the percent-height, taking the available
            remaining table height into account. This is not necessary for the
            width, since percent-width in columns *does* apply to available width.
"""
table = self.node # or event.target
#print('heigh changed', event.heightChanged, event.owner.__id)
if not self.actual_size.last_value or (self.actual_size.value[1] !=
self.actual_size.last_value[1]):
# Set one flex row to max, so that non-flex rows have their
# minimum size. The table can already have been stretched
# a bit, causing the total row-height in % to not be
# sufficient from keeping the non-flex rows from growing.
for i in range(len(table.children)):
row = table.children[i]
if (row.vflex > 0):
row.style.height = '100%'
break
# Get remaining height: subtract height of each non-flex row
remainingHeight = table.clientHeight
cum_vflex = 0
for i in range(len(table.children)):
row = table.children[i]
cum_vflex += row.vflex
if (row.vflex == 0) and (row.children.length > 0):
remainingHeight -= row.children[0].clientHeight
# Apply height % for each flex row
remainingPercentage = 100 * remainingHeight / table.clientHeight
for i in range(len(table.children)):
row = table.children[i]
if row.vflex > 0:
row.style.height = round(row.vflex / cum_vflex * remainingPercentage) + 1 + '%'
def _apply_cell_layout(self, row, col, vflex, hflex, cum_vflex, cum_hflex):
raise NotImplementedError()
class FormLayout(BaseTableLayout):
""" A form layout organizes pairs of widgets vertically.
    Note: the API may change. Maybe the label can be derived from the
widgets' ``title`` property?
"""
CSS = """
.flx-formlayout > tr > td > .flx-label {
text-align: right;
}
"""
class JS:
def _create_node(self):
this.node = document.createElement('table')
this.node.appendChild(document.createElement('tr'))
def _add_child(self, widget):
# Get row, create if necessary
row = this.node.children[-1]
itemsInRow = row.children.length
if itemsInRow >= 2:
row = document.createElement('tr')
self.node.appendChild(row)
# Create td and add widget to it
td = document.createElement("td")
row.appendChild(td)
td.appendChild(widget.node)
#
self._update_layout()
self._apply_table_layout()
# do not call super!
def _update_layout(self):
""" Set hflex and vflex on node.
"""
i = 0
for widget in self.children():
i += 1
widget.node.hflex = 0 if (i % 2) else 1
widget.node.vflex = widget.flex()
self._apply_table_layout()
def _remove_child(self, widget):
pass
# do not call super!
def _apply_cell_layout(self, row, col, vflex, hflex, cum_vflex, cum_hflex):
AUTOFLEX = 729
className = ''
if (vflex == AUTOFLEX) or (vflex == 0):
row.style.height = 'auto'
className += ''
else:
row.style.height = vflex * 100 / cum_vflex + '%'
className += 'vflex'
className += ' '
if (hflex == 0):
col.style.width = 'auto'
className += ''
else:
col.style.width = '100%'
className += 'hflex'
col.className = className
class GridLayout(BaseTableLayout):
""" Not implemented.
Do we even need it? If we do implement it, we need a way to specify
the vertical flex value.
"""
| bsd-2-clause | -3,415,314,386,201,838,600 | 35.528139 | 103 | 0.50474 | false |
louietsai/python-for-android | python-build/python-libs/gdata/build/lib/gdata/tlslite/X509CertChain.py | 238 | 6861 | """Class representing an X.509 certificate chain."""
from utils import cryptomath
class X509CertChain:
"""This class represents a chain of X.509 certificates.
@type x509List: list
@ivar x509List: A list of L{tlslite.X509.X509} instances,
starting with the end-entity certificate and with every
subsequent certificate certifying the previous.
"""
def __init__(self, x509List=None):
"""Create a new X509CertChain.
@type x509List: list
@param x509List: A list of L{tlslite.X509.X509} instances,
starting with the end-entity certificate and with every
subsequent certificate certifying the previous.
"""
if x509List:
self.x509List = x509List
else:
self.x509List = []
def getNumCerts(self):
"""Get the number of certificates in this chain.
@rtype: int
"""
return len(self.x509List)
def getEndEntityPublicKey(self):
"""Get the public key from the end-entity certificate.
@rtype: L{tlslite.utils.RSAKey.RSAKey}
"""
if self.getNumCerts() == 0:
raise AssertionError()
return self.x509List[0].publicKey
def getFingerprint(self):
"""Get the hex-encoded fingerprint of the end-entity certificate.
@rtype: str
@return: A hex-encoded fingerprint.
"""
if self.getNumCerts() == 0:
raise AssertionError()
return self.x509List[0].getFingerprint()
def getCommonName(self):
"""Get the Subject's Common Name from the end-entity certificate.
The cryptlib_py module must be installed in order to use this
function.
@rtype: str or None
@return: The CN component of the certificate's subject DN, if
present.
"""
if self.getNumCerts() == 0:
raise AssertionError()
return self.x509List[0].getCommonName()
def validate(self, x509TrustList):
"""Check the validity of the certificate chain.
This checks that every certificate in the chain validates with
the subsequent one, until some certificate validates with (or
is identical to) one of the passed-in root certificates.
The cryptlib_py module must be installed in order to use this
function.
@type x509TrustList: list of L{tlslite.X509.X509}
@param x509TrustList: A list of trusted root certificates. The
certificate chain must extend to one of these certificates to
be considered valid.
"""
import cryptlib_py
c1 = None
c2 = None
lastC = None
rootC = None
try:
rootFingerprints = [c.getFingerprint() for c in x509TrustList]
#Check that every certificate in the chain validates with the
#next one
for cert1, cert2 in zip(self.x509List, self.x509List[1:]):
#If we come upon a root certificate, we're done.
if cert1.getFingerprint() in rootFingerprints:
return True
c1 = cryptlib_py.cryptImportCert(cert1.writeBytes(),
cryptlib_py.CRYPT_UNUSED)
c2 = cryptlib_py.cryptImportCert(cert2.writeBytes(),
cryptlib_py.CRYPT_UNUSED)
try:
cryptlib_py.cryptCheckCert(c1, c2)
except:
return False
cryptlib_py.cryptDestroyCert(c1)
c1 = None
cryptlib_py.cryptDestroyCert(c2)
c2 = None
#If the last certificate is one of the root certificates, we're
#done.
if self.x509List[-1].getFingerprint() in rootFingerprints:
return True
#Otherwise, find a root certificate that the last certificate
#chains to, and validate them.
lastC = cryptlib_py.cryptImportCert(self.x509List[-1].writeBytes(),
cryptlib_py.CRYPT_UNUSED)
for rootCert in x509TrustList:
rootC = cryptlib_py.cryptImportCert(rootCert.writeBytes(),
cryptlib_py.CRYPT_UNUSED)
if self._checkChaining(lastC, rootC):
try:
cryptlib_py.cryptCheckCert(lastC, rootC)
return True
except:
return False
return False
finally:
if not (c1 is None):
cryptlib_py.cryptDestroyCert(c1)
if not (c2 is None):
cryptlib_py.cryptDestroyCert(c2)
if not (lastC is None):
cryptlib_py.cryptDestroyCert(lastC)
if not (rootC is None):
cryptlib_py.cryptDestroyCert(rootC)
def _checkChaining(self, lastC, rootC):
import cryptlib_py
import array
def compareNames(name):
try:
length = cryptlib_py.cryptGetAttributeString(lastC, name, None)
lastName = array.array('B', [0] * length)
cryptlib_py.cryptGetAttributeString(lastC, name, lastName)
lastName = lastName.tostring()
except cryptlib_py.CryptException, e:
if e[0] == cryptlib_py.CRYPT_ERROR_NOTFOUND:
lastName = None
try:
length = cryptlib_py.cryptGetAttributeString(rootC, name, None)
rootName = array.array('B', [0] * length)
cryptlib_py.cryptGetAttributeString(rootC, name, rootName)
rootName = rootName.tostring()
except cryptlib_py.CryptException, e:
if e[0] == cryptlib_py.CRYPT_ERROR_NOTFOUND:
rootName = None
return lastName == rootName
cryptlib_py.cryptSetAttribute(lastC,
cryptlib_py.CRYPT_CERTINFO_ISSUERNAME,
cryptlib_py.CRYPT_UNUSED)
if not compareNames(cryptlib_py.CRYPT_CERTINFO_COUNTRYNAME):
return False
if not compareNames(cryptlib_py.CRYPT_CERTINFO_LOCALITYNAME):
return False
if not compareNames(cryptlib_py.CRYPT_CERTINFO_ORGANIZATIONNAME):
return False
if not compareNames(cryptlib_py.CRYPT_CERTINFO_ORGANIZATIONALUNITNAME):
return False
if not compareNames(cryptlib_py.CRYPT_CERTINFO_COMMONNAME):
return False
return True | apache-2.0 | -6,394,964,883,665,253,000 | 35.917127 | 79 | 0.552398 | false |
miniconfig/home-assistant | homeassistant/components/switch/netio.py | 15 | 5672 | """
The Netio switch component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.netio/
"""
import logging
from collections import namedtuple
from datetime import timedelta
import voluptuous as vol
from homeassistant.core import callback
from homeassistant import util
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import (
CONF_HOST, CONF_PORT, CONF_USERNAME, CONF_PASSWORD,
EVENT_HOMEASSISTANT_STOP, STATE_ON)
from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['pynetio==0.1.6']
_LOGGER = logging.getLogger(__name__)
ATTR_CURRENT_POWER_MWH = 'current_power_mwh'
ATTR_CURRENT_POWER_W = 'current_power_w'
ATTR_START_DATE = 'start_date'
ATTR_TODAY_MWH = 'today_mwh'
ATTR_TOTAL_CONSUMPTION_KWH = 'total_energy_kwh'
CONF_OUTLETS = 'outlets'
DEFAULT_PORT = 1234
DEFAULT_USERNAME = 'admin'
DEPENDENCIES = ['http']
Device = namedtuple('device', ['netio', 'entities'])
DEVICES = {}
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
REQ_CONF = [CONF_HOST, CONF_OUTLETS]
URL_API_NETIO_EP = '/api/netio/{host}'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Required(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_OUTLETS): {cv.string: cv.string},
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Configure the Netio platform."""
from pynetio import Netio
host = config.get(CONF_HOST)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
port = config.get(CONF_PORT)
if len(DEVICES) == 0:
hass.http.register_view(NetioApiView)
dev = Netio(host, port, username, password)
DEVICES[host] = Device(dev, [])
# Throttle the update for all NetioSwitches of one Netio
dev.update = util.Throttle(MIN_TIME_BETWEEN_SCANS)(dev.update)
for key in config[CONF_OUTLETS]:
switch = NetioSwitch(
DEVICES[host].netio, key, config[CONF_OUTLETS][key])
DEVICES[host].entities.append(switch)
add_devices(DEVICES[host].entities)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, dispose)
return True
def dispose(event):
"""Close connections to Netio Devices."""
for _, value in DEVICES.items():
value.netio.stop()
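# Illustrative sketch (not from the original component): the platform-specific
# options validated by PLATFORM_SCHEMA above (the enclosing ``platform: netio``
# entry is omitted); host, password and outlet names are made-up examples.
EXAMPLE_PLATFORM_CONFIG = {
    CONF_HOST: '192.168.0.10',
    CONF_PORT: DEFAULT_PORT,
    CONF_USERNAME: DEFAULT_USERNAME,
    CONF_PASSWORD: 'netio-password',
    CONF_OUTLETS: {'1': 'Washer', '2': 'Dryer'},
}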
class NetioApiView(HomeAssistantView):
"""WSGI handler class."""
url = URL_API_NETIO_EP
name = 'api:netio'
@callback
def get(self, request, host):
"""Request handler."""
hass = request.app['hass']
data = request.GET
states, consumptions, cumulated_consumptions, start_dates = \
[], [], [], []
for i in range(1, 5):
out = 'output%d' % i
states.append(data.get('%s_state' % out) == STATE_ON)
consumptions.append(float(data.get('%s_consumption' % out, 0)))
cumulated_consumptions.append(
float(data.get('%s_cumulatedConsumption' % out, 0)) / 1000)
start_dates.append(data.get('%s_consumptionStart' % out, ""))
_LOGGER.debug('%s: %s, %s, %s since %s', host, states,
consumptions, cumulated_consumptions, start_dates)
ndev = DEVICES[host].netio
ndev.consumptions = consumptions
ndev.cumulated_consumptions = cumulated_consumptions
ndev.states = states
ndev.start_dates = start_dates
for dev in DEVICES[host].entities:
hass.async_add_job(dev.async_update_ha_state())
return self.json(True)
class NetioSwitch(SwitchDevice):
"""Provide a netio linked switch."""
def __init__(self, netio, outlet, name):
"""Defined to handle throttle."""
self._name = name
self.outlet = outlet
self.netio = netio
@property
def name(self):
"""Netio device's name."""
return self._name
@property
def available(self):
"""Return True if entity is available."""
return not hasattr(self, 'telnet')
def turn_on(self):
"""Turn switch on."""
self._set(True)
def turn_off(self):
"""Turn switch off."""
self._set(False)
def _set(self, value):
val = list('uuuu')
val[self.outlet - 1] = '1' if value else '0'
self.netio.get('port list %s' % ''.join(val))
self.netio.states[self.outlet - 1] = value
self.schedule_update_ha_state()
@property
def is_on(self):
"""Return switch's status."""
return self.netio.states[self.outlet - 1]
def update(self):
"""Called by Home Assistant."""
self.netio.update()
@property
def state_attributes(self):
"""Return optional state attributes."""
return {
ATTR_CURRENT_POWER_W: self.current_power_w,
ATTR_TOTAL_CONSUMPTION_KWH: self.cumulated_consumption_kwh,
ATTR_START_DATE: self.start_date.split('|')[0]
}
@property
def current_power_w(self):
"""Return actual power."""
return self.netio.consumptions[self.outlet - 1]
@property
def cumulated_consumption_kwh(self):
"""Total enerygy consumption since start_date."""
return self.netio.cumulated_consumptions[self.outlet - 1]
@property
def start_date(self):
"""Point in time when the energy accumulation started."""
return self.netio.start_dates[self.outlet - 1]
| mit | -4,531,606,275,309,736,400 | 28.388601 | 75 | 0.639457 | false |
TaiSakuma/AlphaTwirl | tests/unit/roottree/test_EventBuilder.py | 1 | 2175 | import unittest
import sys
from alphatwirl.roottree import EventBuilderConfig
##__________________________________________________________________||
hasROOT = False
try:
import ROOT
hasROOT = True
except ImportError:
pass
if hasROOT:
from alphatwirl.roottree.EventBuilder import EventBuilder
##__________________________________________________________________||
class MockTChain(object):
def __init__(self, name):
self.treeName = name
self.paths = [ ]
def Add(self, name):
self.paths.append(name)
##__________________________________________________________________||
class MockROOT(object):
def __init__(self):
self.TChain = MockTChain
##__________________________________________________________________||
class MockEvents(object):
def __init__(self, tree, maxEvents, start = 0):
self.tree = tree
self.maxEvents = maxEvents
self.start = start
##__________________________________________________________________||
@unittest.skipUnless(hasROOT, "has no ROOT")
class TestEventBuilder(unittest.TestCase):
def setUp(self):
self.module = sys.modules['alphatwirl.roottree.EventBuilder']
self.orgROOT = self.module.ROOT
self.module.ROOT = MockROOT()
self.orgEvents = self.module.Events
self.module.Events = MockEvents
def tearDown(self):
self.module.ROOT = self.orgROOT
        self.module.Events = self.orgEvents
def test_build(self):
config = EventBuilderConfig(
inputPaths = ['/heppyresult/dir/TTJets/treeProducerSusyAlphaT/tree.root'],
treeName = 'tree',
maxEvents = 123,
start = 11,
name = 'TTJets'
)
obj = EventBuilder(config)
events = obj()
self.assertEqual(['/heppyresult/dir/TTJets/treeProducerSusyAlphaT/tree.root'], events.tree.paths)
self.assertIsInstance(events, MockEvents)
self.assertEqual('tree', events.tree.treeName)
self.assertEqual(11, events.start)
self.assertEqual(123, events.maxEvents)
##__________________________________________________________________||
| bsd-3-clause | -6,923,056,598,544,100,000 | 28.391892 | 105 | 0.514943 | false |
TheWaunaKeeganOrganization/Yahtzee | src/yahtzee_categories.py | 1 | 1373 | from collections import Counter
def ones(d):
return 1*d.count(1)
def twos(d):
return 2*d.count(2)
def threes(d):
return 3*d.count(3)
def fours(d):
return 4*d.count(4)
def fives(d):
return 5*d.count(5)
def sixes(d):
return 6*d.count(6)
def threeOfAKind(d):
if max(Counter(d).itervalues())>=3:
return sum(d)
return 0
def fourOfAKind(d):
if max(Counter(d).itervalues())>=4:
return sum(d)
return 0
def fullHouse(d):
if (list(Counter(d).itervalues())[0]==3 and list(Counter(d).itervalues())[1]==2) or (list(Counter(d).itervalues())[0]==2 and list(Counter(d).itervalues())[1]==3):
return 25
return 0
def smallStraight(d):
    # a small straight is any four sequential dice, not only a run starting
    # at the lowest die
    s = set(d)
    if {1, 2, 3, 4} <= s or {2, 3, 4, 5} <= s or {3, 4, 5, 6} <= s:
        return 30
    return 0
def largeStraight(d):
s=min(d)
if s+1 in d and s+2 in d and s+3 in d and s+4 in d:
return 30
return 0
def yahtzee(d):
if d.count(d[0])==5:
return 50
return 0
def chance(d):
return sum(d)
def allCategories(d):
scores={}
scores["ones"]=ones(d)
scores["twos"]=twos(d)
scores["threes"]=threes(d)
scores["fours"]=fours(d)
scores["fives"]=fives(d)
scores["sixes"]=sixes(d)
scores["threeOfAKind"]=threeOfAKind(d)
scores["fourOfAKind"]=fourOfAKind(d)
scores["fullHouse"]=fullHouse(d)
scores["smallStraight"]=smallStraight(d)
scores["largeStraight"]=largeStraight(d)
scores["yahtzee"]=yahtzee(d)
scores["chance"]=chance(d)
return scores
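# Illustrative sketch (not from the original module): scoring one example roll
# with the functions above; the dice values are arbitrary.
if __name__ == "__main__":
    example_roll = [3, 3, 3, 2, 2]
    # e.g. threes -> 9, threeOfAKind -> 13, fullHouse -> 25
    print allCategories(example_roll)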
| gpl-3.0 | -3,669,609,660,809,892,000 | 18.338028 | 163 | 0.672251 | false |
chaos-adept/timelyb | appengine_config.py | 1 | 1421 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Sample remote_api appengine_config for copying datastore across apps.
For more information, see
http://code.google.com/appengine/docs/adminconsole/
Note that this appengine_config.py file is the same one that you would
use for appstats; if you are bundling this with your existing app you may
wish to copy the version from
google/appengine/ext/appstats/sample_appengine_config.py instead.
"""
#########################################
# Remote_API Authentication configuration.
#
# See google/appengine/ext/remote_api/handler.py for more information.
# For datastore_admin datastore copy, you should set the source appid
# value. 'HTTP_X_APPENGINE_INBOUND_APPID', ['trusted source appid here']
#
remoteapi_CUSTOM_ENVIRONMENT_AUTHENTICATION = (
'HTTP_X_APPENGINE_INBOUND_APPID', ['timelyb-helloword-server'])
| gpl-3.0 | 5,118,230,771,141,126,000 | 37.405405 | 74 | 0.746657 | false |
glenux/contrib-mitro | browser-ext/third_party/firefox-addon-sdk/python-lib/simplejson/encoder.py | 67 | 13492 | """
Implementation of JSONEncoder
"""
import re
try:
from simplejson._speedups import encode_basestring_ascii as c_encode_basestring_ascii
except ImportError:
pass
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(r'[\x80-\xff]')
ESCAPE_DCT = {
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
}
for i in range(0x20):
ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
# Assume this produces an infinity on all machines (probably not guaranteed)
INFINITY = float('1e66666')
FLOAT_REPR = repr
def floatstr(o, allow_nan=True):
# Check for specials. Note that this type of test is processor- and/or
# platform-specific, so do tests which don't depend on the internals.
if o != o:
text = 'NaN'
elif o == INFINITY:
text = 'Infinity'
elif o == -INFINITY:
text = '-Infinity'
else:
return FLOAT_REPR(o)
if not allow_nan:
raise ValueError("Out of range float values are not JSON compliant: %r"
% (o,))
return text
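# Illustrative sketch (not from the original module) of the special-value
# handling above; ordinary floats simply go through FLOAT_REPR.
def _example_floatstr_specials():
    # -> ('Infinity', '-Infinity', 'NaN')
    return floatstr(INFINITY), floatstr(-INFINITY), floatstr(INFINITY - INFINITY)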
def encode_basestring(s):
"""
Return a JSON representation of a Python string
"""
def replace(match):
return ESCAPE_DCT[match.group(0)]
return '"' + ESCAPE.sub(replace, s) + '"'
def py_encode_basestring_ascii(s):
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
def replace(match):
s = match.group(0)
try:
return ESCAPE_DCT[s]
except KeyError:
n = ord(s)
if n < 0x10000:
return '\\u%04x' % (n,)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
return '\\u%04x\\u%04x' % (s1, s2)
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
try:
encode_basestring_ascii = c_encode_basestring_ascii
except NameError:
encode_basestring_ascii = py_encode_basestring_ascii
class JSONEncoder(object):
"""
Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str, unicode | string |
+-------------------+---------------+
| int, long, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
To extend this to recognize other objects, subclass and implement a
``.default()`` method with another method that returns a serializable
object for ``o`` if possible, otherwise it should call the superclass
implementation (to raise ``TypeError``).
"""
__all__ = ['__init__', 'default', 'encode', 'iterencode']
item_separator = ', '
key_separator = ': '
def __init__(self, skipkeys=False, ensure_ascii=True,
check_circular=True, allow_nan=True, sort_keys=False,
indent=None, separators=None, encoding='utf-8', default=None):
"""
Constructor for JSONEncoder, with sensible defaults.
If skipkeys is False, then it is a TypeError to attempt
encoding of keys that are not str, int, long, float or None. If
skipkeys is True, such items are simply skipped.
If ensure_ascii is True, the output is guaranteed to be str
objects with all incoming unicode characters escaped. If
ensure_ascii is false, the output will be unicode object.
If check_circular is True, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is True, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is True, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If indent is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that
indent level. An indent level of 0 will only insert newlines.
None is the most compact representation.
If specified, separators should be a (item_separator, key_separator)
tuple. The default is (', ', ': '). To get the most compact JSON
representation you should specify (',', ':') to eliminate whitespace.
If specified, default is a function that gets called for objects
that can't otherwise be serialized. It should return a JSON encodable
version of the object or raise a ``TypeError``.
If encoding is not None, then all input strings will be
transformed into unicode using that encoding prior to JSON-encoding.
The default is UTF-8.
"""
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
self.indent = indent
self.current_indent_level = 0
if separators is not None:
self.item_separator, self.key_separator = separators
if default is not None:
self.default = default
self.encoding = encoding
def _newline_indent(self):
return '\n' + (' ' * (self.indent * self.current_indent_level))
def _iterencode_list(self, lst, markers=None):
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
yield '['
if self.indent is not None:
self.current_indent_level += 1
newline_indent = self._newline_indent()
separator = self.item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
separator = self.item_separator
first = True
for value in lst:
if first:
first = False
else:
yield separator
for chunk in self._iterencode(value, markers):
yield chunk
if newline_indent is not None:
self.current_indent_level -= 1
yield self._newline_indent()
yield ']'
if markers is not None:
del markers[markerid]
def _iterencode_dict(self, dct, markers=None):
if not dct:
yield '{}'
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
yield '{'
key_separator = self.key_separator
if self.indent is not None:
self.current_indent_level += 1
newline_indent = self._newline_indent()
item_separator = self.item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = self.item_separator
first = True
if self.ensure_ascii:
encoder = encode_basestring_ascii
else:
encoder = encode_basestring
allow_nan = self.allow_nan
if self.sort_keys:
keys = dct.keys()
keys.sort()
items = [(k, dct[k]) for k in keys]
else:
items = dct.iteritems()
_encoding = self.encoding
_do_decode = (_encoding is not None
and not (_encoding == 'utf-8'))
for key, value in items:
if isinstance(key, str):
if _do_decode:
key = key.decode(_encoding)
elif isinstance(key, basestring):
pass
# JavaScript is weakly typed for these, so it makes sense to
# also allow them. Many encoders seem to do something like this.
elif isinstance(key, float):
key = floatstr(key, allow_nan)
elif isinstance(key, (int, long)):
key = str(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif self.skipkeys:
continue
else:
raise TypeError("key %r is not a string" % (key,))
if first:
first = False
else:
yield item_separator
yield encoder(key)
yield key_separator
for chunk in self._iterencode(value, markers):
yield chunk
if newline_indent is not None:
self.current_indent_level -= 1
yield self._newline_indent()
yield '}'
if markers is not None:
del markers[markerid]
def _iterencode(self, o, markers=None):
if isinstance(o, basestring):
if self.ensure_ascii:
encoder = encode_basestring_ascii
else:
encoder = encode_basestring
_encoding = self.encoding
if (_encoding is not None and isinstance(o, str)
and not (_encoding == 'utf-8')):
o = o.decode(_encoding)
yield encoder(o)
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
elif isinstance(o, (int, long)):
yield str(o)
elif isinstance(o, float):
yield floatstr(o, self.allow_nan)
elif isinstance(o, (list, tuple)):
for chunk in self._iterencode_list(o, markers):
yield chunk
elif isinstance(o, dict):
for chunk in self._iterencode_dict(o, markers):
yield chunk
else:
if markers is not None:
markerid = id(o)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = o
for chunk in self._iterencode_default(o, markers):
yield chunk
if markers is not None:
del markers[markerid]
def _iterencode_default(self, o, markers=None):
newobj = self.default(o)
return self._iterencode(newobj, markers)
def default(self, o):
"""
Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, o)
"""
raise TypeError("%r is not JSON serializable" % (o,))
def encode(self, o):
"""
Return a JSON string representation of a Python data structure.
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
'{"foo": ["bar", "baz"]}'
"""
# This is for extremely simple cases and benchmarks.
if isinstance(o, basestring):
if isinstance(o, str):
_encoding = self.encoding
if (_encoding is not None
and not (_encoding == 'utf-8')):
o = o.decode(_encoding)
if self.ensure_ascii:
return encode_basestring_ascii(o)
else:
return encode_basestring(o)
# This doesn't pass the iterator directly to ''.join() because the
# exceptions aren't as detailed. The list call should be roughly
# equivalent to the PySequence_Fast that ''.join() would do.
chunks = list(self.iterencode(o))
return ''.join(chunks)
def iterencode(self, o):
"""
Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
return self._iterencode(o, markers)
__all__ = ['JSONEncoder']
| gpl-3.0 | -5,677,495,146,495,276,000 | 34.044156 | 89 | 0.532983 | false |
westial/NdArrayIndexer.py | tests/test_NdArrayIndexer_3axes.py | 1 | 4489 | #!/usr/bin/env python
#
# Testing ndarray with 3 axes
#
import numpy as np
from NdArrayIndexer import NdArrayIndexer
# Structure of unsorted list to be converted in the same shape as testing_array
testing_list = [
[[[7, 2, 76], [132, 32, 1], [201, 23, 224], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[1, 20, 203], [51, 212, 13], [21, 102, 1], [201, 23, 224]],
[[4, 5, 6], [11, 12, 13], [21, 22, 23], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[201, 202, 203], [211, 212, 213], [221, 222, 223], [201, 23, 224]]],
[[[7, 2, 76], [132, 32, 1], [201, 23, 224], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[1, 20, 203], [51, 212, 13], [21, 102, 1], [201, 23, 224]],
[[4, 5, 6], [11, 12, 13], [21, 22, 23], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[201, 202, 203], [211, 212, 213], [221, 222, 223], [201, 23, 224]]],
[[[7, 2, 76], [132, 32, 1], [201, 23, 224], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[1, 20, 203], [51, 212, 13], [21, 102, 1], [201, 23, 224]],
[[4, 5, 6], [11, 12, 13], [21, 22, 23], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[201, 202, 203], [211, 212, 213], [221, 222, 223], [201, 23, 224]]],
[[[7, 2, 76], [132, 32, 1], [201, 23, 224], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[1, 20, 203], [51, 212, 13], [21, 102, 1], [201, 23, 224]],
[[4, 5, 6], [11, 12, 13], [21, 22, 23], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[201, 202, 203], [211, 212, 213], [221, 222, 223], [201, 23, 224]]],
[[[7, 2, 76], [132, 32, 1], [201, 23, 224], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[1, 20, 203], [51, 212, 13], [21, 102, 1], [201, 23, 224]],
[[4, 5, 6], [11, 12, 13], [21, 22, 23], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[201, 202, 203], [211, 212, 213], [221, 222, 223], [201, 23, 224]]],
[[[7, 2, 76], [132, 32, 1], [201, 23, 224], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[1, 20, 203], [51, 212, 13], [21, 102, 1], [201, 23, 224]],
[[4, 5, 6], [11, 12, 13], [21, 22, 23], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[201, 202, 203], [211, 212, 213], [221, 222, 223], [201, 23, 224]]],
[[[7, 2, 76], [132, 32, 1], [201, 23, 224], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[1, 20, 203], [51, 212, 13], [21, 102, 1], [201, 23, 224]],
[[4, 5, 6], [11, 12, 13], [21, 22, 23], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[201, 202, 203], [211, 212, 213], [221, 222, 223], [201, 23, 224]]],
[[[7, 2, 76], [132, 32, 1], [201, 23, 224], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[1, 20, 203], [51, 212, 13], [21, 102, 1], [201, 23, 224]],
[[4, 5, 6], [11, 12, 13], [21, 22, 23], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[201, 202, 203], [211, 212, 213], [221, 222, 223], [201, 23, 224]]],
[[[7, 2, 76], [132, 32, 1], [201, 23, 224], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[1, 20, 203], [51, 212, 13], [21, 102, 1], [201, 23, 224]],
[[4, 5, 6], [11, 12, 13], [21, 22, 23], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[201, 202, 203], [211, 212, 213], [221, 222, 223], [201, 23, 224]]],
[[[7, 2, 76], [132, 32, 1], [201, 23, 224], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[1, 20, 203], [51, 212, 13], [21, 102, 1], [201, 23, 224]],
[[4, 5, 6], [11, 12, 13], [21, 22, 23], [201, 23, 224]],
[[101, 102, 103], [111, 112, 113], [121, 122, 123], [201, 23, 224]],
[[201, 202, 203], [211, 212, 213], [221, 222, 223], [201, 23, 224]]]
]
testing_array = np.array(testing_list)
print testing_array
print "------------------"
arr = NdArrayIndexer(testing_array)
arr.run()
print arr.get() | gpl-3.0 | 8,476,150,344,816,096,000 | 47.804348 | 79 | 0.462018 | false |
gerryhd/diabot-assistant | lib/python2.7/site-packages/jinja2/compiler.py | 117 | 62929 | # -*- coding: utf-8 -*-
"""
jinja2.compiler
~~~~~~~~~~~~~~~
Compiles nodes into python code.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from itertools import chain
from copy import deepcopy
from keyword import iskeyword as is_python_keyword
from functools import update_wrapper
from jinja2 import nodes
from jinja2.nodes import EvalContext
from jinja2.visitor import NodeVisitor
from jinja2.optimizer import Optimizer
from jinja2.exceptions import TemplateAssertionError
from jinja2.utils import Markup, concat, escape
from jinja2._compat import range_type, text_type, string_types, \
iteritems, NativeStringIO, imap, izip
from jinja2.idtracking import Symbols, VAR_LOAD_PARAMETER, \
VAR_LOAD_RESOLVE, VAR_LOAD_ALIAS, VAR_LOAD_UNDEFINED
operators = {
'eq': '==',
'ne': '!=',
'gt': '>',
'gteq': '>=',
'lt': '<',
'lteq': '<=',
'in': 'in',
'notin': 'not in'
}
# what method to iterate over items do we want to use for dict iteration
# in generated code? on 2.x let's go with iteritems, on 3.x with items
if hasattr(dict, 'iteritems'):
dict_item_iter = 'iteritems'
else:
dict_item_iter = 'items'
code_features = ['division']
# does this python version support generator stops? (PEP 0479)
try:
exec('from __future__ import generator_stop')
code_features.append('generator_stop')
except SyntaxError:
pass
# does this python version support yield from?
try:
exec('def f(): yield from x()')
except SyntaxError:
supports_yield_from = False
else:
supports_yield_from = True
def optimizeconst(f):
def new_func(self, node, frame, **kwargs):
# Only optimize if the frame is not volatile
if self.optimized and not frame.eval_ctx.volatile:
new_node = self.optimizer.visit(node, frame.eval_ctx)
if new_node != node:
return self.visit(new_node, frame)
return f(self, node, frame, **kwargs)
return update_wrapper(new_func, f)
def generate(node, environment, name, filename, stream=None,
defer_init=False, optimized=True):
"""Generate the python source for a node tree."""
if not isinstance(node, nodes.Template):
raise TypeError('Can\'t compile non template nodes')
generator = environment.code_generator_class(environment, name, filename,
stream, defer_init,
optimized)
generator.visit(node)
if stream is None:
return generator.stream.getvalue()
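# Illustrative sketch (not from the original module): ``generate`` is normally
# reached through the environment; ``raw=True`` returns the generated python
# source as a string. The template text is an arbitrary example.
def _example_generated_source(environment):
    return environment.compile('Hello {{ name }}!', raw=True)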
def has_safe_repr(value):
"""Does the node have a safe representation?"""
if value is None or value is NotImplemented or value is Ellipsis:
return True
if type(value) in (bool, int, float, complex, range_type, Markup) + string_types:
return True
if type(value) in (tuple, list, set, frozenset):
for item in value:
if not has_safe_repr(item):
return False
return True
elif type(value) is dict:
for key, value in iteritems(value):
if not has_safe_repr(key):
return False
if not has_safe_repr(value):
return False
return True
return False
def find_undeclared(nodes, names):
"""Check if the names passed are accessed undeclared. The return value
is a set of all the undeclared names from the sequence of names found.
"""
visitor = UndeclaredNameVisitor(names)
try:
for node in nodes:
visitor.visit(node)
except VisitorExit:
pass
return visitor.undeclared
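# Illustrative sketch (not from the original module) of what ``find_undeclared``
# reports; the template text and name set are arbitrary examples.
def _example_find_undeclared(environment):
    tree = environment.parse('{% if x %}{{ y }}{% endif %}')
    return find_undeclared(tree.body, ('x', 'y', 'z'))  # -> set(['x', 'y'])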
class MacroRef(object):
def __init__(self, node):
self.node = node
self.accesses_caller = False
self.accesses_kwargs = False
self.accesses_varargs = False
class Frame(object):
"""Holds compile time information for us."""
def __init__(self, eval_ctx, parent=None):
self.eval_ctx = eval_ctx
self.symbols = Symbols(parent and parent.symbols or None)
# a toplevel frame is the root + soft frames such as if conditions.
self.toplevel = False
# the root frame is basically just the outermost frame, so no if
# conditions. This information is used to optimize inheritance
# situations.
self.rootlevel = False
# in some dynamic inheritance situations the compiler needs to add
# write tests around output statements.
self.require_output_check = parent and parent.require_output_check
# inside some tags we are using a buffer rather than yield statements.
# this for example affects {% filter %} or {% macro %}. If a frame
# is buffered this variable points to the name of the list used as
# buffer.
self.buffer = None
# the name of the block we're in, otherwise None.
self.block = parent and parent.block or None
# the parent of this frame
self.parent = parent
if parent is not None:
self.buffer = parent.buffer
def copy(self):
"""Create a copy of the current one."""
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.symbols = self.symbols.copy()
return rv
def inner(self):
"""Return an inner frame."""
return Frame(self.eval_ctx, self)
def soft(self):
"""Return a soft frame. A soft frame may not be modified as
standalone thing as it shares the resources with the frame it
was created of, but it's not a rootlevel frame any longer.
This is only used to implement if-statements.
"""
rv = self.copy()
rv.rootlevel = False
return rv
__copy__ = copy
class VisitorExit(RuntimeError):
"""Exception used by the `UndeclaredNameVisitor` to signal a stop."""
class DependencyFinderVisitor(NodeVisitor):
"""A visitor that collects filter and test calls."""
def __init__(self):
self.filters = set()
self.tests = set()
def visit_Filter(self, node):
self.generic_visit(node)
self.filters.add(node.name)
def visit_Test(self, node):
self.generic_visit(node)
self.tests.add(node.name)
def visit_Block(self, node):
"""Stop visiting at blocks."""
class UndeclaredNameVisitor(NodeVisitor):
"""A visitor that checks if a name is accessed without being
declared. This is different from the frame visitor as it will
not stop at closure frames.
"""
def __init__(self, names):
self.names = set(names)
self.undeclared = set()
def visit_Name(self, node):
if node.ctx == 'load' and node.name in self.names:
self.undeclared.add(node.name)
if self.undeclared == self.names:
raise VisitorExit()
else:
self.names.discard(node.name)
def visit_Block(self, node):
"""Stop visiting a blocks."""
class CompilerExit(Exception):
"""Raised if the compiler encountered a situation where it just
doesn't make sense to further process the code. Any block that
raises such an exception is not further processed.
"""
class CodeGenerator(NodeVisitor):
def __init__(self, environment, name, filename, stream=None,
defer_init=False, optimized=True):
if stream is None:
stream = NativeStringIO()
self.environment = environment
self.name = name
self.filename = filename
self.stream = stream
self.created_block_context = False
self.defer_init = defer_init
self.optimized = optimized
if optimized:
self.optimizer = Optimizer(environment)
# aliases for imports
self.import_aliases = {}
# a registry for all blocks. Because blocks are moved out
# into the global python scope they are registered here
self.blocks = {}
# the number of extends statements so far
self.extends_so_far = 0
# some templates have a rootlevel extends. In this case we
# can safely assume that we're a child template and do some
# more optimizations.
self.has_known_extends = False
# the current line number
self.code_lineno = 1
# registry of all filters and tests (global, not block local)
self.tests = {}
self.filters = {}
# the debug information
self.debug_info = []
self._write_debug_info = None
# the number of new lines before the next write()
self._new_lines = 0
# the line number of the last written statement
self._last_line = 0
# true if nothing was written so far.
self._first_write = True
# used by the `temporary_identifier` method to get new
# unique, temporary identifier
self._last_identifier = 0
# the current indentation
self._indentation = 0
# Tracks toplevel assignments
self._assign_stack = []
# Tracks parameter definition blocks
self._param_def_block = []
# -- Various compilation helpers
def fail(self, msg, lineno):
"""Fail with a :exc:`TemplateAssertionError`."""
raise TemplateAssertionError(msg, lineno, self.name, self.filename)
def temporary_identifier(self):
"""Get a new unique identifier."""
self._last_identifier += 1
return 't_%d' % self._last_identifier
def buffer(self, frame):
"""Enable buffering for the frame from that point onwards."""
frame.buffer = self.temporary_identifier()
self.writeline('%s = []' % frame.buffer)
def return_buffer_contents(self, frame, force_unescaped=False):
"""Return the buffer contents of the frame."""
if not force_unescaped:
if frame.eval_ctx.volatile:
self.writeline('if context.eval_ctx.autoescape:')
self.indent()
self.writeline('return Markup(concat(%s))' % frame.buffer)
self.outdent()
self.writeline('else:')
self.indent()
self.writeline('return concat(%s)' % frame.buffer)
self.outdent()
return
elif frame.eval_ctx.autoescape:
self.writeline('return Markup(concat(%s))' % frame.buffer)
return
self.writeline('return concat(%s)' % frame.buffer)
def indent(self):
"""Indent by one."""
self._indentation += 1
def outdent(self, step=1):
"""Outdent by step."""
self._indentation -= step
def start_write(self, frame, node=None):
"""Yield or write into the frame buffer."""
if frame.buffer is None:
self.writeline('yield ', node)
else:
self.writeline('%s.append(' % frame.buffer, node)
def end_write(self, frame):
"""End the writing process started by `start_write`."""
if frame.buffer is not None:
self.write(')')
def simple_write(self, s, frame, node=None):
"""Simple shortcut for start_write + write + end_write."""
self.start_write(frame, node)
self.write(s)
self.end_write(frame)
def blockvisit(self, nodes, frame):
"""Visit a list of nodes as block in a frame. If the current frame
is no buffer a dummy ``if 0: yield None`` is written automatically.
"""
try:
self.writeline('pass')
for node in nodes:
self.visit(node, frame)
except CompilerExit:
pass
def write(self, x):
"""Write a string into the output stream."""
if self._new_lines:
if not self._first_write:
self.stream.write('\n' * self._new_lines)
self.code_lineno += self._new_lines
if self._write_debug_info is not None:
self.debug_info.append((self._write_debug_info,
self.code_lineno))
self._write_debug_info = None
self._first_write = False
self.stream.write(' ' * self._indentation)
self._new_lines = 0
self.stream.write(x)
def writeline(self, x, node=None, extra=0):
"""Combination of newline and write."""
self.newline(node, extra)
self.write(x)
def newline(self, node=None, extra=0):
"""Add one or more newlines before the next write."""
self._new_lines = max(self._new_lines, 1 + extra)
if node is not None and node.lineno != self._last_line:
self._write_debug_info = node.lineno
self._last_line = node.lineno
def signature(self, node, frame, extra_kwargs=None):
"""Writes a function call to the stream for the current node.
        A leading comma is added automatically.  The extra keyword
        arguments may not include python keywords, otherwise a syntax
        error could occur.  The extra keyword arguments should be given
        as a python dict.
"""
# if any of the given keyword arguments is a python keyword
# we have to make sure that no invalid call is created.
kwarg_workaround = False
for kwarg in chain((x.key for x in node.kwargs), extra_kwargs or ()):
if is_python_keyword(kwarg):
kwarg_workaround = True
break
for arg in node.args:
self.write(', ')
self.visit(arg, frame)
if not kwarg_workaround:
for kwarg in node.kwargs:
self.write(', ')
self.visit(kwarg, frame)
if extra_kwargs is not None:
for key, value in iteritems(extra_kwargs):
self.write(', %s=%s' % (key, value))
if node.dyn_args:
self.write(', *')
self.visit(node.dyn_args, frame)
if kwarg_workaround:
if node.dyn_kwargs is not None:
self.write(', **dict({')
else:
self.write(', **{')
for kwarg in node.kwargs:
self.write('%r: ' % kwarg.key)
self.visit(kwarg.value, frame)
self.write(', ')
if extra_kwargs is not None:
for key, value in iteritems(extra_kwargs):
self.write('%r: %s, ' % (key, value))
if node.dyn_kwargs is not None:
self.write('}, **')
self.visit(node.dyn_kwargs, frame)
self.write(')')
else:
self.write('}')
elif node.dyn_kwargs is not None:
self.write(', **')
self.visit(node.dyn_kwargs, frame)
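        # Example (illustrative): for a call like ``{{ foo(1, bar=2) }}`` this
        # writes roughly ``, 1, bar=2`` after the callable, while a reserved
        # name such as ``class`` falls back to the ``**{...}`` workaround and
        # produces something like ``, **{'class': 3, }``.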
def pull_dependencies(self, nodes):
"""Pull all the dependencies."""
visitor = DependencyFinderVisitor()
for node in nodes:
visitor.visit(node)
for dependency in 'filters', 'tests':
mapping = getattr(self, dependency)
for name in getattr(visitor, dependency):
if name not in mapping:
mapping[name] = self.temporary_identifier()
self.writeline('%s = environment.%s[%r]' %
(mapping[name], dependency, name))
def enter_frame(self, frame):
undefs = []
for target, (action, param) in iteritems(frame.symbols.loads):
if action == VAR_LOAD_PARAMETER:
pass
elif action == VAR_LOAD_RESOLVE:
self.writeline('%s = resolve(%r)' %
(target, param))
elif action == VAR_LOAD_ALIAS:
self.writeline('%s = %s' % (target, param))
elif action == VAR_LOAD_UNDEFINED:
undefs.append(target)
else:
raise NotImplementedError('unknown load instruction')
if undefs:
self.writeline('%s = missing' % ' = '.join(undefs))
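        # Example (illustrative): a context variable ``foo`` that must be
        # resolved at runtime typically produces a line such as
        # ``l_0_foo = resolve('foo')``; the exact target name is assigned by
        # the frame's symbol table, so the name shown here is an assumption.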
def leave_frame(self, frame, with_python_scope=False):
if not with_python_scope:
undefs = []
for target, _ in iteritems(frame.symbols.loads):
undefs.append(target)
if undefs:
self.writeline('%s = missing' % ' = '.join(undefs))
def func(self, name):
if self.environment.is_async:
return 'async def %s' % name
return 'def %s' % name
def macro_body(self, node, frame):
"""Dump the function def of a macro or call block."""
frame = frame.inner()
frame.symbols.analyze_node(node)
macro_ref = MacroRef(node)
explicit_caller = None
skip_special_params = set()
args = []
for idx, arg in enumerate(node.args):
if arg.name == 'caller':
explicit_caller = idx
if arg.name in ('kwargs', 'varargs'):
skip_special_params.add(arg.name)
args.append(frame.symbols.ref(arg.name))
undeclared = find_undeclared(node.body, ('caller', 'kwargs', 'varargs'))
if 'caller' in undeclared:
# In older Jinja2 versions there was a bug that allowed caller
# to retain the special behavior even if it was mentioned in
# the argument list. However thankfully this was only really
# working if it was the last argument. So we are explicitly
# checking this now and error out if it is anywhere else in
# the argument list.
if explicit_caller is not None:
try:
node.defaults[explicit_caller - len(node.args)]
except IndexError:
self.fail('When defining macros or call blocks the '
'special "caller" argument must be omitted '
'or be given a default.', node.lineno)
else:
args.append(frame.symbols.declare_parameter('caller'))
macro_ref.accesses_caller = True
if 'kwargs' in undeclared and not 'kwargs' in skip_special_params:
args.append(frame.symbols.declare_parameter('kwargs'))
macro_ref.accesses_kwargs = True
if 'varargs' in undeclared and not 'varargs' in skip_special_params:
args.append(frame.symbols.declare_parameter('varargs'))
macro_ref.accesses_varargs = True
# macros are delayed, they never require output checks
frame.require_output_check = False
frame.symbols.analyze_node(node)
self.writeline('%s(%s):' % (self.func('macro'), ', '.join(args)), node)
self.indent()
self.buffer(frame)
self.enter_frame(frame)
self.push_parameter_definitions(frame)
for idx, arg in enumerate(node.args):
ref = frame.symbols.ref(arg.name)
self.writeline('if %s is missing:' % ref)
self.indent()
try:
default = node.defaults[idx - len(node.args)]
except IndexError:
self.writeline('%s = undefined(%r, name=%r)' % (
ref,
'parameter %r was not provided' % arg.name,
arg.name))
else:
self.writeline('%s = ' % ref)
self.visit(default, frame)
self.mark_parameter_stored(ref)
self.outdent()
self.pop_parameter_definitions()
self.blockvisit(node.body, frame)
self.return_buffer_contents(frame, force_unescaped=True)
self.leave_frame(frame, with_python_scope=True)
self.outdent()
return frame, macro_ref
def macro_def(self, macro_ref, frame):
"""Dump the macro definition for the def created by macro_body."""
arg_tuple = ', '.join(repr(x.name) for x in macro_ref.node.args)
name = getattr(macro_ref.node, 'name', None)
if len(macro_ref.node.args) == 1:
arg_tuple += ','
self.write('Macro(environment, macro, %r, (%s), %r, %r, %r, '
'context.eval_ctx.autoescape)' %
(name, arg_tuple, macro_ref.accesses_kwargs,
macro_ref.accesses_varargs, macro_ref.accesses_caller))
def position(self, node):
"""Return a human readable position for the node."""
rv = 'line %d' % node.lineno
if self.name is not None:
rv += ' in ' + repr(self.name)
return rv
def dump_local_context(self, frame):
return '{%s}' % ', '.join(
'%r: %s' % (name, target) for name, target
in iteritems(frame.symbols.dump_stores()))
def write_commons(self):
"""Writes a common preamble that is used by root and block functions.
Primarily this sets up common local helpers and enforces a generator
through a dead branch.
"""
self.writeline('resolve = context.resolve_or_missing')
self.writeline('undefined = environment.undefined')
self.writeline('if 0: yield None')
def push_parameter_definitions(self, frame):
"""Pushes all parameter targets from the given frame into a local
stack that permits tracking of yet to be assigned parameters. In
particular this enables the optimization from `visit_Name` to skip
undefined expressions for parameters in macros as macros can reference
otherwise unbound parameters.
"""
self._param_def_block.append(frame.symbols.dump_param_targets())
def pop_parameter_definitions(self):
"""Pops the current parameter definitions set."""
self._param_def_block.pop()
def mark_parameter_stored(self, target):
"""Marks a parameter in the current parameter definitions as stored.
This will skip the enforced undefined checks.
"""
if self._param_def_block:
self._param_def_block[-1].discard(target)
def parameter_is_undeclared(self, target):
"""Checks if a given target is an undeclared parameter."""
if not self._param_def_block:
return False
return target in self._param_def_block[-1]
def push_assign_tracking(self):
"""Pushes a new layer for assignment tracking."""
self._assign_stack.append(set())
def pop_assign_tracking(self, frame):
"""Pops the topmost level for assignment tracking and updates the
context variables if necessary.
"""
vars = self._assign_stack.pop()
if not frame.toplevel or not vars:
return
public_names = [x for x in vars if x[:1] != '_']
if len(vars) == 1:
name = next(iter(vars))
ref = frame.symbols.ref(name)
self.writeline('context.vars[%r] = %s' % (name, ref))
else:
self.writeline('context.vars.update({')
for idx, name in enumerate(vars):
if idx:
self.write(', ')
ref = frame.symbols.ref(name)
self.write('%r: %s' % (name, ref))
self.write('})')
if public_names:
if len(public_names) == 1:
self.writeline('context.exported_vars.add(%r)' %
public_names[0])
else:
self.writeline('context.exported_vars.update((%s))' %
', '.join(imap(repr, public_names)))
# -- Statement Visitors
def visit_Template(self, node, frame=None):
assert frame is None, 'no root frame allowed'
eval_ctx = EvalContext(self.environment, self.name)
from jinja2.runtime import __all__ as exported
self.writeline('from __future__ import %s' % ', '.join(code_features))
self.writeline('from jinja2.runtime import ' + ', '.join(exported))
if self.environment.is_async:
self.writeline('from jinja2.asyncsupport import auto_await, '
'auto_aiter, make_async_loop_context')
# if we want a deferred initialization we cannot move the
# environment into a local name
envenv = not self.defer_init and ', environment=environment' or ''
# do we have an extends tag at all? If not, we can save some
# overhead by just not processing any inheritance code.
have_extends = node.find(nodes.Extends) is not None
# find all blocks
for block in node.find_all(nodes.Block):
if block.name in self.blocks:
self.fail('block %r defined twice' % block.name, block.lineno)
self.blocks[block.name] = block
# find all imports and import them
for import_ in node.find_all(nodes.ImportedName):
if import_.importname not in self.import_aliases:
imp = import_.importname
self.import_aliases[imp] = alias = self.temporary_identifier()
if '.' in imp:
module, obj = imp.rsplit('.', 1)
self.writeline('from %s import %s as %s' %
(module, obj, alias))
else:
self.writeline('import %s as %s' % (imp, alias))
# add the load name
self.writeline('name = %r' % self.name)
# generate the root render function.
self.writeline('%s(context, missing=missing%s):' %
(self.func('root'), envenv), extra=1)
self.indent()
self.write_commons()
# process the root
frame = Frame(eval_ctx)
if 'self' in find_undeclared(node.body, ('self',)):
ref = frame.symbols.declare_parameter('self')
self.writeline('%s = TemplateReference(context)' % ref)
frame.symbols.analyze_node(node)
frame.toplevel = frame.rootlevel = True
frame.require_output_check = have_extends and not self.has_known_extends
if have_extends:
self.writeline('parent_template = None')
self.enter_frame(frame)
self.pull_dependencies(node.body)
self.blockvisit(node.body, frame)
self.leave_frame(frame, with_python_scope=True)
self.outdent()
# make sure that the parent root is called.
if have_extends:
if not self.has_known_extends:
self.indent()
self.writeline('if parent_template is not None:')
self.indent()
if supports_yield_from and not self.environment.is_async:
self.writeline('yield from parent_template.'
'root_render_func(context)')
else:
self.writeline('%sfor event in parent_template.'
'root_render_func(context):' %
(self.environment.is_async and 'async ' or ''))
self.indent()
self.writeline('yield event')
self.outdent()
self.outdent(1 + (not self.has_known_extends))
# at this point we now have the blocks collected and can visit them too.
for name, block in iteritems(self.blocks):
self.writeline('%s(context, missing=missing%s):' %
(self.func('block_' + name), envenv),
block, 1)
self.indent()
self.write_commons()
# It's important that we do not make this frame a child of the
# toplevel template. This would cause a variety of
# interesting issues with identifier tracking.
block_frame = Frame(eval_ctx)
undeclared = find_undeclared(block.body, ('self', 'super'))
if 'self' in undeclared:
ref = block_frame.symbols.declare_parameter('self')
self.writeline('%s = TemplateReference(context)' % ref)
if 'super' in undeclared:
ref = block_frame.symbols.declare_parameter('super')
self.writeline('%s = context.super(%r, '
'block_%s)' % (ref, name, name))
block_frame.symbols.analyze_node(block)
block_frame.block = name
self.enter_frame(block_frame)
self.pull_dependencies(block.body)
self.blockvisit(block.body, block_frame)
self.leave_frame(block_frame, with_python_scope=True)
self.outdent()
self.writeline('blocks = {%s}' % ', '.join('%r: block_%s' % (x, x)
for x in self.blocks),
extra=1)
# add a function that returns the debug info
self.writeline('debug_info = %r' % '&'.join('%s=%s' % x for x
in self.debug_info))
def visit_Block(self, node, frame):
"""Call a block and register it for the template."""
level = 0
if frame.toplevel:
# if we know that we are a child template, there is no need to
# check if we are one
if self.has_known_extends:
return
if self.extends_so_far > 0:
self.writeline('if parent_template is None:')
self.indent()
level += 1
context = node.scoped and (
'context.derived(%s)' % self.dump_local_context(frame)) or 'context'
if supports_yield_from and not self.environment.is_async and \
frame.buffer is None:
self.writeline('yield from context.blocks[%r][0](%s)' % (
node.name, context), node)
else:
loop = self.environment.is_async and 'async for' or 'for'
self.writeline('%s event in context.blocks[%r][0](%s):' % (
loop, node.name, context), node)
self.indent()
self.simple_write('event', frame)
self.outdent()
self.outdent(level)
def visit_Extends(self, node, frame):
"""Calls the extender."""
if not frame.toplevel:
self.fail('cannot use extend from a non top-level scope',
node.lineno)
# if the number of extends statements in general is zero so
# far, we don't have to add a check if something extended
# the template before this one.
if self.extends_so_far > 0:
# if we have a known extends we just add a template runtime
# error into the generated code. We could catch that at compile
            # time too, but it is better not to confuse users by throwing the
# same error at different times just "because we can".
if not self.has_known_extends:
self.writeline('if parent_template is not None:')
self.indent()
self.writeline('raise TemplateRuntimeError(%r)' %
'extended multiple times')
# if we have a known extends already we don't need that code here
# as we know that the template execution will end here.
if self.has_known_extends:
raise CompilerExit()
else:
self.outdent()
self.writeline('parent_template = environment.get_template(', node)
self.visit(node.template, frame)
self.write(', %r)' % self.name)
self.writeline('for name, parent_block in parent_template.'
'blocks.%s():' % dict_item_iter)
self.indent()
self.writeline('context.blocks.setdefault(name, []).'
'append(parent_block)')
self.outdent()
# if this extends statement was in the root level we can take
# advantage of that information and simplify the generated code
# in the top level from this point onwards
if frame.rootlevel:
self.has_known_extends = True
# and now we have one more
self.extends_so_far += 1
def visit_Include(self, node, frame):
"""Handles includes."""
if node.ignore_missing:
self.writeline('try:')
self.indent()
func_name = 'get_or_select_template'
if isinstance(node.template, nodes.Const):
if isinstance(node.template.value, string_types):
func_name = 'get_template'
elif isinstance(node.template.value, (tuple, list)):
func_name = 'select_template'
elif isinstance(node.template, (nodes.Tuple, nodes.List)):
func_name = 'select_template'
self.writeline('template = environment.%s(' % func_name, node)
self.visit(node.template, frame)
self.write(', %r)' % self.name)
if node.ignore_missing:
self.outdent()
self.writeline('except TemplateNotFound:')
self.indent()
self.writeline('pass')
self.outdent()
self.writeline('else:')
self.indent()
skip_event_yield = False
if node.with_context:
loop = self.environment.is_async and 'async for' or 'for'
self.writeline('%s event in template.root_render_func('
'template.new_context(context.get_all(), True, '
'%s)):' % (loop, self.dump_local_context(frame)))
elif self.environment.is_async:
self.writeline('for event in (await '
'template._get_default_module_async())'
'._body_stream:')
else:
if supports_yield_from:
self.writeline('yield from template._get_default_module()'
'._body_stream')
skip_event_yield = True
else:
self.writeline('for event in template._get_default_module()'
'._body_stream:')
if not skip_event_yield:
self.indent()
self.simple_write('event', frame)
self.outdent()
if node.ignore_missing:
self.outdent()
def visit_Import(self, node, frame):
"""Visit regular imports."""
self.writeline('%s = ' % frame.symbols.ref(node.target), node)
if frame.toplevel:
self.write('context.vars[%r] = ' % node.target)
if self.environment.is_async:
self.write('await ')
self.write('environment.get_template(')
self.visit(node.template, frame)
self.write(', %r).' % self.name)
if node.with_context:
self.write('make_module%s(context.get_all(), True, %s)'
% (self.environment.is_async and '_async' or '',
self.dump_local_context(frame)))
elif self.environment.is_async:
self.write('_get_default_module_async()')
else:
self.write('_get_default_module()')
if frame.toplevel and not node.target.startswith('_'):
self.writeline('context.exported_vars.discard(%r)' % node.target)
def visit_FromImport(self, node, frame):
"""Visit named imports."""
self.newline(node)
self.write('included_template = %senvironment.get_template('
% (self.environment.is_async and 'await ' or ''))
self.visit(node.template, frame)
self.write(', %r).' % self.name)
if node.with_context:
self.write('make_module%s(context.get_all(), True, %s)'
% (self.environment.is_async and '_async' or '',
self.dump_local_context(frame)))
elif self.environment.is_async:
self.write('_get_default_module_async()')
else:
self.write('_get_default_module()')
var_names = []
discarded_names = []
for name in node.names:
if isinstance(name, tuple):
name, alias = name
else:
alias = name
self.writeline('%s = getattr(included_template, '
'%r, missing)' % (frame.symbols.ref(alias), name))
self.writeline('if %s is missing:' % frame.symbols.ref(alias))
self.indent()
self.writeline('%s = undefined(%r %% '
'included_template.__name__, '
'name=%r)' %
(frame.symbols.ref(alias),
'the template %%r (imported on %s) does '
'not export the requested name %s' % (
self.position(node),
repr(name)
), name))
self.outdent()
if frame.toplevel:
var_names.append(alias)
if not alias.startswith('_'):
discarded_names.append(alias)
if var_names:
if len(var_names) == 1:
name = var_names[0]
self.writeline('context.vars[%r] = %s' %
(name, frame.symbols.ref(name)))
else:
self.writeline('context.vars.update({%s})' % ', '.join(
'%r: %s' % (name, frame.symbols.ref(name)) for name in var_names
))
if discarded_names:
if len(discarded_names) == 1:
self.writeline('context.exported_vars.discard(%r)' %
discarded_names[0])
else:
self.writeline('context.exported_vars.difference_'
'update((%s))' % ', '.join(imap(repr, discarded_names)))
def visit_For(self, node, frame):
loop_frame = frame.inner()
test_frame = frame.inner()
else_frame = frame.inner()
# try to figure out if we have an extended loop. An extended loop
        # is necessary if the loop is in recursive mode or if the special loop
# variable is accessed in the body.
extended_loop = node.recursive or 'loop' in \
find_undeclared(node.iter_child_nodes(
only=('body',)), ('loop',))
loop_ref = None
if extended_loop:
loop_ref = loop_frame.symbols.declare_parameter('loop')
loop_frame.symbols.analyze_node(node, for_branch='body')
if node.else_:
else_frame.symbols.analyze_node(node, for_branch='else')
if node.test:
loop_filter_func = self.temporary_identifier()
test_frame.symbols.analyze_node(node, for_branch='test')
self.writeline('%s(fiter):' % self.func(loop_filter_func), node.test)
self.indent()
self.enter_frame(test_frame)
self.writeline(self.environment.is_async and 'async for ' or 'for ')
self.visit(node.target, loop_frame)
self.write(' in ')
self.write(self.environment.is_async and 'auto_aiter(fiter)' or 'fiter')
self.write(':')
self.indent()
self.writeline('if ', node.test)
self.visit(node.test, test_frame)
self.write(':')
self.indent()
self.writeline('yield ')
self.visit(node.target, loop_frame)
self.outdent(3)
self.leave_frame(test_frame, with_python_scope=True)
        # if we don't have a recursive loop we have to find the shadowed
# variables at that point. Because loops can be nested but the loop
# variable is a special one we have to enforce aliasing for it.
if node.recursive:
self.writeline('%s(reciter, loop_render_func, depth=0):' %
self.func('loop'), node)
self.indent()
self.buffer(loop_frame)
# Use the same buffer for the else frame
else_frame.buffer = loop_frame.buffer
# make sure the loop variable is a special one and raise a template
# assertion error if a loop tries to write to loop
if extended_loop:
self.writeline('%s = missing' % loop_ref)
for name in node.find_all(nodes.Name):
if name.ctx == 'store' and name.name == 'loop':
self.fail('Can\'t assign to special loop variable '
'in for-loop target', name.lineno)
if node.else_:
iteration_indicator = self.temporary_identifier()
self.writeline('%s = 1' % iteration_indicator)
self.writeline(self.environment.is_async and 'async for ' or 'for ', node)
self.visit(node.target, loop_frame)
if extended_loop:
if self.environment.is_async:
self.write(', %s in await make_async_loop_context(' % loop_ref)
else:
self.write(', %s in LoopContext(' % loop_ref)
else:
self.write(' in ')
if node.test:
self.write('%s(' % loop_filter_func)
if node.recursive:
self.write('reciter')
else:
if self.environment.is_async and not extended_loop:
self.write('auto_aiter(')
self.visit(node.iter, frame)
if self.environment.is_async and not extended_loop:
self.write(')')
if node.test:
self.write(')')
if node.recursive:
self.write(', loop_render_func, depth):')
else:
self.write(extended_loop and '):' or ':')
self.indent()
self.enter_frame(loop_frame)
self.blockvisit(node.body, loop_frame)
if node.else_:
self.writeline('%s = 0' % iteration_indicator)
self.outdent()
self.leave_frame(loop_frame, with_python_scope=node.recursive
and not node.else_)
if node.else_:
self.writeline('if %s:' % iteration_indicator)
self.indent()
self.enter_frame(else_frame)
self.blockvisit(node.else_, else_frame)
self.leave_frame(else_frame)
self.outdent()
# if the node was recursive we have to return the buffer contents
# and start the iteration code
if node.recursive:
self.return_buffer_contents(loop_frame)
self.outdent()
self.start_write(frame, node)
if self.environment.is_async:
self.write('await ')
self.write('loop(')
if self.environment.is_async:
self.write('auto_aiter(')
self.visit(node.iter, frame)
if self.environment.is_async:
self.write(')')
self.write(', loop)')
self.end_write(frame)
def visit_If(self, node, frame):
if_frame = frame.soft()
self.writeline('if ', node)
self.visit(node.test, if_frame)
self.write(':')
self.indent()
self.blockvisit(node.body, if_frame)
self.outdent()
if node.else_:
self.writeline('else:')
self.indent()
self.blockvisit(node.else_, if_frame)
self.outdent()
def visit_Macro(self, node, frame):
macro_frame, macro_ref = self.macro_body(node, frame)
self.newline()
if frame.toplevel:
if not node.name.startswith('_'):
self.write('context.exported_vars.add(%r)' % node.name)
ref = frame.symbols.ref(node.name)
self.writeline('context.vars[%r] = ' % node.name)
self.write('%s = ' % frame.symbols.ref(node.name))
self.macro_def(macro_ref, macro_frame)
def visit_CallBlock(self, node, frame):
call_frame, macro_ref = self.macro_body(node, frame)
self.writeline('caller = ')
self.macro_def(macro_ref, call_frame)
self.start_write(frame, node)
self.visit_Call(node.call, frame, forward_caller=True)
self.end_write(frame)
def visit_FilterBlock(self, node, frame):
filter_frame = frame.inner()
filter_frame.symbols.analyze_node(node)
self.enter_frame(filter_frame)
self.buffer(filter_frame)
self.blockvisit(node.body, filter_frame)
self.start_write(frame, node)
self.visit_Filter(node.filter, filter_frame)
self.end_write(frame)
self.leave_frame(filter_frame)
def visit_With(self, node, frame):
with_frame = frame.inner()
with_frame.symbols.analyze_node(node)
self.enter_frame(with_frame)
for idx, (target, expr) in enumerate(izip(node.targets, node.values)):
self.newline()
self.visit(target, with_frame)
self.write(' = ')
self.visit(expr, frame)
self.blockvisit(node.body, with_frame)
self.leave_frame(with_frame)
def visit_ExprStmt(self, node, frame):
self.newline(node)
self.visit(node.node, frame)
def visit_Output(self, node, frame):
# if we have a known extends statement, we don't output anything
# if we are in a require_output_check section
if self.has_known_extends and frame.require_output_check:
return
allow_constant_finalize = True
if self.environment.finalize:
func = self.environment.finalize
if getattr(func, 'contextfunction', False) or \
getattr(func, 'evalcontextfunction', False):
allow_constant_finalize = False
elif getattr(func, 'environmentfunction', False):
finalize = lambda x: text_type(
self.environment.finalize(self.environment, x))
else:
finalize = lambda x: text_type(self.environment.finalize(x))
else:
finalize = text_type
# if we are inside a frame that requires output checking, we do so
outdent_later = False
if frame.require_output_check:
self.writeline('if parent_template is None:')
self.indent()
outdent_later = True
# try to evaluate as many chunks as possible into a static
# string at compile time.
body = []
for child in node.nodes:
try:
if not allow_constant_finalize:
raise nodes.Impossible()
const = child.as_const(frame.eval_ctx)
except nodes.Impossible:
body.append(child)
continue
            # the frame can't be volatile here, because otherwise the
# as_const() function would raise an Impossible exception
# at that point.
try:
if frame.eval_ctx.autoescape:
if hasattr(const, '__html__'):
const = const.__html__()
else:
const = escape(const)
const = finalize(const)
except Exception:
# if something goes wrong here we evaluate the node
# at runtime for easier debugging
body.append(child)
continue
if body and isinstance(body[-1], list):
body[-1].append(const)
else:
body.append([const])
        # if we have fewer than 3 nodes or a buffer we yield or extend/append
if len(body) < 3 or frame.buffer is not None:
if frame.buffer is not None:
# for one item we append, for more we extend
if len(body) == 1:
self.writeline('%s.append(' % frame.buffer)
else:
self.writeline('%s.extend((' % frame.buffer)
self.indent()
for item in body:
if isinstance(item, list):
val = repr(concat(item))
if frame.buffer is None:
self.writeline('yield ' + val)
else:
self.writeline(val + ',')
else:
if frame.buffer is None:
self.writeline('yield ', item)
else:
self.newline(item)
close = 1
if frame.eval_ctx.volatile:
self.write('(escape if context.eval_ctx.autoescape'
' else to_string)(')
elif frame.eval_ctx.autoescape:
self.write('escape(')
else:
self.write('to_string(')
if self.environment.finalize is not None:
self.write('environment.finalize(')
if getattr(self.environment.finalize,
"contextfunction", False):
self.write('context, ')
close += 1
self.visit(item, frame)
self.write(')' * close)
if frame.buffer is not None:
self.write(',')
if frame.buffer is not None:
# close the open parentheses
self.outdent()
self.writeline(len(body) == 1 and ')' or '))')
# otherwise we create a format string as this is faster in that case
else:
format = []
arguments = []
for item in body:
if isinstance(item, list):
format.append(concat(item).replace('%', '%%'))
else:
format.append('%s')
arguments.append(item)
self.writeline('yield ')
self.write(repr(concat(format)) + ' % (')
self.indent()
for argument in arguments:
self.newline(argument)
close = 0
if frame.eval_ctx.volatile:
self.write('(escape if context.eval_ctx.autoescape else'
' to_string)(')
close += 1
elif frame.eval_ctx.autoescape:
self.write('escape(')
close += 1
if self.environment.finalize is not None:
self.write('environment.finalize(')
if getattr(self.environment.finalize,
'contextfunction', False):
self.write('context, ')
elif getattr(self.environment.finalize,
'evalcontextfunction', False):
self.write('context.eval_ctx, ')
elif getattr(self.environment.finalize,
'environmentfunction', False):
self.write('environment, ')
close += 1
self.visit(argument, frame)
self.write(')' * close + ', ')
self.outdent()
self.writeline(')')
if outdent_later:
self.outdent()
def visit_Assign(self, node, frame):
self.push_assign_tracking()
self.newline(node)
self.visit(node.target, frame)
self.write(' = ')
self.visit(node.node, frame)
self.pop_assign_tracking(frame)
def visit_AssignBlock(self, node, frame):
self.push_assign_tracking()
block_frame = frame.inner()
# This is a special case. Since a set block always captures we
# will disable output checks. This way one can use set blocks
# toplevel even in extended templates.
block_frame.require_output_check = False
block_frame.symbols.analyze_node(node)
self.enter_frame(block_frame)
self.buffer(block_frame)
self.blockvisit(node.body, block_frame)
self.newline(node)
self.visit(node.target, frame)
self.write(' = (Markup if context.eval_ctx.autoescape '
'else identity)(concat(%s))' % block_frame.buffer)
self.pop_assign_tracking(frame)
self.leave_frame(block_frame)
# -- Expression Visitors
def visit_Name(self, node, frame):
if node.ctx == 'store' and frame.toplevel:
if self._assign_stack:
self._assign_stack[-1].add(node.name)
ref = frame.symbols.ref(node.name)
# If we are looking up a variable we might have to deal with the
# case where it's undefined. We can skip that case if the load
        # instruction indicates a parameter, which is always defined.
if node.ctx == 'load':
load = frame.symbols.find_load(ref)
if not (load is not None and load[0] == VAR_LOAD_PARAMETER and \
not self.parameter_is_undeclared(ref)):
self.write('(undefined(name=%r) if %s is missing else %s)' %
(node.name, ref, ref))
return
self.write(ref)
def visit_Const(self, node, frame):
val = node.as_const(frame.eval_ctx)
if isinstance(val, float):
self.write(str(val))
else:
self.write(repr(val))
def visit_TemplateData(self, node, frame):
try:
self.write(repr(node.as_const(frame.eval_ctx)))
except nodes.Impossible:
self.write('(Markup if context.eval_ctx.autoescape else identity)(%r)'
% node.data)
def visit_Tuple(self, node, frame):
self.write('(')
idx = -1
for idx, item in enumerate(node.items):
if idx:
self.write(', ')
self.visit(item, frame)
self.write(idx == 0 and ',)' or ')')
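        # e.g. an empty tuple renders as ``()`` and a one-item tuple as
        # ``(x,)``, so the generated code keeps Python's tuple semantics.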
def visit_List(self, node, frame):
self.write('[')
for idx, item in enumerate(node.items):
if idx:
self.write(', ')
self.visit(item, frame)
self.write(']')
def visit_Dict(self, node, frame):
self.write('{')
for idx, item in enumerate(node.items):
if idx:
self.write(', ')
self.visit(item.key, frame)
self.write(': ')
self.visit(item.value, frame)
self.write('}')
def binop(operator, interceptable=True):
@optimizeconst
def visitor(self, node, frame):
if self.environment.sandboxed and \
operator in self.environment.intercepted_binops:
self.write('environment.call_binop(context, %r, ' % operator)
self.visit(node.left, frame)
self.write(', ')
self.visit(node.right, frame)
else:
self.write('(')
self.visit(node.left, frame)
self.write(' %s ' % operator)
self.visit(node.right, frame)
self.write(')')
return visitor
def uaop(operator, interceptable=True):
@optimizeconst
def visitor(self, node, frame):
if self.environment.sandboxed and \
operator in self.environment.intercepted_unops:
self.write('environment.call_unop(context, %r, ' % operator)
self.visit(node.node, frame)
else:
self.write('(' + operator)
self.visit(node.node, frame)
self.write(')')
return visitor
visit_Add = binop('+')
visit_Sub = binop('-')
visit_Mul = binop('*')
visit_Div = binop('/')
visit_FloorDiv = binop('//')
visit_Pow = binop('**')
visit_Mod = binop('%')
visit_And = binop('and', interceptable=False)
visit_Or = binop('or', interceptable=False)
visit_Pos = uaop('+')
visit_Neg = uaop('-')
visit_Not = uaop('not ', interceptable=False)
del binop, uaop
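    # Example (illustrative): for ``{{ a + b }}`` the binop visitor emits the
    # left operand, the operator and the right operand wrapped in parentheses,
    # e.g. ``(... + ...)``; in a sandboxed environment where '+' is
    # intercepted it emits ``environment.call_binop(context, '+', ..., ...)``
    # instead.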
@optimizeconst
def visit_Concat(self, node, frame):
if frame.eval_ctx.volatile:
func_name = '(context.eval_ctx.volatile and' \
' markup_join or unicode_join)'
elif frame.eval_ctx.autoescape:
func_name = 'markup_join'
else:
func_name = 'unicode_join'
self.write('%s((' % func_name)
for arg in node.nodes:
self.visit(arg, frame)
self.write(', ')
self.write('))')
@optimizeconst
def visit_Compare(self, node, frame):
self.visit(node.expr, frame)
for op in node.ops:
self.visit(op, frame)
def visit_Operand(self, node, frame):
self.write(' %s ' % operators[node.op])
self.visit(node.expr, frame)
@optimizeconst
def visit_Getattr(self, node, frame):
self.write('environment.getattr(')
self.visit(node.node, frame)
self.write(', %r)' % node.attr)
@optimizeconst
def visit_Getitem(self, node, frame):
# slices bypass the environment getitem method.
if isinstance(node.arg, nodes.Slice):
self.visit(node.node, frame)
self.write('[')
self.visit(node.arg, frame)
self.write(']')
else:
self.write('environment.getitem(')
self.visit(node.node, frame)
self.write(', ')
self.visit(node.arg, frame)
self.write(')')
def visit_Slice(self, node, frame):
if node.start is not None:
self.visit(node.start, frame)
self.write(':')
if node.stop is not None:
self.visit(node.stop, frame)
if node.step is not None:
self.write(':')
self.visit(node.step, frame)
@optimizeconst
def visit_Filter(self, node, frame):
if self.environment.is_async:
self.write('await auto_await(')
self.write(self.filters[node.name] + '(')
func = self.environment.filters.get(node.name)
if func is None:
self.fail('no filter named %r' % node.name, node.lineno)
if getattr(func, 'contextfilter', False):
self.write('context, ')
elif getattr(func, 'evalcontextfilter', False):
self.write('context.eval_ctx, ')
elif getattr(func, 'environmentfilter', False):
self.write('environment, ')
# if the filter node is None we are inside a filter block
# and want to write to the current buffer
if node.node is not None:
self.visit(node.node, frame)
elif frame.eval_ctx.volatile:
self.write('(context.eval_ctx.autoescape and'
' Markup(concat(%s)) or concat(%s))' %
(frame.buffer, frame.buffer))
elif frame.eval_ctx.autoescape:
self.write('Markup(concat(%s))' % frame.buffer)
else:
self.write('concat(%s)' % frame.buffer)
self.signature(node, frame)
self.write(')')
if self.environment.is_async:
self.write(')')
@optimizeconst
def visit_Test(self, node, frame):
self.write(self.tests[node.name] + '(')
if node.name not in self.environment.tests:
self.fail('no test named %r' % node.name, node.lineno)
self.visit(node.node, frame)
self.signature(node, frame)
self.write(')')
@optimizeconst
def visit_CondExpr(self, node, frame):
def write_expr2():
if node.expr2 is not None:
return self.visit(node.expr2, frame)
self.write('undefined(%r)' % ('the inline if-'
'expression on %s evaluated to false and '
'no else section was defined.' % self.position(node)))
self.write('(')
self.visit(node.expr1, frame)
self.write(' if ')
self.visit(node.test, frame)
self.write(' else ')
write_expr2()
self.write(')')
@optimizeconst
def visit_Call(self, node, frame, forward_caller=False):
if self.environment.is_async:
self.write('await auto_await(')
if self.environment.sandboxed:
self.write('environment.call(context, ')
else:
self.write('context.call(')
self.visit(node.node, frame)
extra_kwargs = forward_caller and {'caller': 'caller'} or None
self.signature(node, frame, extra_kwargs)
self.write(')')
if self.environment.is_async:
self.write(')')
def visit_Keyword(self, node, frame):
self.write(node.key + '=')
self.visit(node.value, frame)
# -- Unused nodes for extensions
def visit_MarkSafe(self, node, frame):
self.write('Markup(')
self.visit(node.expr, frame)
self.write(')')
def visit_MarkSafeIfAutoescape(self, node, frame):
self.write('(context.eval_ctx.autoescape and Markup or identity)(')
self.visit(node.expr, frame)
self.write(')')
def visit_EnvironmentAttribute(self, node, frame):
self.write('environment.' + node.name)
def visit_ExtensionAttribute(self, node, frame):
self.write('environment.extensions[%r].%s' % (node.identifier, node.name))
def visit_ImportedName(self, node, frame):
self.write(self.import_aliases[node.importname])
def visit_InternalName(self, node, frame):
self.write(node.name)
def visit_ContextReference(self, node, frame):
self.write('context')
def visit_Continue(self, node, frame):
self.writeline('continue', node)
def visit_Break(self, node, frame):
self.writeline('break', node)
def visit_Scope(self, node, frame):
scope_frame = frame.inner()
scope_frame.symbols.analyze_node(node)
self.enter_frame(scope_frame)
self.blockvisit(node.body, scope_frame)
self.leave_frame(scope_frame)
def visit_EvalContextModifier(self, node, frame):
for keyword in node.options:
self.writeline('context.eval_ctx.%s = ' % keyword.key)
self.visit(keyword.value, frame)
try:
val = keyword.value.as_const(frame.eval_ctx)
except nodes.Impossible:
frame.eval_ctx.volatile = True
else:
setattr(frame.eval_ctx, keyword.key, val)
def visit_ScopedEvalContextModifier(self, node, frame):
old_ctx_name = self.temporary_identifier()
saved_ctx = frame.eval_ctx.save()
self.writeline('%s = context.eval_ctx.save()' % old_ctx_name)
self.visit_EvalContextModifier(node, frame)
for child in node.body:
self.visit(child, frame)
frame.eval_ctx.revert(saved_ctx)
self.writeline('context.eval_ctx.revert(%s)' % old_ctx_name)
| gpl-3.0 | -8,858,153,802,111,853,000 | 37.06957 | 87 | 0.548936 | false |
OTWillems/GEO1005 | SpatialDecision/external/networkx/generators/tests/test_directed.py | 77 | 1313 | #!/usr/bin/env python
"""Generators - Directed Graphs
----------------------------
"""
from nose.tools import *
from networkx import *
from networkx.generators.directed import *
class TestGeneratorsDirected():
def test_smoke_test_random_graphs(self):
G=gn_graph(100)
G=gnr_graph(100,0.5)
G=gnc_graph(100)
G=scale_free_graph(100)
def test_create_using_keyword_arguments(self):
assert_raises(networkx.exception.NetworkXError,
gn_graph, 100, create_using=Graph())
assert_raises(networkx.exception.NetworkXError,
gnr_graph, 100, 0.5, create_using=Graph())
assert_raises(networkx.exception.NetworkXError,
gnc_graph, 100, create_using=Graph())
assert_raises(networkx.exception.NetworkXError,
scale_free_graph, 100, create_using=Graph())
G=gn_graph(100,seed=1)
MG=gn_graph(100,create_using=MultiDiGraph(),seed=1)
assert_equal(G.edges(), MG.edges())
G=gnr_graph(100,0.5,seed=1)
MG=gnr_graph(100,0.5,create_using=MultiDiGraph(),seed=1)
assert_equal(G.edges(), MG.edges())
G=gnc_graph(100,seed=1)
MG=gnc_graph(100,create_using=MultiDiGraph(),seed=1)
assert_equal(G.edges(), MG.edges())
| gpl-2.0 | -3,057,644,906,477,777,000 | 35.472222 | 66 | 0.605484 | false |
joequant/zipline | zipline/finance/blotter.py | 29 | 14087 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import uuid
from copy import copy
from logbook import Logger
from collections import defaultdict
from six import text_type, iteritems
from six.moves import filter
import zipline.errors
import zipline.protocol as zp
from zipline.finance.slippage import (
VolumeShareSlippage,
transact_partial,
check_order_triggers
)
from zipline.finance.commission import PerShare
from zipline.utils.protocol_utils import Enum
from zipline.utils.serialization_utils import (
VERSION_LABEL
)
log = Logger('Blotter')
ORDER_STATUS = Enum(
'OPEN',
'FILLED',
'CANCELLED',
'REJECTED',
'HELD',
)
class Blotter(object):
def __init__(self):
self.transact = transact_partial(VolumeShareSlippage(), PerShare())
# these orders are aggregated by sid
self.open_orders = defaultdict(list)
# keep a dict of orders by their own id
self.orders = {}
# holding orders that have come in since the last
# event.
self.new_orders = []
self.current_dt = None
self.max_shares = int(1e+11)
def __repr__(self):
return """
{class_name}(
transact_partial={transact_partial},
open_orders={open_orders},
orders={orders},
new_orders={new_orders},
current_dt={current_dt})
""".strip().format(class_name=self.__class__.__name__,
transact_partial=self.transact.args,
open_orders=self.open_orders,
orders=self.orders,
new_orders=self.new_orders,
current_dt=self.current_dt)
def set_date(self, dt):
self.current_dt = dt
def order(self, sid, amount, style, order_id=None):
# something could be done with amount to further divide
# between buy by share count OR buy shares up to a dollar amount
# numeric == share count AND "$dollar.cents" == cost amount
"""
amount > 0 :: Buy/Cover
amount < 0 :: Sell/Short
Market order: order(sid, amount)
Limit order: order(sid, amount, style=LimitOrder(limit_price))
Stop order: order(sid, amount, style=StopOrder(stop_price))
StopLimit order: order(sid, amount, style=StopLimitOrder(limit_price,
stop_price))
"""
if amount == 0:
# Don't bother placing orders for 0 shares.
return
elif amount > self.max_shares:
# Arbitrary limit of 100 billion (US) shares will never be
# exceeded except by a buggy algorithm.
raise OverflowError("Can't order more than %d shares" %
self.max_shares)
is_buy = (amount > 0)
order = Order(
dt=self.current_dt,
sid=sid,
amount=amount,
stop=style.get_stop_price(is_buy),
limit=style.get_limit_price(is_buy),
id=order_id
)
self.open_orders[order.sid].append(order)
self.orders[order.id] = order
self.new_orders.append(order)
return order.id
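        # Example usage (illustrative; the style classes are assumed to come
        # from zipline's execution-style module and are not defined here):
        #
        #   blotter.order(sid=24, amount=100, style=MarketOrder())
        #   blotter.order(sid=24, amount=-50, style=LimitOrder(10.5))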
def cancel(self, order_id):
if order_id not in self.orders:
return
cur_order = self.orders[order_id]
if cur_order.open:
order_list = self.open_orders[cur_order.sid]
if cur_order in order_list:
order_list.remove(cur_order)
if cur_order in self.new_orders:
self.new_orders.remove(cur_order)
cur_order.cancel()
cur_order.dt = self.current_dt
# we want this order's new status to be relayed out
# along with newly placed orders.
self.new_orders.append(cur_order)
def reject(self, order_id, reason=''):
"""
Mark the given order as 'rejected', which is functionally similar to
cancelled. The distinction is that rejections are involuntary (and
usually include a message from a broker indicating why the order was
rejected) while cancels are typically user-driven.
"""
if order_id not in self.orders:
return
cur_order = self.orders[order_id]
order_list = self.open_orders[cur_order.sid]
if cur_order in order_list:
order_list.remove(cur_order)
if cur_order in self.new_orders:
self.new_orders.remove(cur_order)
cur_order.reject(reason=reason)
cur_order.dt = self.current_dt
# we want this order's new status to be relayed out
# along with newly placed orders.
self.new_orders.append(cur_order)
def hold(self, order_id, reason=''):
"""
Mark the order with order_id as 'held'. Held is functionally similar
to 'open'. When a fill (full or partial) arrives, the status
will automatically change back to open/filled as necessary.
"""
if order_id not in self.orders:
return
cur_order = self.orders[order_id]
if cur_order.open:
if cur_order in self.new_orders:
self.new_orders.remove(cur_order)
cur_order.hold(reason=reason)
cur_order.dt = self.current_dt
# we want this order's new status to be relayed out
# along with newly placed orders.
self.new_orders.append(cur_order)
def process_split(self, split_event):
if split_event.sid not in self.open_orders:
return
orders_to_modify = self.open_orders[split_event.sid]
for order in orders_to_modify:
order.handle_split(split_event)
def process_benchmark(self, benchmark_event):
        # the ``yield`` after ``return`` makes this an empty generator that
        # yields nothing; benchmark events need no blotter processing.
        return
        yield
def process_trade(self, trade_event):
if trade_event.sid not in self.open_orders:
return
if trade_event.volume < 1:
            # there are zero volume trade_events because some stocks trade
# less frequently than once per minute.
return
orders = self.open_orders[trade_event.sid]
orders.sort(key=lambda o: o.dt)
# Only use orders for the current day or before
current_orders = filter(
lambda o: o.dt <= trade_event.dt,
orders)
processed_orders = []
for txn, order in self.process_transactions(trade_event,
current_orders):
processed_orders.append(order)
yield txn, order
# remove closed orders. we should only have to check
# processed orders
def not_open(order):
return not order.open
closed_orders = filter(not_open, processed_orders)
for order in closed_orders:
orders.remove(order)
if len(orders) == 0:
del self.open_orders[trade_event.sid]
def process_transactions(self, trade_event, current_orders):
for order, txn in self.transact(trade_event, current_orders):
if txn.type == zp.DATASOURCE_TYPE.COMMISSION:
order.commission = (order.commission or 0.0) + txn.cost
else:
if txn.amount == 0:
raise zipline.errors.TransactionWithNoAmount(txn=txn)
if math.copysign(1, txn.amount) != order.direction:
raise zipline.errors.TransactionWithWrongDirection(
txn=txn, order=order)
if abs(txn.amount) > abs(self.orders[txn.order_id].amount):
raise zipline.errors.TransactionVolumeExceedsOrder(
txn=txn, order=order)
order.filled += txn.amount
if txn.commission is not None:
order.commission = ((order.commission or 0.0) +
txn.commission)
# mark the date of the order to match the transaction
# that is filling it.
order.dt = txn.dt
yield txn, order
def __getstate__(self):
state_to_save = ['new_orders', 'orders', '_status']
state_dict = {k: self.__dict__[k] for k in state_to_save
if k in self.__dict__}
# Have to handle defaultdicts specially
state_dict['open_orders'] = dict(self.open_orders)
STATE_VERSION = 1
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
self.__init__()
OLDEST_SUPPORTED_STATE = 1
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
raise BaseException("Blotter saved is state too old.")
open_orders = defaultdict(list)
open_orders.update(state.pop('open_orders'))
self.open_orders = open_orders
self.__dict__.update(state)
class Order(object):
def __init__(self, dt, sid, amount, stop=None, limit=None, filled=0,
commission=None, id=None):
"""
@dt - datetime.datetime that the order was placed
@sid - stock sid of the order
@amount - the number of shares to buy/sell
a positive sign indicates a buy
a negative sign indicates a sell
@filled - how many shares of the order have been filled so far
"""
# get a string representation of the uuid.
self.id = id or self.make_id()
self.dt = dt
self.reason = None
self.created = dt
self.sid = sid
self.amount = amount
self.filled = filled
self.commission = commission
self._status = ORDER_STATUS.OPEN
self.stop = stop
self.limit = limit
self.stop_reached = False
self.limit_reached = False
self.direction = math.copysign(1, self.amount)
self.type = zp.DATASOURCE_TYPE.ORDER
def make_id(self):
return uuid.uuid4().hex
def to_dict(self):
py = copy(self.__dict__)
for field in ['type', 'direction', '_status']:
del py[field]
py['status'] = self.status
return py
def to_api_obj(self):
pydict = self.to_dict()
obj = zp.Order(initial_values=pydict)
return obj
def check_triggers(self, event):
"""
Update internal state based on price triggers and the
trade event's price.
"""
stop_reached, limit_reached, sl_stop_reached = \
check_order_triggers(self, event)
if (stop_reached, limit_reached) \
!= (self.stop_reached, self.limit_reached):
self.dt = event.dt
self.stop_reached = stop_reached
self.limit_reached = limit_reached
if sl_stop_reached:
# Change the STOP LIMIT order into a LIMIT order
self.stop = None
def handle_split(self, split_event):
ratio = split_event.ratio
# update the amount, limit_price, and stop_price
# by the split's ratio
# info here: http://finra.complinet.com/en/display/display_plain.html?
# rbid=2403&element_id=8950&record_id=12208&print=1
# new_share_amount = old_share_amount / ratio
# new_price = old_price * ratio
self.amount = int(self.amount / ratio)
if self.limit is not None:
self.limit = round(self.limit * ratio, 2)
if self.stop is not None:
self.stop = round(self.stop * ratio, 2)
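        # e.g. with ratio 0.5 (a 2-for-1 split under this convention), an open
        # order for 100 shares with a $10.00 limit becomes 200 shares with a
        # $5.00 limit.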
@property
def status(self):
if not self.open_amount:
return ORDER_STATUS.FILLED
elif self._status == ORDER_STATUS.HELD and self.filled:
return ORDER_STATUS.OPEN
else:
return self._status
@status.setter
def status(self, status):
self._status = status
def cancel(self):
self.status = ORDER_STATUS.CANCELLED
def reject(self, reason=''):
self.status = ORDER_STATUS.REJECTED
self.reason = reason
def hold(self, reason=''):
self.status = ORDER_STATUS.HELD
self.reason = reason
@property
def open(self):
return self.status in [ORDER_STATUS.OPEN, ORDER_STATUS.HELD]
@property
def triggered(self):
"""
For a market order, True.
For a stop order, True IFF stop_reached.
For a limit order, True IFF limit_reached.
"""
if self.stop is not None and not self.stop_reached:
return False
if self.limit is not None and not self.limit_reached:
return False
return True
@property
def open_amount(self):
return self.amount - self.filled
def __repr__(self):
"""
String representation for this object.
"""
return "Order(%s)" % self.to_dict().__repr__()
def __unicode__(self):
"""
Unicode representation for this object.
"""
return text_type(repr(self))
def __getstate__(self):
state_dict = \
{k: v for k, v in iteritems(self.__dict__)
if not k.startswith('_')}
state_dict['_status'] = self._status
STATE_VERSION = 1
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
OLDEST_SUPPORTED_STATE = 1
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
raise BaseException("Order saved state is too old.")
self.__dict__.update(state)
| apache-2.0 | -1,606,639,483,723,169,500 | 30.514541 | 78 | 0.582168 | false |
kangkot/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/site-packages/pythonwin/pywin/Demos/app/demoutils.py | 34 | 1309 | # Utilities for the demos
import sys, win32api, win32con, win32ui
NotScriptMsg = """\
This demo program is not designed to be run as a Script, but is
probably used by some other test program. Please try another demo.
"""
NeedGUIMsg = """\
This demo program can only be run from inside of Pythonwin
You must start Pythonwin, and select 'Run' from the toolbar or File menu
"""
NeedAppMsg = """\
This demo program is a 'Pythonwin Application'.
It is more demo code than an example of Pythonwin's capabilities.
To run it, you must execute the command:
pythonwin.exe /app "%s"
Would you like to execute it now?
"""
def NotAScript():
import win32ui
win32ui.MessageBox(NotScriptMsg, "Demos")
def NeedGoodGUI():
from pywin.framework.app import HaveGoodGUI
rc = HaveGoodGUI()
if not rc:
win32ui.MessageBox(NeedGUIMsg, "Demos")
return rc
def NeedApp():
import win32ui
rc = win32ui.MessageBox(NeedAppMsg % sys.argv[0], "Demos", win32con.MB_YESNO)
if rc==win32con.IDYES:
try:
parent = win32ui.GetMainFrame().GetSafeHwnd()
win32api.ShellExecute(parent, None, 'pythonwin.exe', '/app "%s"' % sys.argv[0], None, 1)
except win32api.error, details:
win32ui.MessageBox("Error executing command - %s" % (details), "Demos")
if __name__=='__main__':
import demoutils
demoutils.NotAScript()
| apache-2.0 | -4,294,163,530,055,966,000 | 24.173077 | 91 | 0.721161 | false |
idegtiarov/gnocchi-rep | gnocchi/ceilometer/resources/ceph_account.py | 1 | 1072 | #
# Copyright 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from gnocchi.ceilometer.resources import base
class CephAccount(base.ResourceBase):
@staticmethod
def get_resource_extra_attributes(sample):
return {}
@staticmethod
def get_metrics_names():
return ['radosgw.api.request',
'radosgw.objects.size',
'radosgw.objects',
'radosgw.objects.containers',
'radosgw.containers.objects',
'radosgw.containers.objects.size',
]
| apache-2.0 | 4,732,176,125,918,304,000 | 33.580645 | 75 | 0.677239 | false |
hassanabidpk/django | django/template/library.py | 348 | 12752 | import functools
import warnings
from importlib import import_module
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.html import conditional_escape
from django.utils.inspect import getargspec
from django.utils.itercompat import is_iterable
from .base import Node, Template, token_kwargs
from .exceptions import TemplateSyntaxError
class InvalidTemplateLibrary(Exception):
pass
class Library(object):
"""
A class for registering template tags and filters. Compiled filter and
template tag functions are stored in the filters and tags attributes.
The filter, simple_tag, and inclusion_tag methods provide a convenient
way to register callables as tags.
"""
def __init__(self):
self.filters = {}
self.tags = {}
def tag(self, name=None, compile_function=None):
if name is None and compile_function is None:
# @register.tag()
return self.tag_function
elif name is not None and compile_function is None:
if callable(name):
# @register.tag
return self.tag_function(name)
else:
# @register.tag('somename') or @register.tag(name='somename')
def dec(func):
return self.tag(name, func)
return dec
elif name is not None and compile_function is not None:
# register.tag('somename', somefunc)
self.tags[name] = compile_function
return compile_function
else:
raise ValueError(
"Unsupported arguments to Library.tag: (%r, %r)" %
(name, compile_function),
)
def tag_function(self, func):
self.tags[getattr(func, "_decorated_function", func).__name__] = func
return func
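    # The registration forms handled above are equivalent, e.g. (illustrative):
    #
    #   @register.tag
    #   def current_time(parser, token): ...
    #
    #   @register.tag('current_time')
    #   def current_time(parser, token): ...
    #
    #   register.tag('current_time', current_time)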
def filter(self, name=None, filter_func=None, **flags):
"""
Register a callable as a template filter. Example:
@register.filter
def lower(value):
return value.lower()
"""
if name is None and filter_func is None:
# @register.filter()
def dec(func):
return self.filter_function(func, **flags)
return dec
elif name is not None and filter_func is None:
if callable(name):
# @register.filter
return self.filter_function(name, **flags)
else:
# @register.filter('somename') or @register.filter(name='somename')
def dec(func):
return self.filter(name, func, **flags)
return dec
elif name is not None and filter_func is not None:
# register.filter('somename', somefunc)
self.filters[name] = filter_func
for attr in ('expects_localtime', 'is_safe', 'needs_autoescape'):
if attr in flags:
value = flags[attr]
# set the flag on the filter for FilterExpression.resolve
setattr(filter_func, attr, value)
# set the flag on the innermost decorated function
# for decorators that need it, e.g. stringfilter
if hasattr(filter_func, "_decorated_function"):
setattr(filter_func._decorated_function, attr, value)
filter_func._filter_name = name
return filter_func
else:
raise ValueError(
"Unsupported arguments to Library.filter: (%r, %r)" %
(name, filter_func),
)
def filter_function(self, func, **flags):
name = getattr(func, "_decorated_function", func).__name__
return self.filter(name, func, **flags)
def simple_tag(self, func=None, takes_context=None, name=None):
"""
Register a callable as a compiled template tag. Example:
@register.simple_tag
def hello(*args, **kwargs):
return 'world'
"""
def dec(func):
params, varargs, varkw, defaults = getargspec(func)
function_name = (name or getattr(func, '_decorated_function', func).__name__)
@functools.wraps(func)
def compile_func(parser, token):
bits = token.split_contents()[1:]
target_var = None
if len(bits) >= 2 and bits[-2] == 'as':
target_var = bits[-1]
bits = bits[:-2]
args, kwargs = parse_bits(parser, bits, params,
varargs, varkw, defaults, takes_context, function_name)
return SimpleNode(func, takes_context, args, kwargs, target_var)
self.tag(function_name, compile_func)
return func
if func is None:
# @register.simple_tag(...)
return dec
elif callable(func):
# @register.simple_tag
return dec(func)
else:
raise ValueError("Invalid arguments provided to simple_tag")
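        # Note that the compile function above also supports the ``as`` syntax,
        # e.g. ``{% hello arg1 as greeting %}`` stores the tag's output in the
        # ``greeting`` context variable instead of rendering it in place.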
def assignment_tag(self, func=None, takes_context=None, name=None):
warnings.warn(
"assignment_tag() is deprecated. Use simple_tag() instead",
RemovedInDjango20Warning,
stacklevel=2,
)
return self.simple_tag(func, takes_context, name)
def inclusion_tag(self, filename, func=None, takes_context=None, name=None):
"""
Register a callable as an inclusion tag:
@register.inclusion_tag('results.html')
def show_results(poll):
choices = poll.choice_set.all()
return {'choices': choices}
"""
def dec(func):
params, varargs, varkw, defaults = getargspec(func)
function_name = (name or getattr(func, '_decorated_function', func).__name__)
@functools.wraps(func)
def compile_func(parser, token):
bits = token.split_contents()[1:]
args, kwargs = parse_bits(
parser, bits, params, varargs, varkw, defaults,
takes_context, function_name,
)
return InclusionNode(
func, takes_context, args, kwargs, filename,
)
self.tag(function_name, compile_func)
return func
return dec
class TagHelperNode(Node):
"""
Base class for tag helper nodes such as SimpleNode and InclusionNode.
Manages the positional and keyword arguments to be passed to the decorated
function.
"""
def __init__(self, func, takes_context, args, kwargs):
self.func = func
self.takes_context = takes_context
self.args = args
self.kwargs = kwargs
def get_resolved_arguments(self, context):
resolved_args = [var.resolve(context) for var in self.args]
if self.takes_context:
resolved_args = [context] + resolved_args
resolved_kwargs = {k: v.resolve(context) for k, v in self.kwargs.items()}
return resolved_args, resolved_kwargs
class SimpleNode(TagHelperNode):
def __init__(self, func, takes_context, args, kwargs, target_var):
super(SimpleNode, self).__init__(func, takes_context, args, kwargs)
self.target_var = target_var
def render(self, context):
resolved_args, resolved_kwargs = self.get_resolved_arguments(context)
output = self.func(*resolved_args, **resolved_kwargs)
if self.target_var is not None:
context[self.target_var] = output
return ''
if context.autoescape:
output = conditional_escape(output)
return output
class InclusionNode(TagHelperNode):
def __init__(self, func, takes_context, args, kwargs, filename):
super(InclusionNode, self).__init__(func, takes_context, args, kwargs)
self.filename = filename
def render(self, context):
"""
Render the specified template and context. Cache the template object
in render_context to avoid reparsing and loading when used in a for
loop.
"""
resolved_args, resolved_kwargs = self.get_resolved_arguments(context)
_dict = self.func(*resolved_args, **resolved_kwargs)
t = context.render_context.get(self)
if t is None:
if isinstance(self.filename, Template):
t = self.filename
elif isinstance(getattr(self.filename, 'template', None), Template):
t = self.filename.template
elif not isinstance(self.filename, six.string_types) and is_iterable(self.filename):
t = context.template.engine.select_template(self.filename)
else:
t = context.template.engine.get_template(self.filename)
context.render_context[self] = t
new_context = context.new(_dict)
# Copy across the CSRF token, if present, because inclusion tags are
# often used for forms, and we need instructions for using CSRF
# protection to be as simple as possible.
csrf_token = context.get('csrf_token')
if csrf_token is not None:
new_context['csrf_token'] = csrf_token
return t.render(new_context)
def parse_bits(parser, bits, params, varargs, varkw, defaults,
takes_context, name):
"""
Parse bits for template tag helpers simple_tag and inclusion_tag, in
particular by detecting syntax errors and by extracting positional and
keyword arguments.
"""
if takes_context:
if params[0] == 'context':
params = params[1:]
else:
raise TemplateSyntaxError(
"'%s' is decorated with takes_context=True so it must "
"have a first argument of 'context'" % name)
args = []
kwargs = {}
unhandled_params = list(params)
for bit in bits:
# First we try to extract a potential kwarg from the bit
kwarg = token_kwargs([bit], parser)
if kwarg:
# The kwarg was successfully extracted
param, value = kwarg.popitem()
if param not in params and varkw is None:
# An unexpected keyword argument was supplied
raise TemplateSyntaxError(
"'%s' received unexpected keyword argument '%s'" %
(name, param))
elif param in kwargs:
# The keyword argument has already been supplied once
raise TemplateSyntaxError(
"'%s' received multiple values for keyword argument '%s'" %
(name, param))
else:
# All good, record the keyword argument
kwargs[str(param)] = value
if param in unhandled_params:
# If using the keyword syntax for a positional arg, then
# consume it.
unhandled_params.remove(param)
else:
if kwargs:
raise TemplateSyntaxError(
"'%s' received some positional argument(s) after some "
"keyword argument(s)" % name)
else:
# Record the positional argument
args.append(parser.compile_filter(bit))
try:
# Consume from the list of expected positional arguments
unhandled_params.pop(0)
except IndexError:
if varargs is None:
raise TemplateSyntaxError(
"'%s' received too many positional arguments" %
name)
if defaults is not None:
# Consider the last n params handled, where n is the
# number of defaults.
unhandled_params = unhandled_params[:-len(defaults)]
if unhandled_params:
# Some positional arguments were not supplied
raise TemplateSyntaxError(
"'%s' did not receive value(s) for the argument(s): %s" %
(name, ", ".join("'%s'" % p for p in unhandled_params)))
return args, kwargs
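# A worked example of parse_bits (names are hypothetical): for a tag function
# ``def my_tag(context, count, label='x')`` registered with takes_context=True,
# the token ``{% my_tag 3 label="y" %}`` produces bits == ['3', 'label="y"'],
# which parse_bits resolves to args == [FilterExpression for '3'] and
# kwargs == {'label': FilterExpression for '"y"'}, after stripping 'context'
# from params and consuming 'count' and 'label' from unhandled_params.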
def import_library(name):
"""
Load a Library object from a template tag module.
"""
try:
module = import_module(name)
except ImportError as e:
raise InvalidTemplateLibrary(
"Invalid template library specified. ImportError raised when "
"trying to load '%s': %s" % (name, e)
)
try:
return module.register
except AttributeError:
raise InvalidTemplateLibrary(
"Module %s does not have a variable named 'register'" % name,
)
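# For example, import_library('django.templatetags.i18n') returns the
# ``register`` Library instance defined in that module.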
| bsd-3-clause | -311,783,738,985,000,260 | 37.642424 | 96 | 0.572146 | false |
40223110/2015cda_0512 | static/Brython3.1.0-20150301-090019/Lib/_string.py | 625 | 1112 | """string helper module"""
import re
class __loader__(object):
pass
def formatter_field_name_split(fieldname):
"""split the argument as a field name"""
_list=[]
for _name in fieldname:
_parts = _name.split('.')
for _item in _parts:
is_attr=False #fix me
if re.match('\d+', _item):
_list.append((int(_item), is_attr))
else:
_list.append((_item, is_attr))
return _list[0][0], iter(_list[1:])
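# A rough behaviour sketch, assuming the caller passes a list of dotted
# names (this simplified helper always reports is_attr=False, unlike CPython):
#
#   first, rest = formatter_field_name_split(["0.name"])
#   # first == 0, list(rest) == [('name', False)]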
def formatter_parser(*args,**kw):
"""parse the argument as a format string"""
assert len(args)==1
assert isinstance(args[0], str)
_result=[]
for _match in re.finditer("([^{]*)?(\{[^}]*\})?", args[0]):
_pre, _fmt = _match.groups()
if _fmt is None:
_result.append((_pre, None, None, None))
elif _fmt == '{}':
_result.append((_pre, '', '', None))
else:
_m=re.match("\{([^!]*)!?(.*)?\}", _fmt)
_name=_m.groups(0)
_flags=_m.groups(1)
_result.append((_pre, _name, _flags, None))
return _result
| gpl-3.0 | 4,717,848,820,400,724,000 | 25.47619 | 63 | 0.488309 | false |
aexeagmbh/django-allauth | allauth/socialaccount/providers/paypal/views.py | 60 | 1606 | import requests
from allauth.socialaccount.providers.oauth2.views import (OAuth2Adapter,
OAuth2LoginView,
OAuth2CallbackView)
from .provider import PaypalProvider
class PaypalOAuth2Adapter(OAuth2Adapter):
provider_id = PaypalProvider.id
supports_state = False
@property
def authorize_url(self):
path = 'webapps/auth/protocol/openidconnect/v1/authorize'
return 'https://www.{0}/{1}'.format(self._get_endpoint(), path)
@property
def access_token_url(self):
path = "v1/identity/openidconnect/tokenservice"
return 'https://api.{0}/{1}'.format(self._get_endpoint(), path)
@property
def profile_url(self):
path = 'v1/identity/openidconnect/userinfo'
return 'https://api.{0}/{1}'.format(self._get_endpoint(), path)
def _get_endpoint(self):
settings = self.get_provider().get_settings()
if settings.get('MODE') == 'live':
return 'paypal.com'
else:
return 'sandbox.paypal.com'
def complete_login(self, request, app, token, **kwargs):
response = requests.post(self.profile_url,
params={'schema':'openid',
'access_token':token})
extra_data = response.json()
return self.get_provider().sociallogin_from_response(request, extra_data)
oauth2_login = OAuth2LoginView.adapter_view(PaypalOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(PaypalOAuth2Adapter)
| mit | -1,780,012,782,342,218,500 | 36.348837 | 81 | 0.603985 | false |
hiroakis/ansible | v1/ansible/module_utils/cloudstack.py | 118 | 13221 | # -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <[email protected]>
#
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import time

try:
    from cs import CloudStack, CloudStackException, read_config
    has_lib_cs = True
except ImportError:
    has_lib_cs = False
class AnsibleCloudStack:
def __init__(self, module):
if not has_lib_cs:
module.fail_json(msg="python library cs required: pip install cs")
self.result = {
'changed': False,
}
self.module = module
self._connect()
self.domain = None
self.account = None
self.project = None
self.ip_address = None
self.zone = None
self.vm = None
self.os_type = None
self.hypervisor = None
self.capabilities = None
def _connect(self):
api_key = self.module.params.get('api_key')
api_secret = self.module.params.get('secret_key')
api_url = self.module.params.get('api_url')
api_http_method = self.module.params.get('api_http_method')
api_timeout = self.module.params.get('api_timeout')
if api_key and api_secret and api_url:
self.cs = CloudStack(
endpoint=api_url,
key=api_key,
secret=api_secret,
timeout=api_timeout,
method=api_http_method
)
else:
self.cs = CloudStack(**read_config())
def get_or_fallback(self, key=None, fallback_key=None):
value = self.module.params.get(key)
if not value:
value = self.module.params.get(fallback_key)
return value
# TODO: for backward compatibility only, remove if not used anymore
def _has_changed(self, want_dict, current_dict, only_keys=None):
return self.has_changed(want_dict=want_dict, current_dict=current_dict, only_keys=only_keys)
def has_changed(self, want_dict, current_dict, only_keys=None):
for key, value in want_dict.iteritems():
# Optionally limit by a list of keys
if only_keys and key not in only_keys:
                continue
            # Skip None values
            if value is None:
                continue
if key in current_dict:
# API returns string for int in some cases, just to make sure
if isinstance(value, int):
current_dict[key] = int(current_dict[key])
elif isinstance(value, str):
current_dict[key] = str(current_dict[key])
# Only need to detect a singe change, not every item
if value != current_dict[key]:
return True
return False
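    # A minimal sketch of how has_changed() is typically used by the
    # cloudstack modules (argument values are hypothetical):
    #
    #   args = {'displaytext': self.module.params.get('display_text')}
    #   if self.has_changed(args, instance, only_keys=['displaytext']):
    #       self.result['changed'] = True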
def _get_by_key(self, key=None, my_dict={}):
if key:
if key in my_dict:
return my_dict[key]
self.module.fail_json(msg="Something went wrong: %s not found" % key)
return my_dict
def get_project(self, key=None):
if self.project:
return self._get_by_key(key, self.project)
project = self.module.params.get('project')
if not project:
return None
args = {}
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
projects = self.cs.listProjects(**args)
if projects:
for p in projects['project']:
if project.lower() in [ p['name'].lower(), p['id'] ]:
self.project = p
return self._get_by_key(key, self.project)
self.module.fail_json(msg="project '%s' not found" % project)
def get_ip_address(self, key=None):
if self.ip_address:
return self._get_by_key(key, self.ip_address)
ip_address = self.module.params.get('ip_address')
if not ip_address:
self.module.fail_json(msg="IP address param 'ip_address' is required")
args = {}
args['ipaddress'] = ip_address
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['projectid'] = self.get_project(key='id')
ip_addresses = self.cs.listPublicIpAddresses(**args)
if not ip_addresses:
self.module.fail_json(msg="IP address '%s' not found" % args['ipaddress'])
self.ip_address = ip_addresses['publicipaddress'][0]
return self._get_by_key(key, self.ip_address)
def get_vm(self, key=None):
if self.vm:
return self._get_by_key(key, self.vm)
vm = self.module.params.get('vm')
if not vm:
self.module.fail_json(msg="Virtual machine param 'vm' is required")
args = {}
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['projectid'] = self.get_project(key='id')
args['zoneid'] = self.get_zone(key='id')
vms = self.cs.listVirtualMachines(**args)
if vms:
for v in vms['virtualmachine']:
if vm in [ v['name'], v['displayname'], v['id'] ]:
self.vm = v
return self._get_by_key(key, self.vm)
self.module.fail_json(msg="Virtual machine '%s' not found" % vm)
def get_zone(self, key=None):
if self.zone:
return self._get_by_key(key, self.zone)
zone = self.module.params.get('zone')
zones = self.cs.listZones()
# use the first zone if no zone param given
if not zone:
self.zone = zones['zone'][0]
return self._get_by_key(key, self.zone)
if zones:
for z in zones['zone']:
if zone in [ z['name'], z['id'] ]:
self.zone = z
return self._get_by_key(key, self.zone)
self.module.fail_json(msg="zone '%s' not found" % zone)
def get_os_type(self, key=None):
if self.os_type:
            return self._get_by_key(key, self.os_type)
os_type = self.module.params.get('os_type')
if not os_type:
return None
os_types = self.cs.listOsTypes()
if os_types:
for o in os_types['ostype']:
if os_type in [ o['description'], o['id'] ]:
self.os_type = o
return self._get_by_key(key, self.os_type)
self.module.fail_json(msg="OS type '%s' not found" % os_type)
def get_hypervisor(self):
if self.hypervisor:
return self.hypervisor
hypervisor = self.module.params.get('hypervisor')
hypervisors = self.cs.listHypervisors()
# use the first hypervisor if no hypervisor param given
if not hypervisor:
self.hypervisor = hypervisors['hypervisor'][0]['name']
return self.hypervisor
for h in hypervisors['hypervisor']:
if hypervisor.lower() == h['name'].lower():
self.hypervisor = h['name']
return self.hypervisor
self.module.fail_json(msg="Hypervisor '%s' not found" % hypervisor)
def get_account(self, key=None):
if self.account:
return self._get_by_key(key, self.account)
account = self.module.params.get('account')
if not account:
return None
domain = self.module.params.get('domain')
if not domain:
self.module.fail_json(msg="Account must be specified with Domain")
args = {}
args['name'] = account
args['domainid'] = self.get_domain(key='id')
args['listall'] = True
accounts = self.cs.listAccounts(**args)
if accounts:
self.account = accounts['account'][0]
return self._get_by_key(key, self.account)
self.module.fail_json(msg="Account '%s' not found" % account)
def get_domain(self, key=None):
if self.domain:
return self._get_by_key(key, self.domain)
domain = self.module.params.get('domain')
if not domain:
return None
args = {}
args['listall'] = True
domains = self.cs.listDomains(**args)
if domains:
for d in domains['domain']:
if d['path'].lower() in [ domain.lower(), "root/" + domain.lower(), "root" + domain.lower() ]:
self.domain = d
return self._get_by_key(key, self.domain)
self.module.fail_json(msg="Domain '%s' not found" % domain)
def get_tags(self, resource=None):
existing_tags = self.cs.listTags(resourceid=resource['id'])
if existing_tags:
return existing_tags['tag']
return []
    def _delete_tags(self, resource, resource_type, tags):
        existing_tags = resource['tags']
        tags_to_delete = []
        # 'tags' is a list of dicts like {'key': ..., 'value': ...}; build a
        # lookup so existing tags can be compared against the wanted values.
        wanted_tags = dict((tag['key'], tag['value']) for tag in tags)
        for existing_tag in existing_tags:
            if existing_tag['key'] in wanted_tags:
                if existing_tag['value'] != wanted_tags[existing_tag['key']]:
                    tags_to_delete.append(existing_tag)
            else:
                tags_to_delete.append(existing_tag)
if tags_to_delete:
self.result['changed'] = True
if not self.module.check_mode:
args = {}
args['resourceids'] = resource['id']
args['resourcetype'] = resource_type
args['tags'] = tags_to_delete
self.cs.deleteTags(**args)
def _create_tags(self, resource, resource_type, tags):
tags_to_create = []
for i, tag_entry in enumerate(tags):
tag = {
'key': tag_entry['key'],
'value': tag_entry['value'],
}
tags_to_create.append(tag)
if tags_to_create:
self.result['changed'] = True
if not self.module.check_mode:
args = {}
args['resourceids'] = resource['id']
args['resourcetype'] = resource_type
args['tags'] = tags_to_create
self.cs.createTags(**args)
def ensure_tags(self, resource, resource_type=None):
if not resource_type or not resource:
self.module.fail_json(msg="Error: Missing resource or resource_type for tags.")
if 'tags' in resource:
tags = self.module.params.get('tags')
if tags is not None:
self._delete_tags(resource, resource_type, tags)
self._create_tags(resource, resource_type, tags)
resource['tags'] = self.get_tags(resource)
return resource
def get_capabilities(self, key=None):
if self.capabilities:
return self._get_by_key(key, self.capabilities)
capabilities = self.cs.listCapabilities()
self.capabilities = capabilities['capability']
return self._get_by_key(key, self.capabilities)
# TODO: for backward compatibility only, remove if not used anymore
def _poll_job(self, job=None, key=None):
return self.poll_job(job=job, key=key)
def poll_job(self, job=None, key=None):
if 'jobid' in job:
while True:
res = self.cs.queryAsyncJobResult(jobid=job['jobid'])
if res['jobstatus'] != 0 and 'jobresult' in res:
if 'errortext' in res['jobresult']:
self.module.fail_json(msg="Failed: '%s'" % res['jobresult']['errortext'])
if key and key in res['jobresult']:
job = res['jobresult'][key]
break
time.sleep(2)
return job
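    # A minimal sketch of polling an asynchronous CloudStack job; the API call
    # and the 'virtualmachine' result key are illustrative assumptions:
    #
    #   res = self.cs.deployVirtualMachine(**args)
    #   vm = self.poll_job(res, 'virtualmachine')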
| gpl-3.0 | -3,366,101,794,507,996,000 | 34.923913 | 110 | 0.571407 | false |
zasdfgbnm/tensorflow | tensorflow/python/ops/nn_batchnorm_test.py | 5 | 30554 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for batch_norm related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
@test_util.with_c_api
class BatchNormalizationTest(test.TestCase):
def _npBatchNorm(self, x, m, v, beta, gamma, epsilon,
scale_after_normalization, shift_after_normalization):
y = (x - m) / np.sqrt(v + epsilon)
y = y * gamma if scale_after_normalization else y
return y + beta if shift_after_normalization else y
def _opsBatchNorm(self, x, m, v, beta, gamma, epsilon,
scale_after_normalization, shift_after_normalization):
y = (x - m) * math_ops.rsqrt(v + epsilon)
if scale_after_normalization:
y = gamma * y
return y + beta if shift_after_normalization else y
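  # Reference identity exercised by the two helpers above:
  #   y = gamma * (x - m) / sqrt(v + epsilon) + beta
  # with gamma dropped when scale_after_normalization is False and beta
  # dropped when shift_after_normalization is False.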
def _tfBatchNormV1(self, x, m, v, beta, gamma, epsilon,
scale_after_normalization):
"""Original implementation."""
    # pylint: disable=protected-access
    test_util.set_producer_version(ops.get_default_graph(), 8)
return gen_nn_ops._batch_norm_with_global_normalization(
x, m, v, beta, gamma, epsilon, scale_after_normalization)
# pylint: enable=protected-access
def _tfBatchNormV1BW(self, x, m, v, beta, gamma, epsilon,
scale_after_normalization):
"""Re-implementation of the original kernel for backward compatibility."""
return nn_impl.batch_norm_with_global_normalization(
x, m, v, beta, gamma, epsilon, scale_after_normalization)
def _tfBatchNormV2(self, x, m, v, beta, gamma, epsilon,
scale_after_normalization, shift_after_normalization):
"""New implementation."""
return nn_impl.batch_normalization(x, m, v, beta if
shift_after_normalization else None,
gamma if scale_after_normalization else
None, epsilon)
def testBatchNorm(self):
x_shape = [3, 5, 4, 2]
param_shape = [2]
x_val = np.random.random_sample(x_shape).astype(np.float32)
m_val = np.random.random_sample(param_shape).astype(np.float32)
v_val = np.random.random_sample(param_shape).astype(np.float32)
beta_val = np.random.random_sample(param_shape).astype(np.float32)
gamma_val = np.random.random_sample(param_shape).astype(np.float32)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu) as sess:
x = constant_op.constant(x_val, name="x")
m = constant_op.constant(m_val, name="m")
v = constant_op.constant(v_val, name="v")
beta = constant_op.constant(beta_val, name="beta")
gamma = constant_op.constant(gamma_val, name="gamma")
epsilon = 0.001
for scale_after_normalization in [True, False]:
for shift_after_normalization in [True, False]:
bn2 = self._tfBatchNormV2(x, m, v, beta, gamma, epsilon,
scale_after_normalization,
shift_after_normalization)
bn1bw = self._tfBatchNormV1BW(x, m, v, beta, gamma, epsilon,
scale_after_normalization)
bn1 = self._tfBatchNormV1(x, m, v, beta, gamma, epsilon,
scale_after_normalization)
on = self._opsBatchNorm(x, m, v, beta, gamma, epsilon,
scale_after_normalization,
shift_after_normalization)
np_bn = self._npBatchNorm(x_val, m_val, v_val, beta_val, gamma_val,
epsilon, scale_after_normalization,
shift_after_normalization)
tf_bn_v2, tf_bn_v1bw, tf_bn_v1, ops_bn = sess.run(
[bn2, bn1bw, bn1, on])
self.assertAllClose(np_bn, ops_bn, atol=0.00001)
self.assertAllClose(np_bn, tf_bn_v2, atol=0.00001)
self.assertAllClose(tf_bn_v2, ops_bn, atol=0.00001)
# shift_after_normalization=False is not supported in v1.
if shift_after_normalization:
self.assertAllClose(np_bn, tf_bn_v1bw, atol=0.00001)
self.assertAllClose(np_bn, tf_bn_v1, atol=0.00001)
self.assertAllClose(tf_bn_v1, ops_bn, atol=0.00001)
self.assertAllClose(tf_bn_v1bw, ops_bn, atol=0.00001)
def _testBatchNormGradient(self,
param_index,
tag,
scale_after_normalization,
shift_after_normalization,
version,
err_tolerance=1e-11):
x_shape = [3, 5, 4, 5]
param_shape = [5]
np.random.seed(1) # Make it reproducible.
x_val = np.random.random_sample(x_shape).astype(np.float64)
m_val = np.random.random_sample(param_shape).astype(np.float64)
v_val = np.random.random_sample(param_shape).astype(np.float64)
beta_val = np.random.random_sample(param_shape).astype(np.float64)
gamma_val = np.random.random_sample(param_shape).astype(np.float64)
with self.test_session():
x = constant_op.constant(x_val, name="x")
m = constant_op.constant(m_val, name="m")
v = constant_op.constant(v_val, name="v")
beta = constant_op.constant(beta_val, name="beta")
gamma = constant_op.constant(gamma_val, name="gamma")
epsilon = 0.001
if version == 1:
output = self._tfBatchNormV1(x, m, v, beta, gamma, epsilon,
scale_after_normalization)
elif version == 2:
output = self._tfBatchNormV2(x, m, v, beta, gamma, epsilon,
scale_after_normalization,
shift_after_normalization)
else:
print("Invalid version", version)
raise ValueError()
all_params = [x, m, v, beta, gamma]
all_shapes = [x_shape, param_shape, param_shape, param_shape, param_shape]
err = gradient_checker.compute_gradient_error(all_params[param_index],
all_shapes[param_index],
output, x_shape)
print("Batch normalization v%d %s gradient %s scale and %s shift err = " %
(version, tag, "with" if scale_after_normalization else "without",
"with" if shift_after_normalization else "without"), err)
self.assertLess(err, err_tolerance)
def _testBatchNormGradientInAllNeedConfigs(self,
param_index,
tag,
err_tolerance=1e-11):
for scale_after_normalization in [True, False]:
for shift_after_normalization in [True, False]:
# shift_after_normalization=False is not supported in version 1.
for v in ([1, 2] if shift_after_normalization else [2]):
self._testBatchNormGradient(param_index, tag,
scale_after_normalization,
shift_after_normalization, v,
err_tolerance)
def testBatchNormInputGradient(self):
self._testBatchNormGradientInAllNeedConfigs(0, "x")
def testBatchNormMeanGradient(self):
self._testBatchNormGradientInAllNeedConfigs(1, "mean")
def testBatchNormVarianceGradient(self):
self._testBatchNormGradientInAllNeedConfigs(
2, "variance", err_tolerance=1e-03)
def testBatchNormBetaGradient(self):
# Since beta does not exist when scale_after_normalization=False, we only
# test for scale_after_normalization=True.
for scale_after_normalization in [True, False]:
for v in [1, 2]:
self._testBatchNormGradient(3, "beta", scale_after_normalization, True,
v)
def testBatchNormGammaGradient(self):
# If scale_after_normalization is False, backprop for gamma in v1
# will be 0. In version 2 of the API, if scale_after_normalization is False,
# gamma is not used at all, and the gradient is None, which displeases the
# gradient checker.
for scale_after_normalization in [True, False]:
self._testBatchNormGradient(4, "gamma", scale_after_normalization, True,
1)
for shift_after_normalization in [True, False]:
self._testBatchNormGradient(4, "gamma", True, shift_after_normalization,
2)
def testBatchNormGradImpl(self):
x_shape = [7, 5, 4, 6]
param_shape = [6]
np.random.seed(1) # Make it reproducible.
x_val = np.random.random_sample(x_shape).astype(np.float32)
m_val = np.random.random_sample(param_shape).astype(np.float32)
v_val = np.random.random_sample(param_shape).astype(np.float32)
beta_val = np.random.random_sample(param_shape).astype(np.float32)
gamma_val = np.random.random_sample(param_shape).astype(np.float32)
backprop_val = np.random.random_sample(x_shape).astype(np.float32)
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu) as sess:
x = constant_op.constant(x_val, name="x")
m = constant_op.constant(m_val, name="m")
v = constant_op.constant(v_val, name="v")
beta = constant_op.constant(beta_val, name="beta")
gamma = constant_op.constant(gamma_val, name="gamma")
backprop = constant_op.constant(backprop_val, name="backprop")
epsilon = 0.001
for scale_after_normalization in [True, False]:
# _batch_norm_with_global_normalization_grad is deprecated in v9
test_util.set_producer_version(ops.get_default_graph(), 8)
grad = gen_nn_ops._batch_norm_with_global_normalization_grad(
x, m, v, gamma, backprop, epsilon, scale_after_normalization)
dx, dm, dv, db, dg = grad
self.assertEqual(grad.dx, dx)
self.assertEqual(grad.dm, dm)
self.assertEqual(grad.dv, dv)
self.assertEqual(grad.db, db)
self.assertEqual(grad.dg, dg)
on = self._opsBatchNorm(x, m, v, beta, gamma, epsilon,
scale_after_normalization, True)
odx, odm, odv, odb, odg = gradients_impl.gradients(
[on], [x, m, v, beta, gamma], [backprop])
if scale_after_normalization:
all_grads = sess.run([dx, dm, dv, db, dg, odx, odm, odv, odb, odg])
to_check = ["dx", "dm", "dv", "db", "dg"]
else:
all_grads = sess.run([dx, dm, dv, db, odx, odm, odv, odb])
to_check = ["dx", "dm", "dv", "db"]
for i, _ in enumerate(to_check):
self.assertAllClose(
all_grads[i + len(to_check)], all_grads[i], atol=0.000001)
def testBatchNormKeepDims(self):
"""Test for tf.nn.moments(..., keep_dims=True / False).
Make sure that parameters with shape (1, 1, 1, depth) yield the same
result as parameters with shape (depth)
"""
x_shape = (3, 5, 4, 2)
param_shape = (2)
keep_dims_param_shape = (1, 1, 1, 2)
x_val = np.random.random_sample(x_shape).astype(np.float32)
m_val = np.random.random_sample(param_shape).astype(np.float32)
v_val = np.random.random_sample(param_shape).astype(np.float32)
beta_val = np.random.random_sample(param_shape).astype(np.float32)
gamma_val = np.random.random_sample(param_shape).astype(np.float32)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu) as sess:
x = constant_op.constant(x_val, name="x")
m = constant_op.constant(m_val, name="m")
v = constant_op.constant(v_val, name="v")
beta = constant_op.constant(beta_val, name="beta")
gamma = constant_op.constant(gamma_val, name="gamma")
keep_dims_m = array_ops.reshape(
m, keep_dims_param_shape, name="keep_dims_m")
keep_dims_v = array_ops.reshape(
v, keep_dims_param_shape, name="keep_dims_v")
keep_dims_beta = array_ops.reshape(
beta, keep_dims_param_shape, name="keep_dims_beta")
keep_dims_gamma = array_ops.reshape(
gamma, keep_dims_param_shape, name="keep_dims_gamma")
epsilon = 0.001
for scale_after_normalization in [True, False]:
for shift_after_normalization in [True, False]:
bn = self._tfBatchNormV2(x, m, v, beta, gamma, epsilon,
scale_after_normalization,
shift_after_normalization)
keep_dims_bn = self._tfBatchNormV2(x, keep_dims_m, keep_dims_v,
keep_dims_beta, keep_dims_gamma,
epsilon,
scale_after_normalization,
shift_after_normalization)
tf_batch_norm, keep_dims_tf_batch_norm = sess.run(
[bn, keep_dims_bn])
self.assertEquals(x_shape, tf_batch_norm.shape)
self.assertEquals(x_shape, keep_dims_tf_batch_norm.shape)
self.assertAllClose(
tf_batch_norm, keep_dims_tf_batch_norm, atol=0.000001)
def _testBatchNormArbitraryShapes(self, x_shape, param_shape, atol=0.0001):
x_val = np.random.random_sample(x_shape).astype(np.float32)
m_val = np.random.random_sample(param_shape).astype(np.float32)
v_val = np.random.random_sample(param_shape).astype(np.float32)
beta_val = np.random.random_sample(param_shape).astype(np.float32)
gamma_val = np.random.random_sample(param_shape).astype(np.float32)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu) as sess:
x = constant_op.constant(x_val, name="x")
m = constant_op.constant(m_val, name="m")
v = constant_op.constant(v_val, name="v")
beta = constant_op.constant(beta_val, name="beta")
gamma = constant_op.constant(gamma_val, name="gamma")
epsilon = 0.001
for scale_after_normalization in [True, False]:
for shift_after_normalization in [True, False]:
bn = self._tfBatchNormV2(x, m, v, beta, gamma, epsilon,
scale_after_normalization,
shift_after_normalization)
np_batch_norm = self._npBatchNorm(x_val, m_val, v_val, beta_val,
gamma_val, epsilon,
scale_after_normalization,
shift_after_normalization)
[tf_batch_norm] = sess.run([bn])
self.assertEquals(x_shape, np_batch_norm.shape)
self.assertEquals(x_shape, tf_batch_norm.shape)
self.assertAllClose(np_batch_norm, tf_batch_norm, atol=atol)
def testBatchNormArbitraryShapes(self):
"""Test for a variety of shapes and moments.
Batch normalization is expected to work regardless of the position and
dimensionality of the 'depth' axis/axes.
"""
self._testBatchNormArbitraryShapes((3, 3), (1, 3))
self._testBatchNormArbitraryShapes((3, 3), (3, 1))
self._testBatchNormArbitraryShapes((3, 2, 4, 5), (1, 2, 1, 1))
self._testBatchNormArbitraryShapes(
(2, 3, 2, 4, 5), (1, 1, 1, 4, 5), atol=0.005)
@test_util.with_c_api
class SufficientStatisticsTest(test.TestCase):
def _npSuffStats(self, x, axes, shift, keep_dims):
axis = tuple(axes)
if shift is not None:
m_ss = np.sum(x - shift, axis=axis, keepdims=keep_dims)
v_ss = np.sum((x - shift) * (x - shift), axis=axis, keepdims=keep_dims)
else:
m_ss = np.sum(x, axis=axis, keepdims=keep_dims)
v_ss = np.sum(x * x, axis=axis, keepdims=keep_dims)
count = 1.0
for d in xrange(x.ndim):
if d in set(axes):
count *= x.shape[d]
if not keep_dims:
shift = np.squeeze(shift, axis=axis)
return count, m_ss, v_ss, shift
def _opSuffStats(self, x, axes, shift, keep_dims):
return nn_impl.sufficient_statistics(x, axes, shift, keep_dims)
def _testSuffStats(self, x_shape, axes, shift, keep_dims, has_shape):
x_val = np.random.random_sample(x_shape).astype(np.float32)
np_c, np_m, np_v, np_s = self._npSuffStats(x_val, axes, shift, keep_dims)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu) as sess:
if has_shape:
x = constant_op.constant(x_val, name="x")
x.set_shape(x_shape)
op_c, op_m, op_v, op_s = self._opSuffStats(x, axes, shift, keep_dims)
if shift:
tf_c, tf_m, tf_v, tf_s = sess.run([op_c, op_m, op_v, op_s])
else:
tf_c, tf_m, tf_v = sess.run([op_c, op_m, op_v])
else:
x = array_ops.placeholder(
dtype=dtypes.float32, shape=[None] * len(x_shape), name="x")
op_c, op_m, op_v, op_s = self._opSuffStats(x, axes, shift, keep_dims)
if shift:
tf_c, tf_m, tf_v, tf_s = sess.run([op_c, op_m, op_v, op_s],
feed_dict={x: x_val})
else:
tf_c, tf_m, tf_v = sess.run([op_c, op_m, op_v],
feed_dict={x: x_val})
self.assertAllClose(np_c, tf_c, atol=0.000001)
self.assertAllClose(np_m, tf_m, atol=0.000001)
self.assertAllClose(np_v, tf_v, atol=0.000001)
if shift:
self.assertAllClose(np_s, tf_s, atol=0.000001)
def testSuffStats(self):
for has_shape in [True, False]:
for keep_dims in [True, False]:
for shift in [None, 1.0]:
self._testSuffStats([2, 3], [1], shift, keep_dims, has_shape)
self._testSuffStats([2, 3], [0], shift, keep_dims, has_shape)
self._testSuffStats([1, 2, 3], [0, 2], shift, keep_dims, has_shape)
@test_util.with_c_api
class NormalizeMomentsTest(test.TestCase):
def _npNormalizeMoments(self, counts, mean_ss, variance_ss, shift):
mean = mean_ss / counts
variance = variance_ss / counts - mean * mean
if shift is not None:
mean += shift
return mean, variance
def _opNormalizeMoments(self, counts, mean_ss, variance_ss, shift):
return nn_impl.normalize_moments(counts, mean_ss, variance_ss, shift)
def _testNormalizeMoments(self, shape, shift):
counts = np.ones([1]).astype(np.float32)
mean_ss = np.random.random_sample(shape).astype(np.float32)
variance_ss = np.random.random_sample(shape).astype(np.float32)
variance_ss *= variance_ss
if shift:
shift_v = np.random.random_sample(shape).astype(np.float32)
else:
shift_v = None
npm, npv = self._npNormalizeMoments(counts, mean_ss, variance_ss, shift_v)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu) as sess:
tf_counts = constant_op.constant(counts, name="counts")
tf_mean_ss = constant_op.constant(mean_ss, name="mean_ss")
tf_variance_ss = constant_op.constant(variance_ss, name="variance_ss")
if shift:
tf_shift_v = constant_op.constant(shift_v, name="shift")
else:
tf_shift_v = None
opm, opv = self._opNormalizeMoments(tf_counts, tf_mean_ss,
tf_variance_ss, tf_shift_v)
tfm, tfv = sess.run([opm, opv])
self.assertAllClose(npm, tfm, atol=0.000001)
self.assertAllClose(npv, tfv, atol=0.000001)
def testNormalizeMoments(self):
for shift in [None, 4.0]:
self._testNormalizeMoments([3], shift)
self._testNormalizeMoments([2, 3], shift)
@test_util.with_c_api
class MomentsTest(test.TestCase):
def _unweighted_moments(self, x, axes, keep_dims=False, extra_out_grads=None):
# Method to compute moments of `x` wrt `axes`.
#
# This is exposed so WeightedMomentsTest can inherit the tests and
# assertions from MomentsTest; the extra_out_grads argument allows
# its inherited gradient tests to assert gradients against the
# weights as well as the input values.
return nn_impl.moments(x, axes, keep_dims=keep_dims)
def RunMomentTestWithDynamicShape(self, shape, axes, keep_dims, dtype):
with self.test_session():
# shape = [batch, width, height, depth]
assert len(shape) == 4
x_numpy = np.random.normal(size=shape).astype(np.float32)
x = array_ops.placeholder(dtype, shape=[None] * len(shape))
mean, var = self._unweighted_moments(x, axes, keep_dims=keep_dims)
num_elements = np.prod([shape[i] for i in axes])
ax = tuple(axes)
expected_mean = np.sum(x_numpy, axis=ax,
keepdims=keep_dims) / num_elements
expected_mean_squared = np.multiply(expected_mean, expected_mean)
expected_x_squared = np.sum(np.multiply(x_numpy, x_numpy),
axis=ax,
keepdims=keep_dims) / num_elements
expected_variance = expected_x_squared - expected_mean_squared
# Check that the moments are correct.
self.assertAllCloseAccordingToType(
expected_mean, mean.eval(feed_dict={x: x_numpy}))
self.assertAllCloseAccordingToType(
expected_variance, var.eval(feed_dict={x: x_numpy}))
def RunMomentTest(self, shape, axes, keep_dims, dtype):
with self.test_session():
# shape = [batch, width, height, depth]
assert len(shape) == 4
x_numpy = np.random.normal(size=shape).astype(np.float32)
x = math_ops.cast(constant_op.constant(x_numpy), dtype=dtype)
# Compute the expected values at high precision since the method
# is prone to catastrophic cancellation:
x_numpy = x_numpy.astype(np.float128)
mean, var = self._unweighted_moments(x, axes, keep_dims=keep_dims)
num_elements = np.prod([shape[i] for i in axes])
ax = tuple(axes)
expected_mean = np.sum(x_numpy, axis=ax,
keepdims=keep_dims) / num_elements
expected_mean_squared = np.multiply(expected_mean, expected_mean)
expected_x_squared = np.sum(np.multiply(x_numpy, x_numpy),
axis=ax,
keepdims=keep_dims) / num_elements
expected_variance = expected_x_squared - expected_mean_squared
# Check that the moments are correct.
self.assertAllCloseAccordingToType(expected_mean, mean.eval())
self.assertAllCloseAccordingToType(expected_variance, var.eval())
def testBasic(self):
for keep_dims in [False, True]:
for dtype in [dtypes.float32, dtypes.float16]:
self.RunMomentTest(
shape=[2, 3, 5, 4], axes=[0], keep_dims=keep_dims, dtype=dtype)
self.RunMomentTestWithDynamicShape(
shape=[2, 3, 5, 4], axes=[0], keep_dims=keep_dims, dtype=dtype)
def testGlobalNormalization(self):
for keep_dims in [False, True]:
for dtype in [dtypes.float32, dtypes.float16]:
self.RunMomentTest(
shape=[2, 3, 5, 4],
axes=[0, 1, 2],
keep_dims=keep_dims,
dtype=dtype)
self.RunMomentTestWithDynamicShape(
shape=[2, 3, 5, 4],
axes=[0, 1, 2],
keep_dims=keep_dims,
dtype=dtype)
def testAxes(self):
for keep_dims in [False, True]:
for dtype in [dtypes.float32, dtypes.float16]:
self.RunMomentTest(
shape=[2, 3, 5, 4],
axes=[1, 2, 3],
keep_dims=keep_dims,
dtype=dtype)
self.RunMomentTestWithDynamicShape(
shape=[2, 3, 5, 4],
axes=[1, 2, 3],
keep_dims=keep_dims,
dtype=dtype)
def _testGlobalGradient(self, from_y="mean"):
with self.test_session():
x_shape = [3, 5, 4, 2]
x_val = np.random.random_sample(x_shape).astype(np.float64)
x = constant_op.constant(x_val)
x.set_shape(x_shape)
axes = [0, 1, 2]
y_shape = [2] # Depth of x
inputs_to_compute_gradients_for = [x]
out_mean, out_var = self._unweighted_moments(
x, axes, extra_out_grads=inputs_to_compute_gradients_for)
if from_y == "mean":
y = out_mean
elif from_y == "var":
y = out_var
for (i, v) in enumerate(inputs_to_compute_gradients_for):
err = gradient_checker.compute_gradient_error(v,
v.get_shape().as_list(),
y, y_shape)
print("Moments %s gradient err vs input %d = %g" % (from_y, i, err))
self.assertLess(err, 1e-11)
def testMeanGlobalGradient(self):
self._testGlobalGradient(from_y="mean")
def testVarGlobalGradient(self):
self._testGlobalGradient(from_y="var")
@test_util.with_c_api
class WeightedMomentsTest(MomentsTest):
"""Tests for nn.weighted_moments.
Note that this test inherits from MomentsTest, inheriting all its
test methods!
It modifies MomentsTest in two ways:
a) By overriding _unweighted_moments, all the codepaths in
MomentsTest are executed, but with calls to tf.nn.moments()
replaced by calls to tf.nn.weighted_moments() with a constant
weight of 1.
b) By overriding RunMomentTest and RunMomentTestWithDynamicShape,
this test adds multiple additional calls to
RunWeightedMomentsTest() to exercise correctness with
non-constant weights and varying broadcasting situations. (It
also continues to call MomentsTest.Run(Weighted)?MomentsTest as
well.)
"""
def _unweighted_moments(self, x, axes, keep_dims=False, extra_out_grads=None):
weights = constant_op.constant(1, dtype=x.dtype)
if extra_out_grads is not None:
# We want to assert gradients WRT weights as well as X!
extra_out_grads.append(weights)
return nn_impl.weighted_moments(x, axes, weights, keep_dims=keep_dims)
def RunMomentTest(self, shape, axes, keep_dims, dtype, dynshapes=False):
if not dynshapes:
super(WeightedMomentsTest, self).RunMomentTest(shape, axes, keep_dims,
dtype)
else:
super(WeightedMomentsTest, self).RunMomentTestWithDynamicShape(shape,
axes,
keep_dims,
dtype)
# 1:1 weights and inputs
self.RunWeightedMomentTest(shape, shape, axes, keep_dims, dtype)
# Various broadcasting combinations
for idx in range(len(shape)):
# try broadcasting weights in all positions
weight_shape = [1] * len(shape)
weight_shape[idx] = shape[idx]
self.RunWeightedMomentTest(shape, weight_shape, axes, keep_dims, dtype)
# Also try broadcasting with a suffix of length n
weight_shape = shape[-(idx + 1):]
self.RunWeightedMomentTest(
shape, weight_shape, axes, keep_dims, dtype, dynshapes=dynshapes)
def RunMomentTestWithDynamicShape(self, shape, axes, keep_dims, dtype):
self.RunMomentTest(shape, axes, keep_dims, dtype, dynshapes=True)
def RunWeightedMomentTest(self,
shape,
weights_shape,
axes,
keep_dims,
dtype,
dynshapes=False):
with self.test_session() as s:
x_numpy = np.random.normal(size=shape).astype(np.float32)
weights_numpy = np.absolute( # weights must be positive
np.random.normal(
size=weights_shape, loc=1.0).astype(np.float32))
# Expand the numpy version to higher precision
x_numpy = x_numpy.astype(np.float128)
weights_numpy = weights_numpy.astype(np.float128)
x_shape = [None] * len(shape) if dynshapes else shape
weights_shape = ([None] * len(weights_shape) if dynshapes else
weights_shape)
x = array_ops.placeholder(dtype, shape=x_shape)
weights = array_ops.placeholder(dtype, shape=weights_shape)
mean, var = nn_impl.weighted_moments(
x, axes, weights, keep_dims=keep_dims)
ax = tuple(axes)
def _np_weighted_sum(v):
return np.sum(weights_numpy * v, axis=ax, keepdims=keep_dims)
weight_sum = _np_weighted_sum(np.ones_like(x_numpy))
expected_mean = _np_weighted_sum(x_numpy) / weight_sum
expected_mean_squared = np.multiply(expected_mean, expected_mean)
expected_x_squared = (_np_weighted_sum(np.multiply(x_numpy, x_numpy)) /
weight_sum)
expected_variance = expected_x_squared - expected_mean_squared
mean_v, var_v = s.run([mean, var],
feed_dict={x: x_numpy,
weights: weights_numpy})
self.assertAllCloseAccordingToType(expected_mean, mean_v)
self.assertAllCloseAccordingToType(expected_variance, var_v)
if __name__ == "__main__":
test.main()
| apache-2.0 | 8,058,398,854,068,209,000 | 43.539359 | 80 | 0.595699 | false |
nimbis/django-cms | cms/management/commands/subcommands/tree.py | 4 | 5434 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from collections import OrderedDict
from cms.models import Page, CMSPlugin
from .base import SubcommandsCommand
def get_descendant_ids(root_id):
"""
    Return a generator of primary keys representing the descendants
    of the given page ID (root_id).
"""
# Note this is done because get_descendants() can't be trusted
# as the tree can be corrupt.
children = Page.objects.filter(parent=root_id).values_list('pk', flat=True)
for child_id in children.iterator():
yield child_id
for descendant_id in get_descendant_ids(child_id):
yield descendant_id
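# A small usage sketch (the page id 42 is hypothetical): the generator walks
# the tree depth-first through the parent foreign key rather than relying on
# the possibly corrupt MP tree fields.
#
#   ids = list(get_descendant_ids(42))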
class FixTreeCommand(SubcommandsCommand):
help_string = 'Repairing Materialized Path Tree for Pages'
command_name = 'fix-tree'
def handle(self, *args, **options):
"""
Repairs the tree
"""
self.stdout.write('fixing page tree')
Page.fix_tree()
root_draft_pages = Page.objects.filter(
publisher_is_draft=True,
parent__isnull=True,
)
last = None
try:
first = root_draft_pages.order_by('path')[0]
except IndexError:
first = None
for page in root_draft_pages.order_by('site__pk', 'path'):
if last:
last = last.reload()
page = page.reload()
page.move(target=last, pos='right')
elif first and first.pk != page.pk:
page.move(target=first, pos='left')
last = page.reload()
root_public_pages = Page.objects.filter(
publisher_is_draft=False,
parent__isnull=True,
).order_by('publisher_public__path')
# Filter out any root public pages whose draft page
# has a parent.
# This avoids a tree corruption where the public root page
# is added as a child of the draft page's draft parent
# instead of the draft page's public parent
root_public_pages = root_public_pages.filter(
publisher_public__parent__isnull=True
)
for page in root_public_pages:
page = page.reload()
public = page.publisher_public
page.move(target=public, pos='right')
for root in root_draft_pages.order_by('site__pk', 'path'):
self._update_descendants_tree(root)
self.stdout.write('fixing plugin tree')
CMSPlugin.fix_tree()
self.stdout.write('all done')
def _update_descendants_tree(self, root):
descendants_ids = get_descendant_ids(root.pk)
public_root_sibling = root.publisher_public
draft_descendants = (
Page
.objects
.filter(pk__in=descendants_ids)
.select_related('parent', 'publisher_public')
.order_by('depth', 'path')
)
descendants_by_parent = OrderedDict()
for descendant in draft_descendants.iterator():
parent = descendant.parent_id
descendants_by_parent.setdefault(parent, []).append(descendant)
for tree in descendants_by_parent.values():
last_draft = None
last_public = None
draft_parent = tree[0].parent
public_parent = draft_parent.publisher_public
for draft_page in tree:
draft_page.refresh_from_db()
if last_draft:
# This is not the loop so this is not the first draft
# child. Set this page a sibling of the last processed
# draft page.
draft_page.move(target=last_draft.reload(), pos='right')
else:
# This is the first time through the loop so this is the first
# draft child for this parent.
draft_page.move(target=draft_parent.reload(), pos='first-child')
last_draft = draft_page
if not draft_page.publisher_public_id:
continue
public_page = draft_page.publisher_public
if last_public:
public_target = last_public
public_position = 'right'
last_public = public_page
elif public_parent:
# always insert the first public child node found
# as the first child of the public parent
public_target = public_parent
public_position = 'first-child'
last_public = public_page
else:
# No public parent has been found
# Insert the node as a sibling to the last root sibling
                    # It's very unlikely but possible for the root to not have
# a public page. When this happens, use the root draft page
# as sibling.
public_target = public_root_sibling or root
public_position = 'right'
# This page now becomes the last root sibling
public_root_sibling = public_page
public_page.refresh_from_db()
public_page.move(
target=public_target.reload(),
pos=public_position,
)
| bsd-3-clause | -8,145,501,329,662,888,000 | 34.285714 | 84 | 0.552079 | false |
raboof/supybot | plugins/Alias/config.py | 15 | 2179 | ###
# Copyright (c) 2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.conf as conf
import supybot.registry as registry
def configure(advanced):
# This will be called by supybot to configure this module. advanced is
# a bool that specifies whether the user identified himself as an advanced
# user or not. You should effect your configuration by manipulating the
# registry as appropriate.
from supybot.questions import expect, anything, something, yn
conf.registerPlugin('Alias', True)
Alias = conf.registerPlugin('Alias')
conf.registerGroup(Alias, 'aliases')
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| bsd-3-clause | -6,640,934,482,933,848,000 | 48.522727 | 79 | 0.770078 | false |
alexforencich/python-ivi | ivi/tektronix/tektronixDPO4032.py | 2 | 1646 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2016-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .tektronixDPO4000 import *
class tektronixDPO4032(tektronixDPO4000):
"Tektronix DPO4032 IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'DPO4032')
super(tektronixDPO4032, self).__init__(*args, **kwargs)
self._analog_channel_count = 2
self._digital_channel_count = 0
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 350e6
self._init_channels()
| mit | 647,397,318,233,043,200 | 38.190476 | 86 | 0.753949 | false |
petrutlucian94/nova | nova/objects/__init__.py | 9 | 2746 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(comstud): You may scratch your head as you see code that imports
# this module and then accesses attributes for objects such as Instance,
# etc, yet you do not see these attributes in here. Never fear, there is
# a little bit of magic. When objects are registered, an attribute is set
# on this module automatically, pointing to the newest/latest version of
# the object.
def register_all():
# NOTE(danms): You must make sure your object gets imported in this
# function in order for it to be registered by services that may
# need to receive it via RPC.
__import__('nova.objects.agent')
__import__('nova.objects.aggregate')
__import__('nova.objects.bandwidth_usage')
__import__('nova.objects.block_device')
__import__('nova.objects.cell_mapping')
__import__('nova.objects.compute_node')
__import__('nova.objects.dns_domain')
__import__('nova.objects.ec2')
__import__('nova.objects.external_event')
__import__('nova.objects.fixed_ip')
__import__('nova.objects.flavor')
__import__('nova.objects.floating_ip')
__import__('nova.objects.hv_spec')
__import__('nova.objects.instance')
__import__('nova.objects.instance_action')
__import__('nova.objects.instance_fault')
__import__('nova.objects.instance_group')
__import__('nova.objects.instance_info_cache')
__import__('nova.objects.instance_mapping')
__import__('nova.objects.instance_numa_topology')
__import__('nova.objects.instance_pci_requests')
__import__('nova.objects.keypair')
__import__('nova.objects.migration')
__import__('nova.objects.network')
__import__('nova.objects.network_request')
__import__('nova.objects.numa')
__import__('nova.objects.pci_device')
__import__('nova.objects.pci_device_pool')
__import__('nova.objects.tag')
__import__('nova.objects.quotas')
__import__('nova.objects.security_group')
__import__('nova.objects.security_group_rule')
__import__('nova.objects.service')
__import__('nova.objects.vcpu_model')
__import__('nova.objects.virt_cpu_topology')
__import__('nova.objects.virtual_interface')
| apache-2.0 | 8,639,524,802,835,525,000 | 43.290323 | 78 | 0.678077 | false |
40023154/0628 | static/Brython3.1.1-20150328-091302/Lib/warnings.py | 752 | 13825 | """Python part of the warnings subsystem."""
# Note: function level imports should *not* be used
# in this module as it may cause import lock deadlock.
# See bug 683658.
import linecache
import sys
__all__ = ["warn", "showwarning", "formatwarning", "filterwarnings",
"resetwarnings", "catch_warnings"]
def showwarning(message, category, filename, lineno, file=None, line=None):
"""Hook to write a warning to a file; replace if you like."""
if file is None:
file = sys.stderr
try:
file.write(formatwarning(message, category, filename, lineno, line))
except IOError:
pass # the file (probably stderr) is invalid - this warning gets lost.
def formatwarning(message, category, filename, lineno, line=None):
"""Function to format a warning the standard way."""
s = "%s:%s: %s: %s\n" % (filename, lineno, category.__name__, message)
line = linecache.getline(filename, lineno) if line is None else line
if line:
line = line.strip()
s += " %s\n" % line
return s
def filterwarnings(action, message="", category=Warning, module="", lineno=0,
append=False):
"""Insert an entry into the list of warnings filters (at the front).
'action' -- one of "error", "ignore", "always", "default", "module",
or "once"
'message' -- a regex that the warning message must match
'category' -- a class that the warning must be a subclass of
'module' -- a regex that the module name must match
'lineno' -- an integer line number, 0 matches all warnings
'append' -- if true, append to the list of filters
"""
import re
assert action in ("error", "ignore", "always", "default", "module",
"once"), "invalid action: %r" % (action,)
assert isinstance(message, str), "message must be a string"
assert isinstance(category, type), "category must be a class"
assert issubclass(category, Warning), "category must be a Warning subclass"
assert isinstance(module, str), "module must be a string"
assert isinstance(lineno, int) and lineno >= 0, \
"lineno must be an int >= 0"
item = (action, re.compile(message, re.I), category,
re.compile(module), lineno)
if append:
filters.append(item)
else:
filters.insert(0, item)
def simplefilter(action, category=Warning, lineno=0, append=False):
"""Insert a simple entry into the list of warnings filters (at the front).
A simple filter matches all modules and messages.
'action' -- one of "error", "ignore", "always", "default", "module",
or "once"
'category' -- a class that the warning must be a subclass of
'lineno' -- an integer line number, 0 matches all warnings
'append' -- if true, append to the list of filters
"""
assert action in ("error", "ignore", "always", "default", "module",
"once"), "invalid action: %r" % (action,)
assert isinstance(lineno, int) and lineno >= 0, \
"lineno must be an int >= 0"
item = (action, None, category, None, lineno)
if append:
filters.append(item)
else:
filters.insert(0, item)
def resetwarnings():
"""Clear the list of warning filters, so that no filters are active."""
filters[:] = []
class _OptionError(Exception):
"""Exception used by option processing helpers."""
pass
# Helper to process -W options passed via sys.warnoptions
def _processoptions(args):
for arg in args:
try:
_setoption(arg)
except _OptionError as msg:
print("Invalid -W option ignored:", msg, file=sys.stderr)
# Helper for _processoptions()
def _setoption(arg):
import re
parts = arg.split(':')
if len(parts) > 5:
raise _OptionError("too many fields (max 5): %r" % (arg,))
while len(parts) < 5:
parts.append('')
action, message, category, module, lineno = [s.strip()
for s in parts]
action = _getaction(action)
message = re.escape(message)
category = _getcategory(category)
module = re.escape(module)
if module:
module = module + '$'
if lineno:
try:
lineno = int(lineno)
if lineno < 0:
raise ValueError
except (ValueError, OverflowError):
raise _OptionError("invalid lineno %r" % (lineno,))
else:
lineno = 0
filterwarnings(action, message, category, module, lineno)
# Helper for _setoption()
def _getaction(action):
if not action:
return "default"
if action == "all": return "always" # Alias
for a in ('default', 'always', 'ignore', 'module', 'once', 'error'):
if a.startswith(action):
return a
raise _OptionError("invalid action: %r" % (action,))
# Helper for _setoption()
def _getcategory(category):
import re
if not category:
return Warning
if re.match("^[a-zA-Z0-9_]+$", category):
try:
cat = eval(category)
except NameError:
raise _OptionError("unknown warning category: %r" % (category,))
else:
i = category.rfind(".")
module = category[:i]
klass = category[i+1:]
try:
m = __import__(module, None, None, [klass])
except ImportError:
raise _OptionError("invalid module name: %r" % (module,))
try:
cat = getattr(m, klass)
except AttributeError:
raise _OptionError("unknown warning category: %r" % (category,))
if not issubclass(cat, Warning):
raise _OptionError("invalid warning category: %r" % (category,))
return cat
# Code typically replaced by _warnings
def warn(message, category=None, stacklevel=1):
"""Issue a warning, or maybe ignore it or raise an exception."""
# Check if message is already a Warning object
if isinstance(message, Warning):
category = message.__class__
# Check category argument
if category is None:
category = UserWarning
assert issubclass(category, Warning)
# Get context information
try:
caller = sys._getframe(stacklevel)
except ValueError:
globals = sys.__dict__
lineno = 1
else:
globals = caller.f_globals
lineno = caller.f_lineno
if '__name__' in globals:
module = globals['__name__']
else:
module = "<string>"
filename = globals.get('__file__')
if filename:
fnl = filename.lower()
if fnl.endswith((".pyc", ".pyo")):
filename = filename[:-1]
else:
if module == "__main__":
try:
filename = sys.argv[0]
except AttributeError:
# embedded interpreters don't have sys.argv, see bug #839151
filename = '__main__'
if not filename:
filename = module
registry = globals.setdefault("__warningregistry__", {})
warn_explicit(message, category, filename, lineno, module, registry,
globals)
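# A small sketch of how library code typically calls warn(); stacklevel=2 makes
# the warning point at the caller of deprecated_api() rather than at the warn()
# call itself (deprecated_api/new_api are hypothetical names).
#
#     def deprecated_api():
#         warn("deprecated_api() is deprecated; use new_api() instead",
#              DeprecationWarning, stacklevel=2)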
def warn_explicit(message, category, filename, lineno,
module=None, registry=None, module_globals=None):
lineno = int(lineno)
if module is None:
module = filename or "<unknown>"
if module[-3:].lower() == ".py":
module = module[:-3] # XXX What about leading pathname?
if registry is None:
registry = {}
if isinstance(message, Warning):
text = str(message)
category = message.__class__
else:
text = message
message = category(message)
key = (text, category, lineno)
# Quick test for common case
if registry.get(key):
return
# Search the filters
for item in filters:
action, msg, cat, mod, ln = item
if ((msg is None or msg.match(text)) and
issubclass(category, cat) and
(mod is None or mod.match(module)) and
(ln == 0 or lineno == ln)):
break
else:
action = defaultaction
# Early exit actions
if action == "ignore":
registry[key] = 1
return
# Prime the linecache for formatting, in case the
# "file" is actually in a zipfile or something.
linecache.getlines(filename, module_globals)
if action == "error":
raise message
# Other actions
if action == "once":
registry[key] = 1
oncekey = (text, category)
if onceregistry.get(oncekey):
return
onceregistry[oncekey] = 1
elif action == "always":
pass
elif action == "module":
registry[key] = 1
altkey = (text, category, 0)
if registry.get(altkey):
return
registry[altkey] = 1
elif action == "default":
registry[key] = 1
else:
# Unrecognized actions are errors
raise RuntimeError(
"Unrecognized action (%r) in warnings.filters:\n %s" %
(action, item))
if not callable(showwarning):
raise TypeError("warnings.showwarning() must be set to a "
"function or method")
# Print message and context
showwarning(message, category, filename, lineno)
class WarningMessage(object):
"""Holds the result of a single showwarning() call."""
_WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
"line")
def __init__(self, message, category, filename, lineno, file=None,
line=None):
local_values = locals()
for attr in self._WARNING_DETAILS:
setattr(self, attr, local_values[attr])
self._category_name = category.__name__ if category else None
def __str__(self):
return ("{message : %r, category : %r, filename : %r, lineno : %s, "
"line : %r}" % (self.message, self._category_name,
self.filename, self.lineno, self.line))
class catch_warnings(object):
"""A context manager that copies and restores the warnings filter upon
exiting the context.
The 'record' argument specifies whether warnings should be captured by a
custom implementation of warnings.showwarning() and be appended to a list
returned by the context manager. Otherwise None is returned by the context
manager. The objects appended to the list are arguments whose attributes
mirror the arguments to showwarning().
The 'module' argument is to specify an alternative module to the module
named 'warnings' and imported under that name. This argument is only useful
when testing the warnings module itself.
"""
def __init__(self, *, record=False, module=None):
"""Specify whether to record warnings and if an alternative module
should be used other than sys.modules['warnings'].
For compatibility with Python 3.0, please consider all arguments to be
keyword-only.
"""
self._record = record
self._module = sys.modules['warnings'] if module is None else module
self._entered = False
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
log = []
def showwarning(*args, **kwargs):
log.append(WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
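# A minimal sketch of catch_warnings in record mode: the filter list and
# showwarning are restored on exit, and each captured entry is a WarningMessage.
#
#     with catch_warnings(record=True) as caught:
#         simplefilter("always")
#         warn("something happened", UserWarning)
#     assert issubclass(caught[0].category, UserWarning)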
# filters contains a sequence of filter 5-tuples
# The components of the 5-tuple are:
# - an action: error, ignore, always, default, module, or once
# - a compiled regex that must match the warning message
# - a class representing the warning category
# - a compiled regex that must match the module that is being warned
# - a line number for the line being warned about, or 0 to mean any line
# If either of the compiled regexes is None, match anything.
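# For example, the command-line option -W "error::DeprecationWarning:mypkg"
# (the package name is hypothetical) would be stored roughly as:
#     ("error", re.compile("", re.I), DeprecationWarning,
#      re.compile("mypkg$"), 0)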
_warnings_defaults = False
try:
from _warnings import (filters, _defaultaction, _onceregistry,
warn, warn_explicit)
defaultaction = _defaultaction
onceregistry = _onceregistry
_warnings_defaults = True
except ImportError:
filters = []
defaultaction = "default"
onceregistry = {}
# Module initialization
_processoptions(sys.warnoptions)
if not _warnings_defaults:
silence = [ImportWarning, PendingDeprecationWarning]
silence.append(DeprecationWarning)
for cls in silence:
simplefilter("ignore", category=cls)
bytes_warning = sys.flags.bytes_warning
if bytes_warning > 1:
bytes_action = "error"
elif bytes_warning:
bytes_action = "default"
else:
bytes_action = "ignore"
simplefilter(bytes_action, category=BytesWarning, append=1)
# resource usage warnings are enabled by default in pydebug mode
if hasattr(sys, 'gettotalrefcount'):
resource_action = "always"
else:
resource_action = "ignore"
simplefilter(resource_action, category=ResourceWarning, append=1)
del _warnings_defaults
| gpl-3.0 | 2,274,583,030,067,186,400 | 34.178117 | 79 | 0.605425 | false |
coronary/RandomEpisode | depends/Lib/site-packages/setuptools/__init__.py | 130 | 5019 | """Extensions to the 'distutils' for large or complex distributions"""
import os
import functools
import distutils.core
import distutils.filelist
from distutils.util import convert_path
from fnmatch import fnmatchcase
from six.moves import filter, map
import setuptools.version
from setuptools.extension import Extension
from setuptools.dist import Distribution, Feature
from setuptools.depends import Require
from . import monkey
__all__ = [
'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',
'find_packages',
]
__version__ = setuptools.version.__version__
bootstrap_install_from = None
# If we run 2to3 on .py files, should we also convert docstrings?
# Default: yes; assume that we can detect doctests reliably
run_2to3_on_doctests = True
# Standard package names for fixer packages
lib2to3_fixer_packages = ['lib2to3.fixes']
class PackageFinder(object):
"""
Generate a list of all Python packages found within a directory
"""
@classmethod
def find(cls, where='.', exclude=(), include=('*',)):
"""Return a list all Python packages found within directory 'where'
'where' is the root directory which will be searched for packages. It
should be supplied as a "cross-platform" (i.e. URL-style) path; it will
be converted to the appropriate local path syntax.
'exclude' is a sequence of package names to exclude; '*' can be used
as a wildcard in the names, such that 'foo.*' will exclude all
subpackages of 'foo' (but not 'foo' itself).
'include' is a sequence of package names to include. If it's
specified, only the named packages will be included. If it's not
specified, all found packages will be included. 'include' can contain
shell style wildcard patterns just like 'exclude'.
"""
return list(cls._find_packages_iter(
convert_path(where),
cls._build_filter('ez_setup', '*__pycache__', *exclude),
cls._build_filter(*include)))
@classmethod
def _find_packages_iter(cls, where, exclude, include):
"""
All the packages found in 'where' that pass the 'include' filter, but
not the 'exclude' filter.
"""
for root, dirs, files in os.walk(where, followlinks=True):
# Copy dirs to iterate over it, then empty dirs.
all_dirs = dirs[:]
dirs[:] = []
for dir in all_dirs:
full_path = os.path.join(root, dir)
rel_path = os.path.relpath(full_path, where)
package = rel_path.replace(os.path.sep, '.')
# Skip directory trees that are not valid packages
if ('.' in dir or not cls._looks_like_package(full_path)):
continue
# Should this package be included?
if include(package) and not exclude(package):
yield package
# Keep searching subdirectories, as there may be more packages
# down there, even if the parent was excluded.
dirs.append(dir)
@staticmethod
def _looks_like_package(path):
"""Does a directory look like a package?"""
return os.path.isfile(os.path.join(path, '__init__.py'))
@staticmethod
def _build_filter(*patterns):
"""
Given a list of patterns, return a callable that will be true only if
the input matches at least one of the patterns.
"""
return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
class PEP420PackageFinder(PackageFinder):
@staticmethod
def _looks_like_package(path):
return True
find_packages = PackageFinder.find
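# A brief illustration of the alias above (package names are hypothetical): for
# a tree containing pkg/__init__.py and pkg/sub/__init__.py next to a tests/
# directory,
#
#     find_packages(exclude=['tests', 'tests.*'])
#
# would typically return ['pkg', 'pkg.sub'].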
setup = distutils.core.setup
_Command = monkey.get_unpatched(distutils.core.Command)
class Command(_Command):
__doc__ = _Command.__doc__
command_consumes_arguments = False
def __init__(self, dist, **kw):
"""
Construct the command for dist, updating
vars(self) with any keyword parameters.
"""
_Command.__init__(self, dist)
vars(self).update(kw)
def reinitialize_command(self, command, reinit_subcommands=0, **kw):
cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
vars(cmd).update(kw)
return cmd
def _find_all_simple(path):
"""
Find all files under 'path'
"""
results = (
os.path.join(base, file)
for base, dirs, files in os.walk(path, followlinks=True)
for file in files
)
return filter(os.path.isfile, results)
def findall(dir=os.curdir):
"""
Find all files under 'dir' and return the list of full filenames.
Unless dir is '.', return full filenames with dir prepended.
"""
files = _find_all_simple(dir)
if dir == os.curdir:
make_rel = functools.partial(os.path.relpath, start=dir)
files = map(make_rel, files)
return list(files)
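# For instance (hypothetical layout), findall('pkg') might return
# ['pkg/__init__.py', 'pkg/data.txt'], while calling findall() from inside the
# 'pkg' directory would return ['__init__.py', 'data.txt'].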
monkey.patch_all()
| mit | -7,721,178,500,660,913,000 | 30.36875 | 79 | 0.630803 | false |
tangfeixiong/nova | nova/tests/unit/objects/test_objects.py | 2 | 59782 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import contextlib
import copy
import datetime
import hashlib
import inspect
import os
import pprint
import mock
from oslo_log import log
from oslo_utils import timeutils
import six
from testtools import matchers
from nova.conductor import rpcapi as conductor_rpcapi
from nova import context
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova import rpc
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import fake_notifier
from nova import utils
LOG = log.getLogger(__name__)
class MyOwnedObject(base.NovaPersistentObject, base.NovaObject):
VERSION = '1.0'
fields = {'baz': fields.IntegerField()}
class MyObj(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
VERSION = '1.6'
fields = {'foo': fields.IntegerField(default=1),
'bar': fields.StringField(),
'missing': fields.StringField(),
'readonly': fields.IntegerField(read_only=True),
'rel_object': fields.ObjectField('MyOwnedObject', nullable=True),
'rel_objects': fields.ListOfObjectsField('MyOwnedObject',
nullable=True),
'mutable_default': fields.ListOfStringsField(default=[]),
}
@staticmethod
def _from_db_object(context, obj, db_obj):
self = MyObj()
self.foo = db_obj['foo']
self.bar = db_obj['bar']
self.missing = db_obj['missing']
self.readonly = 1
self._context = context
return self
def obj_load_attr(self, attrname):
setattr(self, attrname, 'loaded!')
@base.remotable_classmethod
def query(cls, context):
obj = cls(context=context, foo=1, bar='bar')
obj.obj_reset_changes()
return obj
@base.remotable
def marco(self):
return 'polo'
@base.remotable
def _update_test(self):
self.bar = 'updated'
@base.remotable
def save(self):
self.obj_reset_changes()
@base.remotable
def refresh(self):
self.foo = 321
self.bar = 'refreshed'
self.obj_reset_changes()
@base.remotable
def modify_save_modify(self):
self.bar = 'meow'
self.save()
self.foo = 42
self.rel_object = MyOwnedObject(baz=42)
def obj_make_compatible(self, primitive, target_version):
super(MyObj, self).obj_make_compatible(primitive, target_version)
# NOTE(danms): Simulate an older version that had a different
# format for the 'bar' attribute
if target_version == '1.1' and 'bar' in primitive:
primitive['bar'] = 'old%s' % primitive['bar']
class MyObjDiffVers(MyObj):
VERSION = '1.5'
@classmethod
def obj_name(cls):
return 'MyObj'
class MyObj2(object):
@classmethod
def obj_name(cls):
return 'MyObj'
@base.remotable_classmethod
def query(cls, *args, **kwargs):
pass
class RandomMixInWithNoFields(object):
"""Used to test object inheritance using a mixin that has no fields."""
pass
class TestSubclassedObject(RandomMixInWithNoFields, MyObj):
fields = {'new_field': fields.StringField()}
class TestMetaclass(test.NoDBTestCase):
def test_obj_tracking(self):
@six.add_metaclass(base.NovaObjectMetaclass)
class NewBaseClass(object):
VERSION = '1.0'
fields = {}
@classmethod
def obj_name(cls):
return cls.__name__
class Fake1TestObj1(NewBaseClass):
@classmethod
def obj_name(cls):
return 'fake1'
class Fake1TestObj2(Fake1TestObj1):
pass
class Fake1TestObj3(Fake1TestObj1):
VERSION = '1.1'
class Fake2TestObj1(NewBaseClass):
@classmethod
def obj_name(cls):
return 'fake2'
class Fake1TestObj4(Fake1TestObj3):
VERSION = '1.2'
class Fake2TestObj2(Fake2TestObj1):
VERSION = '1.1'
class Fake1TestObj5(Fake1TestObj1):
VERSION = '1.1'
# Newest versions first in the list. Duplicate versions take the
# newest object.
expected = {'fake1': [Fake1TestObj4, Fake1TestObj5, Fake1TestObj2],
'fake2': [Fake2TestObj2, Fake2TestObj1]}
self.assertEqual(expected, NewBaseClass._obj_classes)
# The following should work, also.
self.assertEqual(expected, Fake1TestObj1._obj_classes)
self.assertEqual(expected, Fake1TestObj2._obj_classes)
self.assertEqual(expected, Fake1TestObj3._obj_classes)
self.assertEqual(expected, Fake1TestObj4._obj_classes)
self.assertEqual(expected, Fake1TestObj5._obj_classes)
self.assertEqual(expected, Fake2TestObj1._obj_classes)
self.assertEqual(expected, Fake2TestObj2._obj_classes)
def test_field_checking(self):
def create_class(field):
class TestField(base.NovaObject):
VERSION = '1.5'
fields = {'foo': field()}
return TestField
create_class(fields.IPV4AndV6AddressField)
self.assertRaises(exception.ObjectFieldInvalid,
create_class, fields.IPV4AndV6Address)
self.assertRaises(exception.ObjectFieldInvalid,
create_class, int)
class TestObjToPrimitive(test.NoDBTestCase):
def test_obj_to_primitive_list(self):
class MyObjElement(base.NovaObject):
fields = {'foo': fields.IntegerField()}
def __init__(self, foo):
super(MyObjElement, self).__init__()
self.foo = foo
class MyList(base.ObjectListBase, base.NovaObject):
fields = {'objects': fields.ListOfObjectsField('MyObjElement')}
mylist = MyList()
mylist.objects = [MyObjElement(1), MyObjElement(2), MyObjElement(3)]
self.assertEqual([1, 2, 3],
[x['foo'] for x in base.obj_to_primitive(mylist)])
def test_obj_to_primitive_dict(self):
myobj = MyObj(foo=1, bar='foo')
self.assertEqual({'foo': 1, 'bar': 'foo'},
base.obj_to_primitive(myobj))
def test_obj_to_primitive_recursive(self):
class MyList(base.ObjectListBase, base.NovaObject):
fields = {'objects': fields.ListOfObjectsField('MyObj')}
mylist = MyList(objects=[MyObj(), MyObj()])
for i, value in enumerate(mylist):
value.foo = i
self.assertEqual([{'foo': 0}, {'foo': 1}],
base.obj_to_primitive(mylist))
def test_obj_to_primitive_with_ip_addr(self):
class TestObject(base.NovaObject):
fields = {'addr': fields.IPAddressField(),
'cidr': fields.IPNetworkField()}
obj = TestObject(addr='1.2.3.4', cidr='1.1.1.1/16')
self.assertEqual({'addr': '1.2.3.4', 'cidr': '1.1.1.1/16'},
base.obj_to_primitive(obj))
class TestObjMakeList(test.NoDBTestCase):
def test_obj_make_list(self):
class MyList(base.ObjectListBase, base.NovaObject):
fields = {
'objects': fields.ListOfObjectsField('MyObj'),
}
db_objs = [{'foo': 1, 'bar': 'baz', 'missing': 'banana'},
{'foo': 2, 'bar': 'bat', 'missing': 'apple'},
]
mylist = base.obj_make_list('ctxt', MyList(), MyObj, db_objs)
self.assertEqual(2, len(mylist))
self.assertEqual('ctxt', mylist._context)
for index, item in enumerate(mylist):
self.assertEqual(db_objs[index]['foo'], item.foo)
self.assertEqual(db_objs[index]['bar'], item.bar)
self.assertEqual(db_objs[index]['missing'], item.missing)
def compare_obj(test, obj, db_obj, subs=None, allow_missing=None,
comparators=None):
"""Compare a NovaObject and a dict-like database object.
This automatically converts TZ-aware datetimes and iterates over
the fields of the object.
:param:test: The TestCase doing the comparison
:param:obj: The NovaObject to examine
:param:db_obj: The dict-like database object to use as reference
:param:subs: A dict of objkey=dbkey field substitutions
:param:allow_missing: A list of fields that may not be in db_obj
:param:comparators: Map of comparator functions to use for certain fields
"""
if subs is None:
subs = {}
if allow_missing is None:
allow_missing = []
if comparators is None:
comparators = {}
for key in obj.fields:
if key in allow_missing and not obj.obj_attr_is_set(key):
continue
obj_val = getattr(obj, key)
db_key = subs.get(key, key)
db_val = db_obj[db_key]
if isinstance(obj_val, datetime.datetime):
obj_val = obj_val.replace(tzinfo=None)
if key in comparators:
comparator = comparators[key]
comparator(db_val, obj_val)
else:
test.assertEqual(db_val, obj_val)
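# A sketch of a typical call to this helper from a test case, assuming a
# hypothetical DB row that stores the object's 'uuid' field under
# 'instance_uuid' and may omit 'deleted_at':
#
#     compare_obj(self, obj, db_obj,
#                 subs={'uuid': 'instance_uuid'},
#                 allow_missing=['deleted_at'])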
class _BaseTestCase(test.TestCase):
def setUp(self):
super(_BaseTestCase, self).setUp()
self.remote_object_calls = list()
self.user_id = 'fake-user'
self.project_id = 'fake-project'
self.context = context.RequestContext(self.user_id, self.project_id)
fake_notifier.stub_notifier(self.stubs)
self.addCleanup(fake_notifier.reset)
def compare_obj(self, obj, db_obj, subs=None, allow_missing=None,
comparators=None):
compare_obj(self, obj, db_obj, subs=subs, allow_missing=allow_missing,
comparators=comparators)
def str_comparator(self, expected, obj_val):
"""Compare an object field to a string in the db by performing
a simple coercion on the object field value.
"""
self.assertEqual(expected, str(obj_val))
def assertNotIsInstance(self, obj, cls, msg=None):
"""Python < v2.7 compatibility. Assert 'not isinstance(obj, cls)."""
try:
f = super(_BaseTestCase, self).assertNotIsInstance
except AttributeError:
self.assertThat(obj,
matchers.Not(matchers.IsInstance(cls)),
message=msg or '')
else:
f(obj, cls, msg=msg)
class _LocalTest(_BaseTestCase):
def setUp(self):
super(_LocalTest, self).setUp()
# Just in case
self.useFixture(nova_fixtures.IndirectionAPIFixture(None))
@contextlib.contextmanager
def things_temporarily_local():
# Temporarily go non-remote so the conductor handles
# this request directly
_api = base.NovaObject.indirection_api
base.NovaObject.indirection_api = None
yield
base.NovaObject.indirection_api = _api
class _RemoteTest(_BaseTestCase):
def _testable_conductor(self):
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.remote_object_calls = list()
orig_object_class_action = \
self.conductor_service.manager.object_class_action
orig_object_action = \
self.conductor_service.manager.object_action
def fake_object_class_action(*args, **kwargs):
self.remote_object_calls.append((kwargs.get('objname'),
kwargs.get('objmethod')))
with things_temporarily_local():
result = orig_object_class_action(*args, **kwargs)
return (base.NovaObject.obj_from_primitive(result, context=args[0])
if isinstance(result, base.NovaObject) else result)
self.stubs.Set(self.conductor_service.manager, 'object_class_action',
fake_object_class_action)
def fake_object_action(*args, **kwargs):
self.remote_object_calls.append((kwargs.get('objinst'),
kwargs.get('objmethod')))
with things_temporarily_local():
result = orig_object_action(*args, **kwargs)
return result
self.stubs.Set(self.conductor_service.manager, 'object_action',
fake_object_action)
# Things are remoted by default in this session
self.useFixture(nova_fixtures.IndirectionAPIFixture(
conductor_rpcapi.ConductorAPI()))
# To make sure local and remote contexts match
self.stubs.Set(rpc.RequestContextSerializer,
'serialize_context',
lambda s, c: c)
self.stubs.Set(rpc.RequestContextSerializer,
'deserialize_context',
lambda s, c: c)
def setUp(self):
super(_RemoteTest, self).setUp()
self._testable_conductor()
class _TestObject(object):
def test_object_attrs_in_init(self):
# Spot check a few
objects.Instance
objects.InstanceInfoCache
objects.SecurityGroup
# Now check the test one in this file. Should be newest version
self.assertEqual('1.6', objects.MyObj.VERSION)
def test_hydration_type_error(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5',
'nova_object.data': {'foo': 'a'}}
self.assertRaises(ValueError, MyObj.obj_from_primitive, primitive)
def test_hydration(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5',
'nova_object.data': {'foo': 1}}
real_method = MyObj._obj_from_primitive
def _obj_from_primitive(*args):
return real_method(*args)
with mock.patch.object(MyObj, '_obj_from_primitive') as ofp:
ofp.side_effect = _obj_from_primitive
obj = MyObj.obj_from_primitive(primitive)
ofp.assert_called_once_with(None, '1.5', primitive)
self.assertEqual(obj.foo, 1)
def test_hydration_version_different(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.2',
'nova_object.data': {'foo': 1}}
obj = MyObj.obj_from_primitive(primitive)
self.assertEqual(obj.foo, 1)
self.assertEqual('1.2', obj.VERSION)
def test_hydration_bad_ns(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'foo',
'nova_object.version': '1.5',
'nova_object.data': {'foo': 1}}
self.assertRaises(exception.UnsupportedObjectError,
MyObj.obj_from_primitive, primitive)
def test_hydration_additional_unexpected_stuff(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5.1',
'nova_object.data': {
'foo': 1,
'unexpected_thing': 'foobar'}}
obj = MyObj.obj_from_primitive(primitive)
self.assertEqual(1, obj.foo)
self.assertFalse(hasattr(obj, 'unexpected_thing'))
# NOTE(danms): If we call obj_from_primitive() directly
# with a version containing .z, we'll get that version
# in the resulting object. In reality, when using the
# serializer, we'll get that snipped off (tested
# elsewhere)
self.assertEqual('1.5.1', obj.VERSION)
def test_dehydration(self):
expected = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.6',
'nova_object.data': {'foo': 1}}
obj = MyObj(foo=1)
obj.obj_reset_changes()
self.assertEqual(obj.obj_to_primitive(), expected)
def test_object_property(self):
obj = MyObj(foo=1)
self.assertEqual(obj.foo, 1)
def test_object_property_type_error(self):
obj = MyObj()
def fail():
obj.foo = 'a'
self.assertRaises(ValueError, fail)
def test_load(self):
obj = MyObj()
self.assertEqual(obj.bar, 'loaded!')
def test_load_in_base(self):
class Foo(base.NovaObject):
fields = {'foobar': fields.IntegerField()}
obj = Foo()
with self.assertRaisesRegex(NotImplementedError, ".*foobar.*"):
obj.foobar
def test_loaded_in_primitive(self):
obj = MyObj(foo=1)
obj.obj_reset_changes()
self.assertEqual(obj.bar, 'loaded!')
expected = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.6',
'nova_object.changes': ['bar'],
'nova_object.data': {'foo': 1,
'bar': 'loaded!'}}
self.assertEqual(obj.obj_to_primitive(), expected)
def test_changes_in_primitive(self):
obj = MyObj(foo=123)
self.assertEqual(obj.obj_what_changed(), set(['foo']))
primitive = obj.obj_to_primitive()
self.assertIn('nova_object.changes', primitive)
obj2 = MyObj.obj_from_primitive(primitive)
self.assertEqual(obj2.obj_what_changed(), set(['foo']))
obj2.obj_reset_changes()
self.assertEqual(obj2.obj_what_changed(), set())
def test_obj_class_from_name(self):
obj = base.NovaObject.obj_class_from_name('MyObj', '1.5')
self.assertEqual('1.5', obj.VERSION)
def test_obj_class_from_name_latest_compatible(self):
obj = base.NovaObject.obj_class_from_name('MyObj', '1.1')
self.assertEqual('1.6', obj.VERSION)
def test_unknown_objtype(self):
self.assertRaises(exception.UnsupportedObjectError,
base.NovaObject.obj_class_from_name, 'foo', '1.0')
def test_obj_class_from_name_supported_version(self):
error = None
try:
base.NovaObject.obj_class_from_name('MyObj', '1.25')
except exception.IncompatibleObjectVersion as error:
pass
self.assertIsNotNone(error)
self.assertEqual('1.6', error.kwargs['supported'])
def test_orphaned_object(self):
obj = MyObj.query(self.context)
obj._context = None
self.assertRaises(exception.OrphanedObjectError,
obj._update_test)
def test_changed_1(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
obj._update_test()
self.assertEqual(obj.obj_what_changed(), set(['foo', 'bar']))
self.assertEqual(obj.foo, 123)
def test_changed_2(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
obj.save()
self.assertEqual(obj.obj_what_changed(), set([]))
self.assertEqual(obj.foo, 123)
def test_changed_3(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
obj.refresh()
self.assertEqual(obj.obj_what_changed(), set([]))
self.assertEqual(obj.foo, 321)
self.assertEqual(obj.bar, 'refreshed')
def test_changed_4(self):
obj = MyObj.query(self.context)
obj.bar = 'something'
self.assertEqual(obj.obj_what_changed(), set(['bar']))
obj.modify_save_modify()
self.assertEqual(obj.obj_what_changed(), set(['foo', 'rel_object']))
self.assertEqual(obj.foo, 42)
self.assertEqual(obj.bar, 'meow')
self.assertIsInstance(obj.rel_object, MyOwnedObject)
def test_changed_with_sub_object(self):
class ParentObject(base.NovaObject):
fields = {'foo': fields.IntegerField(),
'bar': fields.ObjectField('MyObj'),
}
obj = ParentObject()
self.assertEqual(set(), obj.obj_what_changed())
obj.foo = 1
self.assertEqual(set(['foo']), obj.obj_what_changed())
bar = MyObj()
obj.bar = bar
self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
obj.obj_reset_changes()
self.assertEqual(set(), obj.obj_what_changed())
bar.foo = 1
self.assertEqual(set(['bar']), obj.obj_what_changed())
def test_static_result(self):
obj = MyObj.query(self.context)
self.assertEqual(obj.bar, 'bar')
result = obj.marco()
self.assertEqual(result, 'polo')
def test_updates(self):
obj = MyObj.query(self.context)
self.assertEqual(obj.foo, 1)
obj._update_test()
self.assertEqual(obj.bar, 'updated')
def test_base_attributes(self):
dt = datetime.datetime(1955, 11, 5)
obj = MyObj(created_at=dt, updated_at=dt, deleted_at=None,
deleted=False)
expected = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.6',
'nova_object.changes':
['deleted', 'created_at', 'deleted_at', 'updated_at'],
'nova_object.data':
{'created_at': timeutils.isotime(dt),
'updated_at': timeutils.isotime(dt),
'deleted_at': None,
'deleted': False,
}
}
actual = obj.obj_to_primitive()
self.assertJsonEqual(actual, expected)
def test_contains(self):
obj = MyObj()
self.assertNotIn('foo', obj)
obj.foo = 1
self.assertIn('foo', obj)
self.assertNotIn('does_not_exist', obj)
def test_obj_attr_is_set(self):
obj = MyObj(foo=1)
self.assertTrue(obj.obj_attr_is_set('foo'))
self.assertFalse(obj.obj_attr_is_set('bar'))
self.assertRaises(AttributeError, obj.obj_attr_is_set, 'bang')
def test_obj_reset_changes_recursive(self):
obj = MyObj(rel_object=MyOwnedObject(baz=123),
rel_objects=[MyOwnedObject(baz=456)])
self.assertEqual(set(['rel_object', 'rel_objects']),
obj.obj_what_changed())
obj.obj_reset_changes()
self.assertEqual(set(['rel_object']), obj.obj_what_changed())
self.assertEqual(set(['baz']), obj.rel_object.obj_what_changed())
self.assertEqual(set(['baz']), obj.rel_objects[0].obj_what_changed())
obj.obj_reset_changes(recursive=True, fields=['foo'])
self.assertEqual(set(['rel_object']), obj.obj_what_changed())
self.assertEqual(set(['baz']), obj.rel_object.obj_what_changed())
self.assertEqual(set(['baz']), obj.rel_objects[0].obj_what_changed())
obj.obj_reset_changes(recursive=True)
self.assertEqual(set([]), obj.rel_object.obj_what_changed())
self.assertEqual(set([]), obj.obj_what_changed())
def test_get(self):
obj = MyObj(foo=1)
# Foo has value, should not get the default
self.assertEqual(obj.get('foo', 2), 1)
# Foo has value, should return the value without error
self.assertEqual(obj.get('foo'), 1)
# Bar is not loaded, so we should get the default
self.assertEqual(obj.get('bar', 'not-loaded'), 'not-loaded')
# Bar without a default should lazy-load
self.assertEqual(obj.get('bar'), 'loaded!')
# Bar now has a default, but loaded value should be returned
self.assertEqual(obj.get('bar', 'not-loaded'), 'loaded!')
# Invalid attribute should raise AttributeError
self.assertRaises(AttributeError, obj.get, 'nothing')
# ...even with a default
self.assertRaises(AttributeError, obj.get, 'nothing', 3)
def test_object_inheritance(self):
base_fields = base.NovaPersistentObject.fields.keys()
myobj_fields = (['foo', 'bar', 'missing',
'readonly', 'rel_object',
'rel_objects', 'mutable_default'] +
base_fields)
myobj3_fields = ['new_field']
self.assertTrue(issubclass(TestSubclassedObject, MyObj))
self.assertEqual(len(myobj_fields), len(MyObj.fields))
self.assertEqual(set(myobj_fields), set(MyObj.fields.keys()))
self.assertEqual(len(myobj_fields) + len(myobj3_fields),
len(TestSubclassedObject.fields))
self.assertEqual(set(myobj_fields) | set(myobj3_fields),
set(TestSubclassedObject.fields.keys()))
def test_obj_as_admin(self):
obj = MyObj(context=self.context)
def fake(*args, **kwargs):
self.assertTrue(obj._context.is_admin)
with mock.patch.object(obj, 'obj_reset_changes') as mock_fn:
mock_fn.side_effect = fake
with obj.obj_as_admin():
obj.save()
self.assertTrue(mock_fn.called)
self.assertFalse(obj._context.is_admin)
def test_obj_as_admin_orphaned(self):
def testme():
obj = MyObj()
with obj.obj_as_admin():
pass
self.assertRaises(exception.OrphanedObjectError, testme)
def test_obj_alternate_context(self):
obj = MyObj(context=self.context)
with obj.obj_alternate_context(mock.sentinel.alt_ctx):
self.assertEqual(mock.sentinel.alt_ctx,
obj._context)
self.assertEqual(self.context, obj._context)
def test_get_changes(self):
obj = MyObj()
self.assertEqual({}, obj.obj_get_changes())
obj.foo = 123
self.assertEqual({'foo': 123}, obj.obj_get_changes())
obj.bar = 'test'
self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes())
obj.obj_reset_changes()
self.assertEqual({}, obj.obj_get_changes())
def test_obj_fields(self):
class TestObj(base.NovaObject):
fields = {'foo': fields.IntegerField()}
obj_extra_fields = ['bar']
@property
def bar(self):
return 'this is bar'
obj = TestObj()
self.assertEqual(['foo', 'bar'], obj.obj_fields)
def test_obj_constructor(self):
obj = MyObj(context=self.context, foo=123, bar='abc')
self.assertEqual(123, obj.foo)
self.assertEqual('abc', obj.bar)
self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
def test_obj_read_only(self):
obj = MyObj(context=self.context, foo=123, bar='abc')
obj.readonly = 1
self.assertRaises(exception.ReadOnlyFieldError, setattr,
obj, 'readonly', 2)
def test_obj_mutable_default(self):
obj = MyObj(context=self.context, foo=123, bar='abc')
obj.mutable_default = None
obj.mutable_default.append('s1')
self.assertEqual(obj.mutable_default, ['s1'])
obj1 = MyObj(context=self.context, foo=123, bar='abc')
obj1.mutable_default = None
obj1.mutable_default.append('s2')
self.assertEqual(obj1.mutable_default, ['s2'])
def test_obj_mutable_default_set_default(self):
obj1 = MyObj(context=self.context, foo=123, bar='abc')
obj1.obj_set_defaults('mutable_default')
self.assertEqual(obj1.mutable_default, [])
obj1.mutable_default.append('s1')
self.assertEqual(obj1.mutable_default, ['s1'])
obj2 = MyObj(context=self.context, foo=123, bar='abc')
obj2.obj_set_defaults('mutable_default')
self.assertEqual(obj2.mutable_default, [])
obj2.mutable_default.append('s2')
self.assertEqual(obj2.mutable_default, ['s2'])
def test_obj_repr(self):
obj = MyObj(foo=123)
self.assertEqual('MyObj(bar=<?>,created_at=<?>,deleted=<?>,'
'deleted_at=<?>,foo=123,missing=<?>,'
'mutable_default=<?>,readonly=<?>,rel_object=<?>,'
'rel_objects=<?>,updated_at=<?>)',
repr(obj))
def test_obj_make_obj_compatible(self):
subobj = MyOwnedObject(baz=1)
subobj.VERSION = '1.2'
obj = MyObj(rel_object=subobj)
obj.obj_relationships = {
'rel_object': [('1.5', '1.1'), ('1.7', '1.2')],
}
orig_primitive = obj.obj_to_primitive()['nova_object.data']
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
primitive = copy.deepcopy(orig_primitive)
obj._obj_make_obj_compatible(primitive, '1.8', 'rel_object')
self.assertFalse(mock_compat.called)
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
primitive = copy.deepcopy(orig_primitive)
obj._obj_make_obj_compatible(primitive, '1.7', 'rel_object')
self.assertFalse(mock_compat.called)
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
primitive = copy.deepcopy(orig_primitive)
obj._obj_make_obj_compatible(primitive, '1.6', 'rel_object')
mock_compat.assert_called_once_with(
primitive['rel_object']['nova_object.data'], '1.1')
self.assertEqual('1.1',
primitive['rel_object']['nova_object.version'])
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
primitive = copy.deepcopy(orig_primitive)
obj._obj_make_obj_compatible(primitive, '1.5', 'rel_object')
mock_compat.assert_called_once_with(
primitive['rel_object']['nova_object.data'], '1.1')
self.assertEqual('1.1',
primitive['rel_object']['nova_object.version'])
with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
primitive = copy.deepcopy(orig_primitive)
obj._obj_make_obj_compatible(primitive, '1.4', 'rel_object')
self.assertFalse(mock_compat.called)
self.assertNotIn('rel_object', primitive)
def test_obj_make_compatible_hits_sub_objects(self):
subobj = MyOwnedObject(baz=1)
obj = MyObj(foo=123, rel_object=subobj)
obj.obj_relationships = {'rel_object': [('1.0', '1.0')]}
with mock.patch.object(obj, '_obj_make_obj_compatible') as mock_compat:
obj.obj_make_compatible({'rel_object': 'foo'}, '1.10')
mock_compat.assert_called_once_with({'rel_object': 'foo'}, '1.10',
'rel_object')
def test_obj_make_compatible_skips_unset_sub_objects(self):
obj = MyObj(foo=123)
obj.obj_relationships = {'rel_object': [('1.0', '1.0')]}
with mock.patch.object(obj, '_obj_make_obj_compatible') as mock_compat:
obj.obj_make_compatible({'rel_object': 'foo'}, '1.10')
self.assertFalse(mock_compat.called)
def test_obj_make_compatible_complains_about_missing_rules(self):
subobj = MyOwnedObject(baz=1)
obj = MyObj(foo=123, rel_object=subobj)
obj.obj_relationships = {}
self.assertRaises(exception.ObjectActionError,
obj.obj_make_compatible, {}, '1.0')
def test_obj_make_compatible_doesnt_skip_falsey_sub_objects(self):
class MyList(base.ObjectListBase, base.NovaObject):
VERSION = '1.2'
fields = {'objects': fields.ListOfObjectsField('MyObjElement')}
mylist = MyList(objects=[])
class MyOwner(base.NovaObject):
VERSION = '1.2'
fields = {'mylist': fields.ObjectField('MyList')}
obj_relationships = {
'mylist': [('1.1', '1.1')],
}
myowner = MyOwner(mylist=mylist)
primitive = myowner.obj_to_primitive('1.1')
self.assertIn('mylist', primitive['nova_object.data'])
def test_obj_make_compatible_handles_list_of_objects(self):
subobj = MyOwnedObject(baz=1)
obj = MyObj(rel_objects=[subobj])
obj.obj_relationships = {'rel_objects': [('1.0', '1.123')]}
def fake_make_compat(primitive, version):
self.assertEqual('1.123', version)
self.assertIn('baz', primitive)
with mock.patch.object(subobj, 'obj_make_compatible') as mock_mc:
mock_mc.side_effect = fake_make_compat
obj.obj_to_primitive('1.0')
self.assertTrue(mock_mc.called)
def test_delattr(self):
obj = MyObj(bar='foo')
del obj.bar
# Should appear unset now
self.assertFalse(obj.obj_attr_is_set('bar'))
# Make sure post-delete, references trigger lazy loads
self.assertEqual('loaded!', getattr(obj, 'bar'))
def test_delattr_unset(self):
obj = MyObj()
self.assertRaises(AttributeError, delattr, obj, 'bar')
class TestObject(_LocalTest, _TestObject):
def test_set_defaults(self):
obj = MyObj()
obj.obj_set_defaults('foo')
self.assertTrue(obj.obj_attr_is_set('foo'))
self.assertEqual(1, obj.foo)
def test_set_defaults_no_default(self):
obj = MyObj()
self.assertRaises(exception.ObjectActionError,
obj.obj_set_defaults, 'bar')
def test_set_all_defaults(self):
obj = MyObj()
obj.obj_set_defaults()
self.assertEqual(set(['deleted', 'foo', 'mutable_default']),
obj.obj_what_changed())
self.assertEqual(1, obj.foo)
def test_set_defaults_not_overwrite(self):
# NOTE(danms): deleted defaults to False, so verify that it does
# not get reset by obj_set_defaults()
obj = MyObj(deleted=True)
obj.obj_set_defaults()
self.assertEqual(1, obj.foo)
self.assertTrue(obj.deleted)
class TestRemoteObject(_RemoteTest, _TestObject):
def test_major_version_mismatch(self):
MyObj2.VERSION = '2.0'
self.assertRaises(exception.IncompatibleObjectVersion,
MyObj2.query, self.context)
def test_minor_version_greater(self):
MyObj2.VERSION = '1.7'
self.assertRaises(exception.IncompatibleObjectVersion,
MyObj2.query, self.context)
def test_minor_version_less(self):
MyObj2.VERSION = '1.2'
obj = MyObj2.query(self.context)
self.assertEqual(obj.bar, 'bar')
def test_compat(self):
MyObj2.VERSION = '1.1'
obj = MyObj2.query(self.context)
self.assertEqual('oldbar', obj.bar)
def test_revision_ignored(self):
MyObj2.VERSION = '1.1.456'
obj = MyObj2.query(self.context)
self.assertEqual('bar', obj.bar)
class TestObjectSerializer(_BaseTestCase):
def test_serialize_entity_primitive(self):
ser = base.NovaObjectSerializer()
for thing in (1, 'foo', [1, 2], {'foo': 'bar'}):
self.assertEqual(thing, ser.serialize_entity(None, thing))
def test_deserialize_entity_primitive(self):
ser = base.NovaObjectSerializer()
for thing in (1, 'foo', [1, 2], {'foo': 'bar'}):
self.assertEqual(thing, ser.deserialize_entity(None, thing))
def test_serialize_set_to_list(self):
ser = base.NovaObjectSerializer()
self.assertEqual([1, 2], ser.serialize_entity(None, set([1, 2])))
def _test_deserialize_entity_newer(self, obj_version, backported_to,
my_version='1.6'):
ser = base.NovaObjectSerializer()
ser._conductor = mock.Mock()
ser._conductor.object_backport.return_value = 'backported'
class MyTestObj(MyObj):
VERSION = my_version
obj = MyTestObj()
obj.VERSION = obj_version
primitive = obj.obj_to_primitive()
result = ser.deserialize_entity(self.context, primitive)
if backported_to is None:
self.assertFalse(ser._conductor.object_backport.called)
else:
self.assertEqual('backported', result)
ser._conductor.object_backport.assert_called_with(self.context,
primitive,
backported_to)
def test_deserialize_entity_newer_version_backports(self):
self._test_deserialize_entity_newer('1.25', '1.6')
def test_deserialize_entity_newer_revision_does_not_backport_zero(self):
self._test_deserialize_entity_newer('1.6.0', None)
def test_deserialize_entity_newer_revision_does_not_backport(self):
self._test_deserialize_entity_newer('1.6.1', None)
def test_deserialize_entity_newer_version_passes_revision(self):
self._test_deserialize_entity_newer('1.7', '1.6.1', '1.6.1')
def test_deserialize_dot_z_with_extra_stuff(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.6.1',
'nova_object.data': {
'foo': 1,
'unexpected_thing': 'foobar'}}
ser = base.NovaObjectSerializer()
obj = ser.deserialize_entity(self.context, primitive)
self.assertEqual(1, obj.foo)
self.assertFalse(hasattr(obj, 'unexpected_thing'))
# NOTE(danms): The serializer is where the logic lives that
# avoids backports for cases where only a .z difference in
# the received object version is detected. As a result, we
# end up with a version of what we expected, effectively the
# .0 of the object.
self.assertEqual('1.6', obj.VERSION)
def test_object_serialization(self):
ser = base.NovaObjectSerializer()
obj = MyObj()
primitive = ser.serialize_entity(self.context, obj)
self.assertIn('nova_object.name', primitive)
obj2 = ser.deserialize_entity(self.context, primitive)
self.assertIsInstance(obj2, MyObj)
self.assertEqual(self.context, obj2._context)
def test_object_serialization_iterables(self):
ser = base.NovaObjectSerializer()
obj = MyObj()
for iterable in (list, tuple, set):
thing = iterable([obj])
primitive = ser.serialize_entity(self.context, thing)
self.assertEqual(1, len(primitive))
for item in primitive:
self.assertNotIsInstance(item, base.NovaObject)
thing2 = ser.deserialize_entity(self.context, primitive)
self.assertEqual(1, len(thing2))
for item in thing2:
self.assertIsInstance(item, MyObj)
# dict case
thing = {'key': obj}
primitive = ser.serialize_entity(self.context, thing)
self.assertEqual(1, len(primitive))
for item in six.itervalues(primitive):
self.assertNotIsInstance(item, base.NovaObject)
thing2 = ser.deserialize_entity(self.context, primitive)
self.assertEqual(1, len(thing2))
for item in six.itervalues(thing2):
self.assertIsInstance(item, MyObj)
# object-action updates dict case
thing = {'foo': obj.obj_to_primitive()}
primitive = ser.serialize_entity(self.context, thing)
self.assertEqual(thing, primitive)
thing2 = ser.deserialize_entity(self.context, thing)
self.assertIsInstance(thing2['foo'], base.NovaObject)
class TestArgsSerializer(test.NoDBTestCase):
def setUp(self):
super(TestArgsSerializer, self).setUp()
self.now = timeutils.utcnow()
self.str_now = timeutils.strtime(at=self.now)
@base.serialize_args
def _test_serialize_args(self, *args, **kwargs):
expected_args = ('untouched', self.str_now, self.str_now)
for index, val in enumerate(args):
self.assertEqual(expected_args[index], val)
expected_kwargs = {'a': 'untouched', 'b': self.str_now,
'c': self.str_now}
for key, val in six.iteritems(kwargs):
self.assertEqual(expected_kwargs[key], val)
def test_serialize_args(self):
self._test_serialize_args('untouched', self.now, self.now,
a='untouched', b=self.now, c=self.now)
# NOTE(danms): The hashes in this list should only be changed if
# they come with a corresponding version bump in the affected
# objects
object_data = {
'Agent': '1.0-c0c092abaceb6f51efe5d82175f15eba',
'AgentList': '1.0-4f12bf96ca77315e7e023d588fb071f1',
'Aggregate': '1.1-1ab35c4516f71de0bef7087026ab10d1',
'AggregateList': '1.2-79689d69db4de545a82fe09f30468c53',
'BandwidthUsage': '1.2-c6e4c779c7f40f2407e3d70022e3cd1c',
'BandwidthUsageList': '1.2-77b4d43e641459f464a6aa4d53debd8f',
'BlockDeviceMapping': '1.9-72d92c263f03a5cbc1761b0ea4c66c22',
'BlockDeviceMappingList': '1.10-972d431e07463ae1f68e752521937b01',
'CellMapping': '1.0-7f1a7e85a22bbb7559fc730ab658b9bd',
'ComputeNode': '1.11-71784d2e6f2814ab467d4e0f69286843',
'ComputeNodeList': '1.11-8d269636229e8a39fef1c3514f77d0c0',
'DNSDomain': '1.0-7b0b2dab778454b6a7b6c66afe163a1a',
'DNSDomainList': '1.0-f876961b1a6afe400b49cf940671db86',
'EC2Ids': '1.0-474ee1094c7ec16f8ce657595d8c49d9',
'EC2InstanceMapping': '1.0-a4556eb5c5e94c045fe84f49cf71644f',
'EC2SnapshotMapping': '1.0-47e7ddabe1af966dce0cfd0ed6cd7cd1',
'EC2VolumeMapping': '1.0-5b713751d6f97bad620f3378a521020d',
'FixedIP': '1.10-b5818a33996228fc146f096d1403742c',
'FixedIPList': '1.10-d0db9597559409a4a01b3577500dfe5e',
'Flavor': '1.1-b6bb7a730a79d720344accefafacf7ee',
'FlavorList': '1.1-d96e87307f94062ce538f77b5e221e13',
'FloatingIP': '1.6-52a67d52d85eb8b3f324a5b7935a335b',
'FloatingIPList': '1.7-bdd31ccd6ff9bb0d290108397b3cd44c',
'HVSpec': '1.0-3999ff70698fc472c2d4d60359949f6b',
'ImageMeta': '1.1-642d1b2eb3e880a367f37d72dd76162d',
'ImageMetaProps': '1.1-8fe09b7872538f291649e77375f8ac4c',
'Instance': '1.20-260d385315d4868b6397c61a13109841',
'InstanceAction': '1.1-f9f293e526b66fca0d05c3b3a2d13914',
'InstanceActionEvent': '1.1-e56a64fa4710e43ef7af2ad9d6028b33',
'InstanceActionEventList': '1.0-c37db4e58b637a857c90fb02284d8f7c',
'InstanceActionList': '1.0-89266105d853ff9b8f83351776fab788',
'InstanceExternalEvent': '1.0-33cc4a1bbd0655f68c0ee791b95da7e6',
'InstanceFault': '1.2-7ef01f16f1084ad1304a513d6d410a38',
'InstanceFaultList': '1.1-ac4076924f7eb5374a92e4f9db7aa053',
'InstanceGroup': '1.9-a413a4ec0ff391e3ef0faa4e3e2a96d0',
'InstanceGroupList': '1.6-1e383df73d9bd224714df83d9a9983bb',
'InstanceInfoCache': '1.5-cd8b96fefe0fc8d4d337243ba0bf0e1e',
'InstanceList': '1.17-64f6949d58e4ecd3219142f1567a61d9',
'InstanceMapping': '1.0-47ef26034dfcbea78427565d9177fe50',
'InstanceMappingList': '1.0-b7b108f6a56bd100c20a3ebd5f3801a1',
'InstanceNUMACell': '1.2-535ef30e0de2d6a0d26a71bd58ecafc4',
'InstanceNUMATopology': '1.1-d944a7d6c21e1c773ffdf09c6d025954',
'InstancePCIRequest': '1.1-b1d75ebc716cb12906d9d513890092bf',
'InstancePCIRequests': '1.1-fc8d179960869c9af038205a80af2541',
'KeyPair': '1.3-bfaa2a8b148cdf11e0c72435d9dd097a',
'KeyPairList': '1.2-60f984184dc5a8eba6e34e20cbabef04',
'Migration': '1.2-331b1f37d0b20b932614181b9832c860',
'MigrationList': '1.2-5e79c0693d7ebe4e9ac03b5db11ab243',
'MyObj': '1.6-ee7b607402fbfb3390a92ab7199e0d88',
'MyOwnedObject': '1.0-fec853730bd02d54cc32771dd67f08a0',
'NUMACell': '1.2-74fc993ac5c83005e76e34e8487f1c05',
'NUMAPagesTopology': '1.0-c71d86317283266dc8364c149155e48e',
'NUMATopology': '1.2-c63fad38be73b6afd04715c9c1b29220',
'NUMATopologyLimits': '1.0-9463e0edd40f64765ae518a539b9dfd2',
'Network': '1.2-a977ab383aa462a479b2fae8211a5dde',
'NetworkList': '1.2-b2ae592657f06f6edce4c616821abcf8',
'NetworkRequest': '1.1-7a3e4ca2ce1e7b62d8400488f2f2b756',
'NetworkRequestList': '1.1-ea2a8e1c1ecf3608af2956e657adeb4c',
'PciDevice': '1.3-4d43db45e3978fca4280f696633c7c20',
'PciDeviceList': '1.1-2b8b6d0cf622c58543c5dec50c7e877c',
'PciDevicePool': '1.1-3f5ddc3ff7bfa14da7f6c7e9904cc000',
'PciDevicePoolList': '1.1-ea2a8e1c1ecf3608af2956e657adeb4c',
'Quotas': '1.2-1fe4cd50593aaf5d36a6dc5ab3f98fb3',
'QuotasNoOp': '1.2-e041ddeb7dc8188ca71706f78aad41c1',
'S3ImageMapping': '1.0-7dd7366a890d82660ed121de9092276e',
'SecurityGroup': '1.1-0e1b9ba42fe85c13c1437f8b74bdb976',
'SecurityGroupList': '1.0-a3bb51998e7d2a95b3e613111e853817',
'SecurityGroupRule': '1.1-ae1da17b79970012e8536f88cb3c6b29',
'SecurityGroupRuleList': '1.1-521f1aeb7b0cc00d026175509289d020',
'Service': '1.13-bc6c9671a91439e08224c2652da5fc4c',
'ServiceList': '1.11-d1728430a30700c143e542b7c75f65b0',
'Tag': '1.0-616bf44af4a22e853c17b37a758ec73e',
'TagList': '1.0-e16d65894484b7530b720792ffbbbd02',
'TestSubclassedObject': '1.6-716fc8b481c9374f7e222de03ba0a621',
'VirtCPUFeature': '1.0-3310718d8c72309259a6e39bdefe83ee',
'VirtCPUModel': '1.0-6a5cc9f322729fc70ddc6733bacd57d3',
'VirtCPUTopology': '1.0-fc694de72e20298f7c6bab1083fd4563',
'VirtualInterface': '1.0-19921e38cba320f355d56ecbf8f29587',
'VirtualInterfaceList': '1.0-16a5c18df5574a9405e1a8b350ed8b27',
}
object_relationships = {
'BlockDeviceMapping': {'Instance': '1.20'},
'ComputeNode': {'HVSpec': '1.0', 'PciDevicePoolList': '1.1'},
'FixedIP': {'Instance': '1.20', 'Network': '1.2',
'VirtualInterface': '1.0',
'FloatingIPList': '1.7'},
'FloatingIP': {'FixedIP': '1.10'},
'ImageMeta': {'ImageMetaProps': '1.1'},
'Instance': {'InstanceFault': '1.2',
'InstanceInfoCache': '1.5',
'InstanceNUMATopology': '1.1',
'PciDeviceList': '1.1',
'TagList': '1.0',
'SecurityGroupList': '1.0',
'Flavor': '1.1',
'InstancePCIRequests': '1.1',
'VirtCPUModel': '1.0',
'EC2Ids': '1.0',
},
'InstanceNUMACell': {'VirtCPUTopology': '1.0'},
'InstanceNUMATopology': {'InstanceNUMACell': '1.2'},
'InstancePCIRequests': {'InstancePCIRequest': '1.1'},
'MyObj': {'MyOwnedObject': '1.0'},
'NUMACell': {'NUMAPagesTopology': '1.0'},
'NUMATopology': {'NUMACell': '1.2'},
'SecurityGroupRule': {'SecurityGroup': '1.1'},
'Service': {'ComputeNode': '1.11'},
'TestSubclassedObject': {'MyOwnedObject': '1.0'},
'VirtCPUModel': {'VirtCPUFeature': '1.0', 'VirtCPUTopology': '1.0'},
}
class TestObjectVersions(test.NoDBTestCase):
def _find_remotable_method(self, cls, thing, parent_was_remotable=False):
"""Follow a chain of remotable things down to the original function."""
if isinstance(thing, classmethod):
return self._find_remotable_method(cls, thing.__get__(None, cls))
elif inspect.ismethod(thing) and hasattr(thing, 'remotable'):
return self._find_remotable_method(cls, thing.original_fn,
parent_was_remotable=True)
elif parent_was_remotable:
# We must be the first non-remotable thing underneath a stack of
# remotable things (i.e. the actual implementation method)
return thing
else:
# This means the top-level thing never hit a remotable layer
return None
def _get_fingerprint(self, obj_name):
obj_class = base.NovaObject._obj_classes[obj_name][0]
fields = obj_class.fields.items()
fields.sort()
methods = []
for name in dir(obj_class):
thing = getattr(obj_class, name)
if inspect.ismethod(thing) or isinstance(thing, classmethod):
method = self._find_remotable_method(obj_class, thing)
if method:
methods.append((name, inspect.getargspec(method)))
methods.sort()
# NOTE(danms): Things that need a version bump are any fields
# and their types, or the signatures of any remotable methods.
# Of course, these are just the mechanical changes we can detect,
# but many other things may require a version bump (method behavior
# and return value changes, for example).
if hasattr(obj_class, 'child_versions'):
relevant_data = (fields, methods,
OrderedDict(
sorted(obj_class.child_versions.items())))
else:
relevant_data = (fields, methods)
fingerprint = '%s-%s' % (obj_class.VERSION,
hashlib.md5(str(relevant_data)).hexdigest())
return fingerprint
def test_versions(self):
fingerprints = {}
for obj_name in base.NovaObject._obj_classes:
fingerprints[obj_name] = self._get_fingerprint(obj_name)
if os.getenv('GENERATE_HASHES'):
file('object_hashes.txt', 'w').write(
pprint.pformat(fingerprints))
raise test.TestingException(
'Generated hashes in object_hashes.txt')
stored = set(object_data.items())
computed = set(fingerprints.items())
changed = stored.symmetric_difference(computed)
expected = {}
actual = {}
for name, hash in changed:
expected[name] = object_data.get(name)
actual[name] = fingerprints.get(name)
self.assertEqual(expected, actual,
'Some objects have changed; please make sure the '
'versions have been bumped, and then update their '
'hashes here.')
def test_registry_matches_metaclass(self):
reference = set(object_data.keys())
actual = set(base.NovaObjectRegistry.classes)
test_objects = set(['MyObj', 'MyOwnedObject', 'TestSubclassedObject'])
# NOTE(danms): In the new registry, we don't implicitly track test
# objects, so make sure that the difference between the metaclass and
# the opt-in registry is the set of test objects.
self.assertEqual(test_objects, reference.symmetric_difference(actual))
def _get_object_field_name(self, field):
if isinstance(field._type, fields.Object):
return field._type._obj_name
if isinstance(field, fields.ListOfObjectsField):
return field._type._element_type._type._obj_name
return None
def _build_tree(self, tree, obj_class):
obj_name = obj_class.obj_name()
if obj_name in tree:
return
for name, field in obj_class.fields.items():
# Notes(yjiang5): ObjectListBase should be covered by
# child_versions test
if (issubclass(obj_class, base.ObjectListBase) and
name == 'objects'):
continue
sub_obj_name = self._get_object_field_name(field)
if sub_obj_name:
sub_obj_class = base.NovaObject._obj_classes[sub_obj_name][0]
self._build_tree(tree, sub_obj_class)
tree.setdefault(obj_name, {})
tree[obj_name][sub_obj_name] = sub_obj_class.VERSION
def test_relationships(self):
tree = {}
for obj_name in base.NovaObject._obj_classes.keys():
self._build_tree(tree, base.NovaObject._obj_classes[obj_name][0])
stored = set([(x, str(y)) for x, y in object_relationships.items()])
computed = set([(x, str(y)) for x, y in tree.items()])
changed = stored.symmetric_difference(computed)
expected = {}
actual = {}
for name, deps in changed:
expected[name] = object_relationships.get(name)
actual[name] = tree.get(name)
self.assertEqual(expected, actual,
'Some objects have changed dependencies. '
'Please make sure to bump the versions of '
'parent objects and provide a rule in their '
'obj_make_compatible() routines to backlevel '
'the child object.')
def test_obj_make_compatible(self):
# Iterate all object classes and verify that we can run
# obj_make_compatible with every older version than current.
# This doesn't actually test the data conversions, but it at least
# makes sure the method doesn't blow up on something basic like
# expecting the wrong version format.
for obj_name in base.NovaObject._obj_classes:
obj_class = base.NovaObject._obj_classes[obj_name][0]
version = utils.convert_version_to_tuple(obj_class.VERSION)
for n in range(version[1]):
test_version = '%d.%d' % (version[0], n)
LOG.info('testing obj: %s version: %s' %
(obj_name, test_version))
obj_class().obj_to_primitive(target_version=test_version)
def _get_obj_to_test(self, obj_class):
obj = obj_class()
for fname, ftype in obj.fields.items():
if isinstance(ftype, fields.ObjectField):
fobjname = ftype.AUTO_TYPE._obj_name
fobjcls = base.NovaObject._obj_classes[fobjname][0]
setattr(obj, fname, self._get_obj_to_test(fobjcls))
elif isinstance(ftype, fields.ListOfObjectsField):
# FIXME(danms): This will result in no tests for this
# field type...
setattr(obj, fname, [])
return obj
def _find_version_mapping(self, my_ver, versions):
closest = None
my_ver = utils.convert_version_to_tuple(my_ver)
for _my, _child in versions:
_my = utils.convert_version_to_tuple(_my)
_child = utils.convert_version_to_tuple(_child)
if _my == my_ver:
return '%s.%s' % _child
elif _my < my_ver:
closest = _child
if closest:
return '%s.%s' % closest
else:
return None
def _validate_object_fields(self, obj_class, primitive):
for fname, ftype in obj_class.fields.items():
if isinstance(ftype, fields.ObjectField):
exp_vers = obj_class.obj_relationships[fname]
exp_ver = self._find_version_mapping(
primitive['nova_object.version'], exp_vers)
if exp_ver is None:
self.assertNotIn(fname, primitive['nova_object.data'])
else:
child_p = primitive['nova_object.data'][fname]
self.assertEqual(exp_ver,
child_p['nova_object.version'])
def test_obj_make_compatible_with_data(self):
# Iterate all object classes and verify that we can run
# obj_make_compatible with every older version than current.
# This doesn't actually test the data conversions, but it at least
# makes sure the method doesn't blow up on something basic like
# expecting the wrong version format.
for obj_name in base.NovaObject._obj_classes:
obj_class = base.NovaObject._obj_classes[obj_name][0]
if 'tests.unit' in obj_class.__module__:
# NOTE(danms): Skip test objects. When we move to
# oslo.versionedobjects, we won't have to do this
continue
version = utils.convert_version_to_tuple(obj_class.VERSION)
for n in range(version[1]):
test_version = '%d.%d' % (version[0], n)
LOG.info('testing obj: %s version: %s' %
(obj_name, test_version))
test_object = self._get_obj_to_test(obj_class)
obj_p = test_object.obj_to_primitive(
target_version=test_version)
self._validate_object_fields(obj_class, obj_p)
def test_obj_relationships_in_order(self):
        # Iterate all object classes and verify that the version mappings
        # recorded in obj_relationships are listed in ascending order, for
        # both the parent object version and the child object version.
        # Out-of-order entries would break backlevelling of child objects
        # in obj_make_compatible().
for obj_name in base.NovaObject._obj_classes:
obj_class = base.NovaObject._obj_classes[obj_name][0]
for field, versions in obj_class.obj_relationships.items():
last_my_version = (0, 0)
last_child_version = (0, 0)
for my_version, child_version in versions:
_my_version = utils.convert_version_to_tuple(my_version)
_ch_version = utils.convert_version_to_tuple(child_version)
self.assertTrue((last_my_version < _my_version
and last_child_version <= _ch_version),
'Object %s relationship '
'%s->%s for field %s is out of order' % (
obj_name, my_version, child_version,
field))
last_my_version = _my_version
last_child_version = _ch_version
class TestObjEqualPrims(test.NoDBTestCase):
def test_object_equal(self):
obj1 = MyObj(foo=1, bar='goodbye')
obj1.obj_reset_changes()
obj2 = MyObj(foo=1, bar='goodbye')
obj2.obj_reset_changes()
obj2.bar = 'goodbye'
        # re-setting 'bar' marks the field as updated on obj2 even though
        # the value is unchanged
        self.assertTrue(base.obj_equal_prims(obj1, obj2),
                        "Objects that differ only because one has a field "
                        "marked as updated should be equal")
def test_object_not_equal(self):
obj1 = MyObj(foo=1, bar='goodbye')
obj1.obj_reset_changes()
obj2 = MyObj(foo=1, bar='hello')
obj2.obj_reset_changes()
self.assertFalse(base.obj_equal_prims(obj1, obj2),
"Objects that differ in any field "
"should not be equal")
def test_object_ignore_equal(self):
obj1 = MyObj(foo=1, bar='goodbye')
obj1.obj_reset_changes()
obj2 = MyObj(foo=1, bar='hello')
obj2.obj_reset_changes()
self.assertTrue(base.obj_equal_prims(obj1, obj2, ['bar']),
"Objects that only differ in an ignored field "
"should be equal")
| apache-2.0 | 7,287,952,346,851,566,000 | 40.030885 | 79 | 0.597186 | false |
RachitKansal/scikit-learn | examples/mixture/plot_gmm_classifier.py | 250 | 3918 | """
==================
GMM classification
==================
Demonstration of Gaussian mixture models for classification.
See :ref:`gmm` for more information on the estimator.
Plots predicted labels on both training and held out test data using a
variety of GMM classifiers on the iris dataset.
Compares GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
print(__doc__)
# Author: Ron Weiss <[email protected]>, Gael Varoquaux
# License: BSD 3 clause
# $Id$
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from sklearn import datasets
from sklearn.cross_validation import StratifiedKFold
from sklearn.externals.six.moves import xrange
from sklearn.mixture import GMM
def make_ellipses(gmm, ax):
for n, color in enumerate('rgb'):
v, w = np.linalg.eigh(gmm._get_covars()[n][:2, :2])
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v *= 9
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(iris.target, n_folds=4)
# Only take the first fold.
train_index, test_index = next(iter(skf))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
classifiers = dict((covar_type, GMM(n_components=n_classes,
covariance_type=covar_type, init_params='wc', n_iter=20))
for covar_type in ['spherical', 'diag', 'tied', 'full'])
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * n_classifiers / 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, classifier) in enumerate(classifiers.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
classifier.means_ = np.array([X_train[y_train == i].mean(axis=0)
for i in xrange(n_classes)])
# Train the other parameters using the EM algorithm.
classifier.fit(X_train)
h = plt.subplot(2, n_classifiers / 2, index + 1)
make_ellipses(classifier, h)
for n, color in enumerate('rgb'):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], 0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate('rgb'):
data = X_test[y_test == n]
plt.plot(data[:, 0], data[:, 1], 'x', color=color)
y_train_pred = classifier.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = classifier.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(loc='lower right', prop=dict(size=12))
plt.show()
| bsd-3-clause | 7,780,601,851,362,544,000 | 31.65 | 77 | 0.649311 | false |
jboeuf/grpc | src/python/grpcio_testing/grpc_testing/_common.py | 27 | 4481 | # Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common interfaces and implementation."""
import abc
import collections
import six
def _fuss(tuplified_metadata):
return tuplified_metadata + ((
'grpc.metadata_added_by_runtime',
'gRPC is allowed to add metadata in transmission and does so.',
),)
FUSSED_EMPTY_METADATA = _fuss(())
def fuss_with_metadata(metadata):
if metadata is None:
return FUSSED_EMPTY_METADATA
else:
return _fuss(tuple(metadata))
def rpc_names(service_descriptors):
rpc_names_to_descriptors = {}
for service_descriptor in service_descriptors:
for method_descriptor in service_descriptor.methods_by_name.values():
rpc_name = '/{}/{}'.format(service_descriptor.full_name,
method_descriptor.name)
rpc_names_to_descriptors[rpc_name] = method_descriptor
return rpc_names_to_descriptors
class ChannelRpcRead(
collections.namedtuple('ChannelRpcRead', (
'response',
'trailing_metadata',
'code',
'details',
))):
pass
class ChannelRpcHandler(six.with_metaclass(abc.ABCMeta)):
@abc.abstractmethod
def initial_metadata(self):
raise NotImplementedError()
@abc.abstractmethod
def add_request(self, request):
raise NotImplementedError()
@abc.abstractmethod
def close_requests(self):
raise NotImplementedError()
@abc.abstractmethod
def take_response(self):
raise NotImplementedError()
@abc.abstractmethod
def cancel(self, code, details):
raise NotImplementedError()
@abc.abstractmethod
def termination(self):
raise NotImplementedError()
@abc.abstractmethod
def is_active(self):
raise NotImplementedError()
@abc.abstractmethod
def time_remaining(self):
raise NotImplementedError()
@abc.abstractmethod
def add_callback(self, callback):
raise NotImplementedError()
class ChannelHandler(six.with_metaclass(abc.ABCMeta)):
@abc.abstractmethod
def invoke_rpc(self, method_full_rpc_name, invocation_metadata, requests,
requests_closed, timeout):
raise NotImplementedError()
class ServerRpcRead(
collections.namedtuple('ServerRpcRead', (
'request',
'requests_closed',
'terminated',
))):
pass
REQUESTS_CLOSED = ServerRpcRead(None, True, False)
TERMINATED = ServerRpcRead(None, False, True)
class ServerRpcHandler(six.with_metaclass(abc.ABCMeta)):
@abc.abstractmethod
def send_initial_metadata(self, initial_metadata):
raise NotImplementedError()
@abc.abstractmethod
def take_request(self):
raise NotImplementedError()
@abc.abstractmethod
def add_response(self, response):
raise NotImplementedError()
@abc.abstractmethod
def send_termination(self, trailing_metadata, code, details):
raise NotImplementedError()
@abc.abstractmethod
def add_termination_callback(self, callback):
raise NotImplementedError()
class Serverish(six.with_metaclass(abc.ABCMeta)):
@abc.abstractmethod
def invoke_unary_unary(self, method_descriptor, handler,
invocation_metadata, request, deadline):
raise NotImplementedError()
@abc.abstractmethod
def invoke_unary_stream(self, method_descriptor, handler,
invocation_metadata, request, deadline):
raise NotImplementedError()
@abc.abstractmethod
def invoke_stream_unary(self, method_descriptor, handler,
invocation_metadata, deadline):
raise NotImplementedError()
@abc.abstractmethod
def invoke_stream_stream(self, method_descriptor, handler,
invocation_metadata, deadline):
raise NotImplementedError()
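# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module) of how the metadata
# "fussing" helpers above behave; the key/value pair is an assumed example.
#
#     fuss_with_metadata((('trace-id', 'abc123'),))
#     # -> (('trace-id', 'abc123'),
#     #     ('grpc.metadata_added_by_runtime',
#     #      'gRPC is allowed to add metadata in transmission and does so.'))
#
#     fuss_with_metadata(None)   # -> FUSSED_EMPTY_METADATA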
| apache-2.0 | -6,973,001,175,500,439,000 | 26.660494 | 77 | 0.665923 | false |
amith01994/intellij-community | python/lib/Lib/site-packages/django/contrib/contenttypes/tests.py | 87 | 2781 | from django import db
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.contrib.contenttypes.views import shortcut
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpRequest
from django.test import TestCase
class ContentTypesTests(TestCase):
def setUp(self):
# First, let's make sure we're dealing with a blank slate (and that
# DEBUG is on so that queries get logged)
self.old_DEBUG = settings.DEBUG
self.old_Site_meta_installed = Site._meta.installed
settings.DEBUG = True
ContentType.objects.clear_cache()
db.reset_queries()
def tearDown(self):
settings.DEBUG = self.old_DEBUG
Site._meta.installed = self.old_Site_meta_installed
def test_lookup_cache(self):
"""
Make sure that the content type cache (see ContentTypeManager)
works correctly. Lookups for a particular content type -- by model or
by ID -- should hit the database only on the first lookup.
"""
# At this point, a lookup for a ContentType should hit the DB
ContentType.objects.get_for_model(ContentType)
self.assertEqual(1, len(db.connection.queries))
# A second hit, though, won't hit the DB, nor will a lookup by ID
ct = ContentType.objects.get_for_model(ContentType)
self.assertEqual(1, len(db.connection.queries))
ContentType.objects.get_for_id(ct.id)
self.assertEqual(1, len(db.connection.queries))
# Once we clear the cache, another lookup will again hit the DB
ContentType.objects.clear_cache()
ContentType.objects.get_for_model(ContentType)
len(db.connection.queries)
self.assertEqual(2, len(db.connection.queries))
def test_shortcut_view(self):
"""
Check that the shortcut view (used for the admin "view on site"
functionality) returns a complete URL regardless of whether the sites
framework is installed
"""
request = HttpRequest()
request.META = {
"SERVER_NAME": "Example.com",
"SERVER_PORT": "80",
}
from django.contrib.auth.models import User
user_ct = ContentType.objects.get_for_model(User)
obj = User.objects.create(username="john")
Site._meta.installed = True
response = shortcut(request, user_ct.id, obj.id)
self.assertEqual("http://example.com/users/john/", response._headers.get("location")[1])
Site._meta.installed = False
response = shortcut(request, user_ct.id, obj.id)
self.assertEqual("http://Example.com/users/john/", response._headers.get("location")[1])
| apache-2.0 | -1,773,615,672,891,090,400 | 39.304348 | 96 | 0.666667 | false |
nelson-liu/scikit-learn | sklearn/cluster/affinity_propagation_.py | 30 | 10689 | """Affinity Propagation clustering algorithm."""
# Author: Alexandre Gramfort [email protected]
# Gael Varoquaux [email protected]
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import as_float_array, check_array
from ..utils.validation import check_is_fitted
from ..metrics import euclidean_distances
from ..metrics import pairwise_distances_argmin
def affinity_propagation(S, preference=None, convergence_iter=15, max_iter=200,
damping=0.5, copy=True, verbose=False,
return_n_iter=False):
"""Perform Affinity Propagation Clustering of data
Read more in the :ref:`User Guide <affinity_propagation>`.
Parameters
----------
S : array-like, shape (n_samples, n_samples)
Matrix of similarities between points
preference : array-like, shape (n_samples,) or float, optional
Preferences for each point - points with larger values of
preferences are more likely to be chosen as exemplars. The number of
exemplars, i.e. of clusters, is influenced by the input preferences
value. If the preferences are not passed as arguments, they will be
set to the median of the input similarities (resulting in a moderate
number of clusters). For a smaller amount of clusters, this can be set
to the minimum value of the similarities.
convergence_iter : int, optional, default: 15
Number of iterations with no change in the number
of estimated clusters that stops the convergence.
max_iter : int, optional, default: 200
Maximum number of iterations
damping : float, optional, default: 0.5
Damping factor between 0.5 and 1.
copy : boolean, optional, default: True
If copy is False, the affinity matrix is modified inplace by the
algorithm, for memory efficiency
verbose : boolean, optional, default: False
The verbosity level
return_n_iter : bool, default False
Whether or not to return the number of iterations.
Returns
-------
cluster_centers_indices : array, shape (n_clusters,)
index of clusters centers
labels : array, shape (n_samples,)
cluster labels for each point
n_iter : int
number of iterations run. Returned only if `return_n_iter` is
set to True.
Notes
-----
See examples/cluster/plot_affinity_propagation.py for an example.
References
----------
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
S = as_float_array(S, copy=copy)
n_samples = S.shape[0]
if S.shape[0] != S.shape[1]:
raise ValueError("S must be a square array (shape=%s)" % repr(S.shape))
if preference is None:
preference = np.median(S)
if damping < 0.5 or damping >= 1:
raise ValueError('damping must be >= 0.5 and < 1')
random_state = np.random.RandomState(0)
# Place preference on the diagonal of S
S.flat[::(n_samples + 1)] = preference
A = np.zeros((n_samples, n_samples))
R = np.zeros((n_samples, n_samples)) # Initialize messages
# Intermediate results
tmp = np.zeros((n_samples, n_samples))
# Remove degeneracies
S += ((np.finfo(np.double).eps * S + np.finfo(np.double).tiny * 100) *
random_state.randn(n_samples, n_samples))
# Execute parallel affinity propagation updates
e = np.zeros((n_samples, convergence_iter))
ind = np.arange(n_samples)
for it in range(max_iter):
# tmp = A + S; compute responsibilities
np.add(A, S, tmp)
I = np.argmax(tmp, axis=1)
Y = tmp[ind, I] # np.max(A + S, axis=1)
tmp[ind, I] = -np.inf
Y2 = np.max(tmp, axis=1)
# tmp = Rnew
np.subtract(S, Y[:, None], tmp)
tmp[ind, I] = S[ind, I] - Y2
# Damping
tmp *= 1 - damping
R *= damping
R += tmp
# tmp = Rp; compute availabilities
np.maximum(R, 0, tmp)
tmp.flat[::n_samples + 1] = R.flat[::n_samples + 1]
# tmp = -Anew
tmp -= np.sum(tmp, axis=0)
dA = np.diag(tmp).copy()
tmp.clip(0, np.inf, tmp)
tmp.flat[::n_samples + 1] = dA
# Damping
tmp *= 1 - damping
A *= damping
A -= tmp
# Check for convergence
E = (np.diag(A) + np.diag(R)) > 0
e[:, it % convergence_iter] = E
K = np.sum(E, axis=0)
if it >= convergence_iter:
se = np.sum(e, axis=1)
unconverged = (np.sum((se == convergence_iter) + (se == 0))
!= n_samples)
if (not unconverged and (K > 0)) or (it == max_iter):
if verbose:
print("Converged after %d iterations." % it)
break
else:
if verbose:
print("Did not converge")
I = np.where(np.diag(A + R) > 0)[0]
K = I.size # Identify exemplars
if K > 0:
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K) # Identify clusters
# Refine the final set of exemplars and clusters and return results
for k in range(K):
ii = np.where(c == k)[0]
j = np.argmax(np.sum(S[ii[:, np.newaxis], ii], axis=0))
I[k] = ii[j]
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K)
labels = I[c]
# Reduce labels to a sorted, gapless, list
cluster_centers_indices = np.unique(labels)
labels = np.searchsorted(cluster_centers_indices, labels)
else:
labels = np.empty((n_samples, 1))
cluster_centers_indices = None
labels.fill(np.nan)
if return_n_iter:
return cluster_centers_indices, labels, it + 1
else:
return cluster_centers_indices, labels
###############################################################################
class AffinityPropagation(BaseEstimator, ClusterMixin):
"""Perform Affinity Propagation Clustering of data.
Read more in the :ref:`User Guide <affinity_propagation>`.
Parameters
----------
damping : float, optional, default: 0.5
Damping factor between 0.5 and 1.
convergence_iter : int, optional, default: 15
Number of iterations with no change in the number
of estimated clusters that stops the convergence.
max_iter : int, optional, default: 200
Maximum number of iterations.
copy : boolean, optional, default: True
Make a copy of input data.
preference : array-like, shape (n_samples,) or float, optional
Preferences for each point - points with larger values of
preferences are more likely to be chosen as exemplars. The number
of exemplars, ie of clusters, is influenced by the input
preferences value. If the preferences are not passed as arguments,
they will be set to the median of the input similarities.
affinity : string, optional, default=``euclidean``
Which affinity to use. At the moment ``precomputed`` and
``euclidean`` are supported. ``euclidean`` uses the
negative squared euclidean distance between points.
verbose : boolean, optional, default: False
Whether to be verbose.
Attributes
----------
cluster_centers_indices_ : array, shape (n_clusters,)
Indices of cluster centers
cluster_centers_ : array, shape (n_clusters, n_features)
Cluster centers (if affinity != ``precomputed``).
labels_ : array, shape (n_samples,)
Labels of each point
affinity_matrix_ : array, shape (n_samples, n_samples)
Stores the affinity matrix used in ``fit``.
n_iter_ : int
Number of iterations taken to converge.
Notes
-----
See examples/cluster/plot_affinity_propagation.py for an example.
The algorithmic complexity of affinity propagation is quadratic
in the number of points.
References
----------
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
def __init__(self, damping=.5, max_iter=200, convergence_iter=15,
copy=True, preference=None, affinity='euclidean',
verbose=False):
self.damping = damping
self.max_iter = max_iter
self.convergence_iter = convergence_iter
self.copy = copy
self.verbose = verbose
self.preference = preference
self.affinity = affinity
@property
def _pairwise(self):
return self.affinity == "precomputed"
def fit(self, X, y=None):
""" Create affinity matrix from negative euclidean distances, then
apply affinity propagation clustering.
Parameters
----------
X : array-like, shape (n_samples, n_features) or (n_samples, n_samples)
Data matrix or, if affinity is ``precomputed``, matrix of
similarities / affinities.
"""
X = check_array(X, accept_sparse='csr')
if self.affinity == "precomputed":
self.affinity_matrix_ = X
elif self.affinity == "euclidean":
self.affinity_matrix_ = -euclidean_distances(X, squared=True)
else:
raise ValueError("Affinity must be 'precomputed' or "
"'euclidean'. Got %s instead"
% str(self.affinity))
self.cluster_centers_indices_, self.labels_, self.n_iter_ = \
affinity_propagation(
self.affinity_matrix_, self.preference, max_iter=self.max_iter,
convergence_iter=self.convergence_iter, damping=self.damping,
copy=self.copy, verbose=self.verbose, return_n_iter=True)
if self.affinity != "precomputed":
self.cluster_centers_ = X[self.cluster_centers_indices_].copy()
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data to predict.
Returns
-------
labels : array, shape (n_samples,)
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_indices_")
if not hasattr(self, "cluster_centers_"):
raise ValueError("Predict method is not supported when "
"affinity='precomputed'.")
return pairwise_distances_argmin(X, self.cluster_centers_)
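# ---------------------------------------------------------------------------
# Usage sketch (not part of the original scikit-learn module): cluster a tiny
# synthetic 2-D dataset with the estimator defined above. The data points and
# the preference value are illustrative assumptions only.
if __name__ == "__main__":
    X_demo = np.array([[1.0, 2.0], [1.1, 1.9], [0.9, 2.1],
                       [8.0, 8.0], [8.1, 7.9], [7.9, 8.2]])
    af = AffinityPropagation(preference=-50).fit(X_demo)
    # One exemplar is chosen per cluster; labels_ maps each sample to it.
    print("cluster centers:\n%s" % af.cluster_centers_)
    print("labels: %s" % af.labels_)
    print("predicted: %s" % af.predict(np.array([[1.05, 2.05], [8.05, 8.05]])))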
| bsd-3-clause | -33,798,193,160,427,796 | 31.990741 | 79 | 0.597717 | false |
ztemt/NX505J_5.1_kernel | scripts/build-all.py | 1474 | 10189 | #! /usr/bin/env python
# Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import errno
import glob
from optparse import OptionParser
import subprocess
import os
import os.path
import re
import shutil
import sys
version = 'build-all.py, version 0.01'
build_dir = '../all-kernels'
make_command = ["vmlinux", "modules", "dtbs"]
make_env = os.environ
make_env.update({
'ARCH': 'arm',
'KCONFIG_NOTIMESTAMP': 'true' })
make_env.setdefault('CROSS_COMPILE', 'arm-none-linux-gnueabi-')
all_options = {}
def error(msg):
sys.stderr.write("error: %s\n" % msg)
def fail(msg):
"""Fail with a user-printed message"""
error(msg)
sys.exit(1)
def check_kernel():
"""Ensure that PWD is a kernel directory"""
if (not os.path.isfile('MAINTAINERS') or
not os.path.isfile('arch/arm/mach-msm/Kconfig')):
fail("This doesn't seem to be an MSM kernel dir")
def check_build():
"""Ensure that the build directory is present."""
if not os.path.isdir(build_dir):
try:
os.makedirs(build_dir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def update_config(file, str):
print 'Updating %s with \'%s\'\n' % (file, str)
defconfig = open(file, 'a')
defconfig.write(str + '\n')
defconfig.close()
def scan_configs():
"""Get the full list of defconfigs appropriate for this tree."""
names = {}
arch_pats = (
r'[fm]sm[0-9]*_defconfig',
r'apq*_defconfig',
r'qsd*_defconfig',
r'msmkrypton*_defconfig',
)
for p in arch_pats:
for n in glob.glob('arch/arm/configs/' + p):
names[os.path.basename(n)[:-10]] = n
return names
class Builder:
def __init__(self, logname):
self.logname = logname
self.fd = open(logname, 'w')
def run(self, args):
devnull = open('/dev/null', 'r')
proc = subprocess.Popen(args, stdin=devnull,
env=make_env,
bufsize=0,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
count = 0
# for line in proc.stdout:
rawfd = proc.stdout.fileno()
while True:
line = os.read(rawfd, 1024)
if not line:
break
self.fd.write(line)
self.fd.flush()
if all_options.verbose:
sys.stdout.write(line)
sys.stdout.flush()
else:
for i in range(line.count('\n')):
count += 1
if count == 64:
count = 0
print
sys.stdout.write('.')
sys.stdout.flush()
print
result = proc.wait()
self.fd.close()
return result
failed_targets = []
def build(target):
dest_dir = os.path.join(build_dir, target)
log_name = '%s/log-%s.log' % (build_dir, target)
print 'Building %s in %s log %s' % (target, dest_dir, log_name)
if not os.path.isdir(dest_dir):
os.mkdir(dest_dir)
defconfig = 'arch/arm/configs/%s_defconfig' % target
dotconfig = '%s/.config' % dest_dir
savedefconfig = '%s/defconfig' % dest_dir
shutil.copyfile(defconfig, dotconfig)
staging_dir = 'install_staging'
modi_dir = '%s' % staging_dir
hdri_dir = '%s/usr' % staging_dir
shutil.rmtree(os.path.join(dest_dir, staging_dir), ignore_errors=True)
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'%s_defconfig' % target], env=make_env, stdin=devnull)
devnull.close()
if not all_options.updateconfigs:
# Build targets can be dependent upon the completion of previous
# build targets, so build them one at a time.
cmd_line = ['make',
'INSTALL_HDR_PATH=%s' % hdri_dir,
'INSTALL_MOD_PATH=%s' % modi_dir,
'O=%s' % dest_dir]
build_targets = []
for c in make_command:
if re.match(r'^-{1,2}\w', c):
cmd_line.append(c)
else:
build_targets.append(c)
for t in build_targets:
build = Builder(log_name)
result = build.run(cmd_line + [t])
if result != 0:
if all_options.keep_going:
failed_targets.append(target)
fail_or_error = error
else:
fail_or_error = fail
fail_or_error("Failed to build %s, see %s" %
(target, build.logname))
# Copy the defconfig back.
if all_options.configs or all_options.updateconfigs:
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'savedefconfig'], env=make_env, stdin=devnull)
devnull.close()
shutil.copyfile(savedefconfig, defconfig)
def build_many(allconf, targets):
print "Building %d target(s)" % len(targets)
for target in targets:
if all_options.updateconfigs:
update_config(allconf[target], all_options.updateconfigs)
build(target)
if failed_targets:
fail('\n '.join(["Failed targets:"] +
[target for target in failed_targets]))
def main():
global make_command
check_kernel()
check_build()
configs = scan_configs()
usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
parser = OptionParser(usage=usage, version=version)
parser.add_option('--configs', action='store_true',
dest='configs',
help="Copy configs back into tree")
parser.add_option('--list', action='store_true',
dest='list',
help='List available targets')
parser.add_option('-v', '--verbose', action='store_true',
dest='verbose',
help='Output to stdout in addition to log file')
parser.add_option('--oldconfig', action='store_true',
dest='oldconfig',
help='Only process "make oldconfig"')
parser.add_option('--updateconfigs',
dest='updateconfigs',
help="Update defconfigs with provided option setting, "
"e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
parser.add_option('-j', '--jobs', type='int', dest="jobs",
help="Number of simultaneous jobs")
parser.add_option('-l', '--load-average', type='int',
dest='load_average',
help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
parser.add_option('-k', '--keep-going', action='store_true',
dest='keep_going', default=False,
help="Keep building other targets if a target fails")
parser.add_option('-m', '--make-target', action='append',
help='Build the indicated make target (default: %s)' %
' '.join(make_command))
(options, args) = parser.parse_args()
global all_options
all_options = options
if options.list:
print "Available targets:"
for target in configs.keys():
print " %s" % target
sys.exit(0)
if options.oldconfig:
make_command = ["oldconfig"]
elif options.make_target:
make_command = options.make_target
if options.jobs:
make_command.append("-j%d" % options.jobs)
if options.load_average:
make_command.append("-l%d" % options.load_average)
if args == ['all']:
build_many(configs, configs.keys())
elif args == ['perf']:
targets = []
for t in configs.keys():
if "perf" in t:
targets.append(t)
build_many(configs, targets)
elif args == ['noperf']:
targets = []
for t in configs.keys():
if "perf" not in t:
targets.append(t)
build_many(configs, targets)
elif len(args) > 0:
targets = []
for t in args:
if t not in configs.keys():
parser.error("Target '%s' not one of %s" % (t, configs.keys()))
targets.append(t)
build_many(configs, targets)
else:
parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
main()
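# Example invocations (illustrative; actual target names depend on the
# defconfigs present under arch/arm/configs in your tree):
#     ./build-all.py --list
#     ./build-all.py -j 8 perf
#     ./build-all.py --oldconfig msm8974
#     ./build-all.py --updateconfigs='CONFIG_USE_THING=y' all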
| gpl-2.0 | 5,816,693,650,134,767,000 | 34.134483 | 80 | 0.58465 | false |
steynovich/ansible-modules-extras | cloud/vmware/vmware_vsan_cluster.py | 64 | 3944 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Russell Teague <rteague2 () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: vmware_vsan_cluster
short_description: Configure VSAN clustering on an ESXi host
description:
- This module can be used to configure VSAN clustering on an ESXi host
version_added: 2.0
author: "Russell Teague (@mtnbikenc)"
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
cluster_uuid:
description:
- Desired cluster UUID
required: False
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
# Example command from Ansible Playbook
- name: Configure VMware VSAN Cluster
hosts: deploy_node
gather_facts: False
tags:
- vsan
tasks:
- name: Configure VSAN on first host
vmware_vsan_cluster:
hostname: "{{ groups['esxi'][0] }}"
username: "{{ esxi_username }}"
password: "{{ site_password }}"
register: vsan_cluster
- name: Configure VSAN on remaining hosts
vmware_vsan_cluster:
hostname: "{{ item }}"
username: "{{ esxi_username }}"
password: "{{ site_password }}"
cluster_uuid: "{{ vsan_cluster.cluster_uuid }}"
with_items: groups['esxi'][1:]
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
def create_vsan_cluster(host_system, new_cluster_uuid):
host_config_manager = host_system.configManager
vsan_system = host_config_manager.vsanSystem
vsan_config = vim.vsan.host.ConfigInfo()
vsan_config.enabled = True
if new_cluster_uuid is not None:
vsan_config.clusterInfo = vim.vsan.host.ConfigInfo.ClusterInfo()
vsan_config.clusterInfo.uuid = new_cluster_uuid
vsan_config.storageInfo = vim.vsan.host.ConfigInfo.StorageInfo()
vsan_config.storageInfo.autoClaimStorage = True
task = vsan_system.UpdateVsan_Task(vsan_config)
changed, result = wait_for_task(task)
host_status = vsan_system.QueryHostStatus()
cluster_uuid = host_status.uuid
return changed, result, cluster_uuid
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(cluster_uuid=dict(required=False, type='str')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi is required for this module')
new_cluster_uuid = module.params['cluster_uuid']
try:
content = connect_to_api(module, False)
host = get_all_objs(content, [vim.HostSystem])
if not host:
module.fail_json(msg="Unable to locate Physical Host.")
host_system = host.keys()[0]
changed, result, cluster_uuid = create_vsan_cluster(host_system, new_cluster_uuid)
module.exit_json(changed=changed, result=result, cluster_uuid=cluster_uuid)
except vmodl.RuntimeFault as runtime_fault:
module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
module.fail_json(msg=method_fault.msg)
except Exception as e:
module.fail_json(msg=str(e))
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 | 6,137,269,441,481,967,000 | 29.338462 | 90 | 0.681542 | false |
PictureYo-self/Picture-Yo-self | code/colorpicker10.py | 6 | 2691 | #// screen manager imported from http://kivy.org/docs/api-kivy.uix.screenmanager.html
from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.lang import Builder
from random import random
from kivy.uix.widget import Widget
from kivy.graphics import Color, Rectangle
from kivy.uix.button import Button
from kivy.graphics import Color, Ellipse, Line
from kivy.uix.image import Image
import sys
from kivy.clock import Clock
f = open('/home/pi/Picture-Yo-self/code/pictures/picName.txt','r')
picname = f.read()
f.close()
print picname
f = open('/home/pi/Picture-Yo-self/code/pictures/email.txt','r')
email = f.read()
f.close()
email = '/home/pi/Picture-Yo-self/code/pictures/' + email + '.png'
f = open('/home/pi/Picture-Yo-self/code/college.txt','r')
col = f.readline().strip()
f.close()
college = '/home/pi/Picture-Yo-self/code/pictures/' + col
print col
#college = '/home/pi/Picture-Yo-self/code/pictures/Jones.jpg'#' + col + '.jpg'
print college
#reload(sys)
class MyPaintWidget(Widget):
def on_touch_down(self, touch):
color = (random(), 1, 1)
with self.canvas:
Color(*color, mode='hsv')
touch.ud['line'] = Line(points=(touch.x, touch.y), width=3)
def on_touch_move(self, touch):
touch.ud['line'].points += [touch.x, touch.y]
class MainApp(App):
im=Image(source=picname, size_hint=(1,50))
crest=Image(source=college, size_hint=(25,25))#, pos=(1,1))
def build(self):
root = BoxLayout(orientation='vertical')
parent = BoxLayout(orientation='horizontal')
painter = MyPaintWidget()
crestwid = BoxLayout(orientation='horizontal')
# create clear button
clearbtn = Button(text='Clear', size_hint=(1,5))
parent.add_widget(clearbtn)
def clear_canvas(obj):
painter.canvas.clear()
clearbtn.bind(on_release=clear_canvas)
# create retake photo button
retakebtn = Button(text='Retake Photo', size_hint=(1,5))
parent.add_widget(retakebtn)
def retake_pic(obj):
execfile("momocapture.py")
self.im.reload()
painter.canvas.clear()
retakebtn.bind(on_release=retake_pic)
# create save button
savebtn = Button(text='Save and send to email', size_hint=(1,5))
parent.add_widget(savebtn)
def save_pic(obj):
parent.remove_widget(savebtn)
parent.remove_widget(clearbtn)
parent.remove_widget(retakebtn)
root.export_to_png(email)
exit()
savebtn.bind(on_release=save_pic)
crestwid.add_widget(self.crest)
parent.add_widget(crestwid)
root.add_widget(self.im)
root.add_widget(painter)
root.add_widget(parent)
#root.add_widget(crestwid)
return root
class RootWidget(BoxLayout):
pass
if __name__ == '__main__':
MainApp().run()
| gpl-2.0 | -4,871,375,092,691,057,000 | 27.62766 | 85 | 0.708658 | false |
widdowquinn/THAPBI-pycits | pycits/cd_hit.py | 1 | 3265 | #!/usr/bin/env python
#
# cd_hit * (cluster assembled reads with database)
# cd_hit_est used as this is the nt clustering tool
# http://weizhongli-lab.org/lab-wiki/doku.php?id=cd-hit-user-guide
# follow this link to get the download.
# https://github.com/weizhongli/cdhit
# cd_hit-0.9.10-bin-64.tar.gz
#
# (c) The James Hutton Institute 2016
# Author: Leighton Pritchard and Peter Thorpe
import os
import subprocess
from collections import namedtuple
from .tools import is_exe, NotExecutableError
# factory class for cd_hit class returned values
Results = namedtuple("Results", "command fastaout clusters " +
"stdout stderr")
class Cd_hit_Error(Exception):
"""Exception raised when cd_hit fails"""
def __init__(self, message):
self.message = message
class Cd_hit(object):
"""Class for working with cd_hit"""
def __init__(self, exe_path):
"""Instantiate with location of executable"""
if not is_exe(exe_path):
msg = "{0} is not an executable".format(exe_path)
raise NotExecutableError(msg)
self._exe_path = exe_path
def run(self, fasta_in, threads, threshold, outdir, prefix, dry_run=False):
"""Run cd_hit to cluster passed fasta files
- fasta_in - fasta file to be clustered
- threshold - threshold to cluster at
- threads - number of threads for cd_hit to use
- outdir - output directory for clustering output
- prefix - file prefix for cd_hit output
- dry_run - if True, returns cmd-line but does not run
        Returns a Results namedtuple containing the command line, the output
        filenames, and the STDOUT/STDERR returned by the cd_hit run.
"""
self.__build_cmd(fasta_in, threads, threshold, outdir, prefix)
if dry_run:
return(self._cmd)
pipe = subprocess.run(self._cmd, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True)
results = Results(self._cmd, *self._outfnames, pipe.stdout,
pipe.stderr)
return results
def __build_cmd(self, fasta_in, threads, threshold, outdir, prefix):
"""Build a command-line for cd_hit_est.
cd_hit takes a path to an output directory PLUS the prefix of the
files to write, such that
-o a/b/cdefg
writes files
a/b/cdefg
a/b/cdefg.clstr
and so on.
-d added to the command is so the output clusters will write out the
names up to 500 letters long. The default chops these at 20.
(too short)
        -M 0 is added to allow unlimited memory - not a problem for
        small jobs. If jobs are big, we will have to alter this.
"""
# outfiles are name WhatEver.out + .bak.clstr and + .clstr
self._outfnames = [os.path.join(outdir, prefix) + suffix for suffix in
('.fasta', '.clstr')]
cmd = ["cd-hit-est",
"-i", fasta_in,
"-o", os.path.join(outdir, prefix),
"-T {0}".format(threads),
"-M", "0",
"-c", str(threshold),
"-d", "500"]
self._cmd = ' '.join(cmd)
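# ---------------------------------------------------------------------------
# Usage sketch (illustrative only; the executable path, file names, thread
# count and 0.99 threshold are assumptions, not part of the original module):
#
#     from pycits.cd_hit import Cd_hit
#     cluster = Cd_hit("cd-hit-est")
#     cmdline = cluster.run("reads.fasta", threads=4, threshold=0.99,
#                           outdir="cdhit_out", prefix="reads_0.99",
#                           dry_run=True)
#     # dry_run=True returns the assembled command line without executing it.
#     # With dry_run=False a Results namedtuple (command, fastaout, clusters,
#     # stdout, stderr) is returned after cd-hit-est has run.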
| mit | 5,297,080,511,318,774,000 | 32.316327 | 79 | 0.583461 | false |
mbrukman/flocker | admin/functional/test_aws.py | 14 | 1570 | import shutil
import os
from unittest import skipIf
import boto
from effect import Effect, sync_perform, ComposedDispatcher
from twisted.python.filepath import FilePath
from twisted.trial.unittest import SynchronousTestCase
from ..aws import boto_dispatcher, UploadToS3
from flocker.provision._effect import dispatcher as base_dispatcher
from flocker.testtools import random_name
# Bucket to use for testing
bucket_name = 'clusterhq-archive-testing'
try:
boto.connect_s3().head_bucket(bucket_name)
_can_connect = True
except:
_can_connect = False
if_aws = skipIf(not _can_connect, "Requires boto AWS credentials")
class AWSTest(SynchronousTestCase):
@if_aws
def test_upload_content_type(self):
"""
A content type can be set for an uploaded file.
"""
filename = random_name(self)
tmpdir = FilePath(self.mktemp())
tmpdir.makedirs()
tmpfile = tmpdir.child(filename)
tmpfile.setContent('foo')
s3 = boto.connect_s3()
bucket = s3.get_bucket(bucket_name)
self.addCleanup(bucket.delete_key, filename)
sync_perform(
dispatcher=ComposedDispatcher([boto_dispatcher, base_dispatcher]),
effect=Effect(UploadToS3(
source_path=tmpdir,
target_bucket=bucket_name,
target_key=filename,
file=tmpfile,
content_type='application/json',
))
)
key = bucket.get_key(filename)
self.assertEqual('application/json', key.content_type)
| apache-2.0 | -4,089,517,158,436,252,700 | 29.192308 | 78 | 0.657962 | false |
hughperkins/kgsgo-dataset-preprocessor | dataset_partitioner.py | 1 | 7011 | #!/usr/bin/python
#
# Copyright Hugh Perkins 2015 hughperkins at gmail
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
# goal of this is to partition the data into two sets:
# - training data
# - testing data
#
# These sets will be assigned according to the following principles:
# - both sets should be relatively stable, not change as new archive data is available
# - test set should be not too big, but large enough that accuracy will be to around 0.1%
# - training set will contain the rest of the data
# - the same matches should not be present in both training and test set (not even different moves from
# the same match)
# - should probably be documented which are in which perhaps? eg stored as a python file in the
# repository (or as a yaml file?)
from __future__ import print_function, unicode_literals, division, absolute_import
from os import path, sys
sys.path.append( path.dirname(path.abspath(__file__)) + '/thirdparty/future/src' )
from builtins import ( bytes, dict, int, list, object, range, str, ascii, chr,
hex, input, next, oct, open, pow, round, super, filter, map, zip )
import sys, os, time, random
import index_processor
numTestGames = 100
testGames = []
trainGames = []
def draw_samples( dataDirectory, numSamples ):
# draws filename, and game index number, from the available games
# without replacement (so we should check for dupes :-( )
# first we should create a single list, containing pairs of ( filename, gameindex )
# then we will draw samples from this
# we should restrict the available games to something static, eg everything up to dec 2014, inclusive
availableGames = []
fileinfos = index_processor.get_fileInfos( dataDirectory )
for fileinfo in fileinfos:
filename = fileinfo['filename']
year = int( filename.split('-')[1].split('_')[0] )
if year > 2014:
continue # ignore after 2014, to keep the set of games fixed
numgames = fileinfo['numGames']
for i in range( numgames ):
availableGames.append( ( filename, i ) )
print( 'total num games: ' + str( len( availableGames ) ) )
# need to seed random first
random.seed(0)
samplesSet = set()
while len( samplesSet ) < numSamples:
sample = random.choice( availableGames )
if sample not in samplesSet:
samplesSet.add( sample )
print( 'Drawn ' + str( numSamples ) + ' samples:' )
# copy to list
samples = list( samplesSet )
return samples
def draw_training_games( dataDirectory ):
# gets list of all non-test games, that are no later than dec 2014
global testGames
train_games = []
fileinfos = index_processor.get_fileInfos( dataDirectory )
for fileinfo in fileinfos:
filename = fileinfo['filename']
year = int( filename.split('-')[1].split('_')[0] )
if year > 2014:
continue # ignore after 2014, to keep the set of games fixed
numgames = fileinfo['numGames']
for i in range( numgames ):
sample = ( filename, i )
if sample not in testGames:
train_games.append( sample )
print( 'total num training games: ' + str( len( train_games ) ) )
def draw_test_samples( dataDirectory ):
global numTestGames, testGames
if len( testGames ) > 0:
return testGames
try:
testSampleFile = open( 'test_samples.py', 'r' )
samplesContents = testSampleFile.read()
testSampleFile.close()
for line in samplesContents.split('\n'):
#print( line )
if line != "":
( filename, index ) = eval( line )
testGames.append( ( filename, index ) )
except Exception as e:
print( e )
testGames = draw_samples( dataDirectory, numTestGames )
testSampleFile = open( '~test_samples.py', 'w' )
for sample in testGames:
testSampleFile.write( str( sample ) + "\n" )
testSampleFile.close()
os.rename( '~test_samples.py', 'test_samples.py' )
# for sample in testGames:
# print( 'testgame: ' + str( sample ) )
return testGames
# draw training games, not overlapping with any of the test games
def draw_training_samples( dataDirectory, numSamples ):
test_samples = draw_test_samples( dataDirectory )
availableGames = []
fileinfos = index_processor.get_fileInfos( dataDirectory )
for fileinfo in fileinfos:
filename = fileinfo['filename']
year = int( filename.split('-')[1].split('_')[0] )
if year > 2014:
continue # ignore after 2014, to keep the set of games fixed
numgames = fileinfo['numGames']
for i in range( numgames ):
availableGames.append( ( filename, i ) )
print( 'total num games: ' + str( len( availableGames ) ) )
# need to seed random first
random.seed(0)
# I suppose the first 100 samples will be the testing ones :-P
# anyway, just skip those....
samplesSet = set()
while len( samplesSet ) < numSamples:
sample = random.choice( availableGames )
if sample not in test_samples:
samplesSet.add( sample )
print( 'Drawn ' + str( numSamples ) + ' samples:' )
# copy to list
samples = list( samplesSet )
return samples
def draw_all_training( dataDirectory ):
test_samples = draw_test_samples( dataDirectory )
availableGames = []
fileinfos = index_processor.get_fileInfos( dataDirectory )
for fileinfo in fileinfos:
filename = fileinfo['filename']
year = int( filename.split('-')[1].split('_')[0] )
if year > 2014:
continue # ignore after 2014, to keep the set of games fixed
numgames = fileinfo['numGames']
for i in range( numgames ):
availableGames.append( ( filename, i ) )
print( 'total num games: ' + str( len( availableGames ) ) )
# need to seed random first
random.seed(0)
# I suppose the first 100 samples will be the testing ones :-P
# anyway, just skip those....
samplesSet = set()
for sample in availableGames:
if sample not in test_samples:
samplesSet.add( sample )
print( 'Drawn all samples, ie ' + str( len( samplesSet ) ) + ' samples:' )
# copy to list
samples = list( samplesSet )
return samples
def draw_training_10k( dataDirectory ):
return draw_training_samples( dataDirectory, 10000 )
def go(dataDirectory):
testsamples = draw_test_samples( dataDirectory )
for sample in testsamples:
print( 'testgame: ' + str( sample ) )
# all other games are training games...
draw_training_games( dataDirectory )
if __name__ == '__main__':
sTargetDirectory = 'data'
if len(sys.argv) == 2:
sTargetDirectory = sys.argv[1]
go(sTargetDirectory)
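# Usage sketch (illustrative): other scripts can import this module to obtain
# the fixed test split and a reproducible training split drawn from the same
# pre-2015 archives; 'data' is the default archive directory assumed here.
#
#     import dataset_partitioner
#     test_games = dataset_partitioner.draw_test_samples('data')
#     train_10k = dataset_partitioner.draw_training_10k('data')
#     # each entry is a (archive_filename, game_index) pair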
| mpl-2.0 | 7,069,289,450,345,646,000 | 36.693548 | 105 | 0.637712 | false |
mattlevesque/py-shrt-lkr | py_shrt_lkr/tests_ORG.py | 2 | 1767 | import unittest
import transaction
from pyramid import testing
from .core.models import DBSession
# class TestMyViewSuccessCondition(unittest.TestCase):
# def setUp(self):
# self.config = testing.setUp()
# from sqlalchemy import create_engine
# engine = create_engine('sqlite://')
# from .models import (
# Base,
# MyModel,
# )
# DBSession.configure(bind=engine)
# Base.metadata.create_all(engine)
# with transaction.manager:
# model = MyModel(name='one', value=55)
# DBSession.add(model)
#
# def tearDown(self):
# DBSession.remove()
# testing.tearDown()
#
# def test_passing_view(self):
# from .views import my_view
# request = testing.DummyRequest()
# info = my_view(request)
# self.assertEqual(info['one'].name, 'one')
# self.assertEqual(info['project'], 'py-shrt-lkr')
#
#
# class TestMyViewFailureCondition(unittest.TestCase):
# def setUp(self):
# self.config = testing.setUp()
# from sqlalchemy import create_engine
# engine = create_engine('sqlite://')
# DBSession.configure(bind=engine)
#
# def tearDown(self):
# DBSession.remove()
# testing.tearDown()
#
# def test_failing_view(self):
# from .views import my_view
# request = testing.DummyRequest()
# info = my_view(request)
# self.assertEqual(info.status_int, 500)
##
##
# Services tests
##
class TestServiceUserSuccessCondition(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
from sqlalchemy import create_engine
engine = create_engine('sqlite://')
DBSession.configure(bind=engine)
def tearDown(self):
DBSession.remove()
testing.tearDown()
def test_failing_view(self):
from .core.services import UserService
print("TEST")
self.assertEqual(UserService.get_by_id(77), -10, "Yup that works")
| gpl-2.0 | -24,629,280,700,605,896 | 23.541667 | 68 | 0.696661 | false |
BT-rmartin/odoo | addons/account/wizard/account_move_line_select.py | 385 | 2800 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class account_move_line_select(osv.osv_memory):
"""
Account move line select
"""
_name = "account.move.line.select"
_description = "Account move line select"
def open_window(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
account_obj = self.pool.get('account.account')
fiscalyear_obj = self.pool.get('account.fiscalyear')
if context is None:
context = {}
if 'fiscalyear' not in context:
fiscalyear_ids = fiscalyear_obj.search(cr, uid, [('state', '=', 'draft')])
else:
fiscalyear_ids = [context['fiscalyear']]
fiscalyears = fiscalyear_obj.browse(cr, uid, fiscalyear_ids, context=context)
period_ids = []
if fiscalyears:
for fiscalyear in fiscalyears:
for period in fiscalyear.period_ids:
period_ids.append(period.id)
domain = str(('period_id', 'in', period_ids))
result = mod_obj.get_object_reference(cr, uid, 'account', 'action_move_line_tree1')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id])[0]
result['context'] = {
'fiscalyear': False,
'account_id': context['active_id'],
'active_id': context['active_id'],
}
if context['active_id']:
acc_data = account_obj.browse(cr, uid, context['active_id']).child_consol_ids
if acc_data:
result['context'].update({'consolidate_children': True})
result['domain']=result['domain'][0:-1]+','+domain+result['domain'][-1]
return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -1,376,323,157,273,474,000 | 38.43662 | 91 | 0.585 | false |
asposewords/Aspose_Words_Cloud | Examples/Python/Examples/GetSpecificRunOfParagraphFromWordDocument.py | 2 | 1537 | import asposewordscloud
from asposewordscloud.WordsApi import WordsApi
from asposewordscloud.WordsApi import ApiException
from asposewordscloud.models import RunResponse
import asposestoragecloud
from asposestoragecloud.StorageApi import StorageApi
from asposestoragecloud.StorageApi import ResponseMessage
apiKey = "XXXXX" #sepcify App Key
appSid = "XXXXX" #sepcify App SID
apiServer = "http://api.aspose.com/v1.1"
data_folder = "../../data/"
#Instantiate Aspose Storage API SDK
storage_apiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, True)
storageApi = StorageApi(storage_apiClient)
#Instantiate Aspose Words API SDK
api_client = asposewordscloud.ApiClient.ApiClient(apiKey, appSid, True)
wordsApi = WordsApi(api_client)
#set input file name
filename = "SampleWordDocument.docx"
index = 1
runIndex = 0
#upload file to aspose cloud storage
storageApi.PutCreate(Path=filename, file=data_folder + filename)
try:
#invoke Aspose.Words Cloud SDK API to get a specific run of a paragraph from a word document
response = wordsApi.GetDocumentParagraphRun(name=filename, index=index, runIndex=runIndex)
if response.Status == "OK":
docParagraphRun = response.Run
#display document paragraph run info
if docParagraphRun is not None:
print "NoteId : " + docParagraphRun.NodeId
print "Text : " + docParagraphRun.Text
except ApiException as ex:
print "ApiException:"
print "Code:" + str(ex.code)
print "Message:" + ex.message
| mit | 2,533,365,269,384,234,500 | 33.155556 | 96 | 0.748861 | false |
mhaessig/servo | tests/wpt/css-tests/tools/pywebsocket/src/setup.py | 434 | 2863 | #!/usr/bin/env python
#
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Set up script for mod_pywebsocket.
"""
from distutils.core import setup, Extension
import sys
_PACKAGE_NAME = 'mod_pywebsocket'
# Build and use a C++ extension for faster masking. SWIG is required.
_USE_FAST_MASKING = False
if sys.version < '2.3':
print >> sys.stderr, '%s requires Python 2.3 or later.' % _PACKAGE_NAME
sys.exit(1)
if _USE_FAST_MASKING:
setup(ext_modules=[
Extension(
'mod_pywebsocket/_fast_masking',
['mod_pywebsocket/fast_masking.i'],
swig_opts=['-c++'])])
setup(author='Yuzo Fujishima',
author_email='[email protected]',
description='WebSocket extension for Apache HTTP Server.',
long_description=(
'mod_pywebsocket is an Apache HTTP Server extension for '
'the WebSocket Protocol (RFC 6455). '
'See mod_pywebsocket/__init__.py for more detail.'),
license='See COPYING',
name=_PACKAGE_NAME,
packages=[_PACKAGE_NAME, _PACKAGE_NAME + '.handshake'],
url='http://code.google.com/p/pywebsocket/',
# See the source of distutils.version, distutils.versionpredicate and
# distutils.dist to understand how to name version numbers.
version='0.7.9',
)
# vi:sts=4 sw=4 et
| mpl-2.0 | 8,322,786,399,046,040,000 | 37.689189 | 75 | 0.699965 | false |
lfz/Guided-Denoise | Attackset/fgsm_inresv2_random/attack_iter.py | 1 | 10010 | """Implementation of sample attack."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from scipy.misc import imread
from scipy.misc import imsave
from nets import inception_v3, inception_v4, inception_resnet_v2, resnet_v2
from functools import partial
from multiprocessing import Pool
import tensorflow as tf
slim = tf.contrib.slim
tf.flags.DEFINE_string(
'master', '', 'The address of the TensorFlow master to use.')
tf.flags.DEFINE_string(
'checkpoint_path_inception_v3', '', 'Path to checkpoint for inception network.')
tf.flags.DEFINE_string(
'checkpoint_path_adv_inception_v3', '', 'Path to checkpoint for inception network.')
tf.flags.DEFINE_string(
'checkpoint_path_ens3_adv_inception_v3', '', 'Path to checkpoint for inception network.')
tf.flags.DEFINE_string(
'checkpoint_path_ens4_adv_inception_v3', '', 'Path to checkpoint for inception network.')
tf.flags.DEFINE_string(
'checkpoint_path_inception_v4', '', 'Path to checkpoint for inception network.')
tf.flags.DEFINE_string(
'checkpoint_path_inception_resnet_v2', '', 'Path to checkpoint for inception network.')
tf.flags.DEFINE_string(
'checkpoint_path_ens_adv_inception_resnet_v2', '', 'Path to checkpoint for inception network.')
tf.flags.DEFINE_string(
'checkpoint_path_resnet', '', 'Path to checkpoint for inception network.')
tf.flags.DEFINE_string(
'input_dir', '', 'Input directory with images.')
tf.flags.DEFINE_string(
'output_dir', '', 'Output directory with images.')
tf.flags.DEFINE_float(
'max_epsilon', 16.0, 'Maximum size of adversarial perturbation.')
tf.flags.DEFINE_integer(
'num_iter', 10, 'Number of iterations.')
tf.flags.DEFINE_integer(
'image_width', 299, 'Width of each input images.')
tf.flags.DEFINE_integer(
'image_height', 299, 'Height of each input images.')
tf.flags.DEFINE_integer(
'batch_size', 10, 'How many images process at one time.')
tf.flags.DEFINE_integer(
    'use_existing', 0, 'whether to reuse existing results')
tf.flags.DEFINE_integer(
    'random_eps', 0, 'whether to use random perturbation')
tf.flags.DEFINE_float(
'momentum', 1.0, 'Momentum.')
tf.flags.DEFINE_string(
'gpu','0','')
FLAGS = tf.flags.FLAGS
def load_images(input_dir, batch_shape):
"""Read png images from input directory in batches.
Args:
input_dir: input directory
batch_shape: shape of minibatch array, i.e. [batch_size, height, width, 3]
Yields:
    filenames: list of file names (without path) for each image.
      Length of this list could be less than batch_size; in this case only
      the first few images of the result are elements of the minibatch.
images: array with all images from this batch
"""
images = np.zeros(batch_shape)
filenames = []
idx = 0
batch_size = batch_shape[0]
for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):
with tf.gfile.Open(filepath) as f:
image = imread(f, mode='RGB').astype(np.float) / 255.0
# Images for inception classifier are normalized to be in [-1, 1] interval.
images[idx, :, :, :] = image * 2.0 - 1.0
filenames.append(os.path.basename(filepath))
idx += 1
if idx == batch_size:
yield filenames, images
filenames = []
images = np.zeros(batch_shape)
idx = 0
if idx > 0:
yield filenames, images
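# A minimal usage sketch (hypothetical directory name, not part of this script):
#
#   batch_shape = [10, 299, 299, 3]
#   for filenames, images in load_images('/tmp/adv_inputs', batch_shape):
#       print(filenames, images.shape)
#
# Each yielded array holds images rescaled to [-1, 1]; the last batch may be
# zero-padded when fewer than batch_size files remain.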
def save_images(arg):
image,filename,output_dir = arg
imsave(os.path.join(output_dir, filename), (image + 1.0) * 0.5, format='png')
def graph(x, y, i, x_max, x_min, grad, eps_inside):
num_iter = FLAGS.num_iter
alpha = eps_inside / num_iter
momentum = FLAGS.momentum
num_classes = 1001
with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()):
logits_res_v2, end_points_res_v2 = inception_resnet_v2.inception_resnet_v2(
x, num_classes=num_classes, is_training=False)
pred = tf.argmax( end_points_res_v2['Predictions'], 1)
first_round = tf.cast(tf.equal(i, 0), tf.int64)
y = first_round * pred + (1 - first_round) * y
one_hot = tf.one_hot(y, num_classes)
logits = logits_res_v2
auxlogits = end_points_res_v2['AuxLogits']
cross_entropy = tf.losses.softmax_cross_entropy(one_hot,
logits,
label_smoothing=0.0,
weights=1.0)
cross_entropy += tf.losses.softmax_cross_entropy(one_hot,
auxlogits,
label_smoothing=0.0,
weights=0.4)
noise = tf.gradients(cross_entropy, x)[0]
x = x + alpha * tf.sign(noise)
x = tf.clip_by_value(x, x_min, x_max)
i = tf.add(i, 1)
return x, y, i, x_max, x_min, noise, eps_inside
def stop(x, y, i, x_max, x_min, grad, eps_inside):
num_iter = FLAGS.num_iter
return tf.less(i, num_iter)
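# `stop` and `graph` are the condition and body of a tf.while_loop; main() below
# wires them up roughly as (sketch only):
#
#   x_adv, _, _, _, _, _, _ = tf.while_loop(
#       stop, graph, [x_in_single, y, i, x_max, x_min, grad, eps_single])
#
# so each of the num_iter iterations takes one signed-gradient step of size
# eps/num_iter and clips the result back into the [x_min, x_max] ball.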
def main(_):
# Images for inception classifier are normalized to be in [-1, 1] interval,
# eps is a difference between pixels so it should be in [0, 2] interval.
# Renormalizing epsilon from [0, 255] to [0, 2].
print(FLAGS.output_dir)
#eps = 2.0 * FLAGS.max_epsilon / 255.0
gpus = np.array(FLAGS.gpu.split(',')).astype('int')
n_gpus = len(gpus)
bs_single = FLAGS.batch_size
FLAGS.batch_size *= n_gpus
batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]
batch_shape_single = [bs_single, FLAGS.image_height, FLAGS.image_width, 3]
tf.logging.set_verbosity(tf.logging.INFO)
pool = Pool()
with tf.Graph().as_default(), tf.device('/cpu:0'):
flists = set([f for f in os.listdir(FLAGS.input_dir) if 'png' in f])
if FLAGS.use_existing == 1:
flists_existing = set([f for f in os.listdir(FLAGS.output_dir) if 'png' in f ])
newfiles = list(flists.difference(flists_existing))
newfiles = [os.path.join(FLAGS.input_dir,f) for f in newfiles]
else:
newfiles = [os.path.join(FLAGS.input_dir,f) for f in flists]
print('creating %s new files'%(len(newfiles)))
if len(newfiles) == 0:
return
filename_queue = tf.train.string_input_producer(newfiles, shuffle = False, num_epochs = FLAGS.batch_size)
image_reader = tf.WholeFileReader()
filename, image_file = image_reader.read(filename_queue)
image = tf.image.decode_png(image_file)
image.set_shape((299, 299, 3))
eps = tf.placeholder(dtype='float32', shape = [FLAGS.batch_size, None, None, None])
# Generate batch
num_preprocess_threads = 20
min_queue_examples = 256
images,filenames = tf.train.batch(
[image,filename],
batch_size=FLAGS.batch_size,
num_threads=num_preprocess_threads,
capacity= 3 * FLAGS.batch_size,
allow_smaller_final_batch = False)
images = tf.cast(images,tf.float32)/255.0*2.-1.
images_splits = tf.split(axis=0, num_or_size_splits=n_gpus, value=images)
eps_splits = tf.split(axis=0, num_or_size_splits=n_gpus, value=eps)
# Prepare graph
#x_input = tf.placeholder(tf.float32, shape=batch_shape)
x_advlist = []
for i_gpu in range(n_gpus):
start = i_gpu*bs_single
print('gpu'+str(i_gpu))
with tf.device('/gpu:'+str(i_gpu)):
with tf.variable_scope(tf.get_variable_scope(),
reuse=True if i_gpu > 0 else None):
# with tf.name_scope('%s_%d' % ('tower', i_gpu)):
x_in_single = images_splits[i_gpu]
eps_single = eps_splits[i_gpu]
x_max = tf.clip_by_value(x_in_single + eps_single, -1.0, 1.0)
x_min = tf.clip_by_value(x_in_single - eps_single, -1.0, 1.0)
bs_this = x_in_single.shape[0]
y = tf.constant(np.zeros([bs_single]), tf.int64)
i = tf.constant(0)
grad = tf.zeros_like(x_in_single)
x_adv, _, _, _, _, _, _ = tf.while_loop(stop, graph, [x_in_single, y, i, x_max, x_min, grad, eps_single])
x_advlist.append(x_adv)
x_adv = tf.concat(x_advlist,0)
# Run computation
s6 = tf.train.Saver(slim.get_model_variables(scope='InceptionResnetV2'))
init = (tf.global_variables_initializer(), tf.local_variables_initializer())
with tf.Session() as sess:
sess.run(init)
s6.restore(sess, FLAGS.checkpoint_path_inception_resnet_v2)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
n_iter = -(-(len(newfiles))//FLAGS.batch_size)
stack_img = []
stack_names = []
for i in range(n_iter):
if FLAGS.random_eps:
eps_value = np.random.randint(1,FLAGS.max_epsilon, [FLAGS.batch_size,1,1,1])
else:
eps_value = np.ones([FLAGS.batch_size,1,1,1]) * FLAGS.max_epsilon
eps_value = eps_value.astype('float32') *2 /255
names,adv_images,orig_images = sess.run([filenames,x_adv,images], feed_dict={eps:eps_value})
names = [os.path.basename(name) for name in names]
stack_img.append(adv_images)
stack_names.append(names)
# save_images2(adv_images, names, FLAGS.output_dir, pool)
# save_images(adv_images, names, FLAGS.output_dir)
if ((i+1)%100 ==0) or i == n_iter-1:
print("%d / %d"%(i+1,n_iter))
stack_img = np.concatenate(stack_img)
stack_names = np.concatenate(stack_names)
#partial_save = partial(save_one,images=stack_img,filenames=stack_names,output_dir=FLAGS.output_dir)
paras = ((im,name,FLAGS.output_dir) for (im,name) in zip(stack_img,stack_names))
pool.map_async(save_images,paras)
stack_img = []
stack_names = []
# save_images(adv_images, filenames, FLAGS.output_dir)
# Finish off the filename queue coordinator.
coord.request_stop()
coord.join(threads)
pool.close()
pool.join()
if __name__ == '__main__':
tf.app.run()
| apache-2.0 | -4,969,050,275,284,841,000 | 36.490637 | 117 | 0.625774 | false |
Captnoord/openpli-enigma2 | lib/python/Screens/ChoiceBox.py | 10 | 4653 | from Screens.Screen import Screen
from Components.ActionMap import NumberActionMap
from Components.Label import Label
from Components.ChoiceList import ChoiceEntryComponent, ChoiceList
from Components.Sources.StaticText import StaticText
import enigma
class ChoiceBox(Screen):
def __init__(self, session, title = "", list = [], keys = None, selection = 0, skin_name = []):
Screen.__init__(self, session)
if isinstance(skin_name, str):
skin_name = [skin_name]
self.skinName = skin_name + ["ChoiceBox"]
self["text"] = Label(title)
self.list = []
self.summarylist = []
if keys is None:
self.__keys = [ "1", "2", "3", "4", "5", "6", "7", "8", "9", "0", "red", "green", "yellow", "blue" ] + (len(list) - 10) * [""]
else:
self.__keys = keys + (len(list) - len(keys)) * [""]
self.keymap = {}
pos = 0
for x in list:
strpos = str(self.__keys[pos])
self.list.append(ChoiceEntryComponent(key = strpos, text = x))
if self.__keys[pos] != "":
self.keymap[self.__keys[pos]] = list[pos]
self.summarylist.append((self.__keys[pos],x[0]))
pos += 1
self["list"] = ChoiceList(list = self.list, selection = selection)
self["summary_list"] = StaticText()
self["summary_selection"] = StaticText()
self.updateSummary(selection)
self["actions"] = NumberActionMap(["WizardActions", "InputActions", "ColorActions", "DirectionActions"],
{
"ok": self.go,
"back": self.cancel,
"1": self.keyNumberGlobal,
"2": self.keyNumberGlobal,
"3": self.keyNumberGlobal,
"4": self.keyNumberGlobal,
"5": self.keyNumberGlobal,
"6": self.keyNumberGlobal,
"7": self.keyNumberGlobal,
"8": self.keyNumberGlobal,
"9": self.keyNumberGlobal,
"0": self.keyNumberGlobal,
"red": self.keyRed,
"green": self.keyGreen,
"yellow": self.keyYellow,
"blue": self.keyBlue,
"up": self.up,
"down": self.down
}, -1)
def autoResize(self):
orgwidth = self.instance.size().width()
orgpos = self.instance.position()
textsize = self["text"].getSize()
count = len(self.list)
if count > 10:
count = 10
offset = 25 * count
wsizex = textsize[0] + 60
wsizey = textsize[1] + offset
if (520 > wsizex):
wsizex = 520
wsize = (wsizex, wsizey)
# resize
self.instance.resize(enigma.eSize(*wsize))
# resize label
self["text"].instance.resize(enigma.eSize(*textsize))
# move list
listsize = (wsizex, 25 * count)
self["list"].instance.move(enigma.ePoint(0, textsize[1]))
self["list"].instance.resize(enigma.eSize(*listsize))
# center window
newwidth = wsize[0]
self.instance.move(enigma.ePoint((720-wsizex)/2, (576-wsizey)/(count > 7 and 2 or 3)))
def keyLeft(self):
pass
def keyRight(self):
pass
def up(self):
if len(self["list"].list) > 0:
while 1:
self["list"].instance.moveSelection(self["list"].instance.moveUp)
self.updateSummary(self["list"].l.getCurrentSelectionIndex())
if self["list"].l.getCurrentSelection()[0][0] != "--" or self["list"].l.getCurrentSelectionIndex() == 0:
break
def down(self):
if len(self["list"].list) > 0:
while 1:
self["list"].instance.moveSelection(self["list"].instance.moveDown)
self.updateSummary(self["list"].l.getCurrentSelectionIndex())
if self["list"].l.getCurrentSelection()[0][0] != "--" or self["list"].l.getCurrentSelectionIndex() == len(self["list"].list) - 1:
break
# runs a number shortcut
def keyNumberGlobal(self, number):
self.goKey(str(number))
# runs the current selected entry
def go(self):
cursel = self["list"].l.getCurrentSelection()
if cursel:
self.goEntry(cursel[0])
else:
self.cancel()
# runs a specific entry
def goEntry(self, entry):
if len(entry) > 2 and isinstance(entry[1], str) and entry[1] == "CALLFUNC":
# CALLFUNC wants to have the current selection as argument
arg = self["list"].l.getCurrentSelection()[0]
entry[2](arg)
else:
self.close(entry)
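	# Illustrative only: a list entry of the form ("Label", "CALLFUNC", callback)
	# therefore calls callback(current_selection) instead of closing the box, e.g.
	#   ChoiceBox(session, title="Pick one", list=[("Run", "CALLFUNC", myfunc)])
	# where `myfunc` is a caller-supplied function (hypothetical name).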
# lookups a key in the keymap, then runs it
def goKey(self, key):
if self.keymap.has_key(key):
entry = self.keymap[key]
self.goEntry(entry)
# runs a color shortcut
def keyRed(self):
self.goKey("red")
def keyGreen(self):
self.goKey("green")
def keyYellow(self):
self.goKey("yellow")
def keyBlue(self):
self.goKey("blue")
def updateSummary(self, curpos=0):
pos = 0
summarytext = ""
for entry in self.summarylist:
if pos > curpos-2 and pos < curpos+5:
if pos == curpos:
summarytext += ">"
self["summary_selection"].setText(entry[1])
else:
summarytext += entry[0]
summarytext += ' ' + entry[1] + '\n'
pos += 1
self["summary_list"].setText(summarytext)
def cancel(self):
self.close(None)
| gpl-2.0 | 3,890,250,496,846,714,400 | 27.722222 | 133 | 0.649688 | false |
hpcuantwerpen/easybuild-easyblocks | easybuild/easyblocks/b/blacs.py | 3 | 8124 | ##
# Copyright 2009-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing BLACS, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import glob
import re
import os
import shutil
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.run import run_cmd
# also used by ScaLAPACK
def det_interface(log, path):
"""Determine interface through 'xintface' heuristic tool"""
(out, _) = run_cmd(os.path.join(path, "xintface"), log_all=True, simple=False)
intregexp = re.compile(r".*INTFACE\s*=\s*-D(\S+)\s*")
res = intregexp.search(out)
if res:
return res.group(1)
else:
raise EasyBuildError("Failed to determine interface, output for xintface: %s", out)
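# For example (hypothetical xintface output), a line such as
# "INTFACE = -DAdd_" makes det_interface() return "Add_", which build_step()
# below passes to make as INTERFACE=Add_.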
class EB_BLACS(ConfigureMake):
"""
Support for building/installing BLACS
- configure: symlink BMAKES/Bmake.MPI-LINUX to Bmake.inc
- make install: copy files
"""
def configure_step(self):
"""Configure BLACS build by copying Bmake.inc file."""
src = os.path.join(self.cfg['start_dir'], 'BMAKES', 'Bmake.MPI-LINUX')
dest = os.path.join(self.cfg['start_dir'], 'Bmake.inc')
if not os.path.isfile(src):
raise EasyBuildError("Can't find source file %s", src)
if os.path.exists(dest):
raise EasyBuildError("Destination file %s exists", dest)
try:
shutil.copy(src, dest)
except OSError as err:
raise EasyBuildError("Copying %s to %s failed: %s", src, dest, err)
def build_step(self):
"""Build BLACS using build_step, after figuring out the make options based on the heuristic tools available."""
opts = {
'mpicc': "%s %s" % (os.getenv('MPICC'), os.getenv('CFLAGS')),
'mpif77': "%s %s" % (os.getenv('MPIF77'), os.getenv('FFLAGS')),
'f77': os.getenv('F77'),
'cc': os.getenv('CC'),
'builddir': os.getcwd(),
'mpidir': os.path.dirname(os.getenv('MPI_LIB_DIR')),
}
# determine interface and transcomm settings
comm = ''
interface = 'UNKNOWN'
try:
cwd = os.getcwd()
os.chdir('INSTALL')
# need to build
cmd = "make"
cmd += " CC='%(mpicc)s' F77='%(mpif77)s' MPIdir=%(mpidir)s" \
" MPILIB='' BTOPdir=%(builddir)s INTERFACE=NONE" % opts
# determine interface using xintface
run_cmd("%s xintface" % cmd, log_all=True, simple=True)
interface = det_interface(self.log, "./EXE")
# try and determine transcomm using xtc_CsameF77 and xtc_UseMpich
if not comm:
run_cmd("%s xtc_CsameF77" % cmd, log_all=True, simple=True)
(out, _) = run_cmd(self.toolchain.mpi_cmd_for("./EXE/xtc_CsameF77", 2), log_all=True, simple=False)
# get rid of first two lines, that inform about how to use this tool
out = '\n'.join(out.split('\n')[2:])
notregexp = re.compile("_NOT_")
if not notregexp.search(out):
# if it doesn't say '_NOT_', set it
comm = "TRANSCOMM='-DCSameF77'"
else:
(_, ec) = run_cmd("%s xtc_UseMpich" % cmd, log_all=False, log_ok=False, simple=False)
if ec == 0:
(out, _) = run_cmd(self.toolchain.mpi_cmd_for("./EXE/xtc_UseMpich", 2),
log_all=True, simple=False)
if not notregexp.search(out):
commregexp = re.compile(r'Set TRANSCOMM\s*=\s*(.*)$')
res = commregexp.search(out)
if res:
# found how to set TRANSCOMM, so set it
comm = "TRANSCOMM='%s'" % res.group(1)
else:
# no match, set empty TRANSCOMM
comm = "TRANSCOMM=''"
else:
# if it fails to compile, set empty TRANSCOMM
comm = "TRANSCOMM=''"
os.chdir(cwd)
except OSError as err:
raise EasyBuildError("Failed to determine interface and transcomm settings: %s", err)
opts.update({
'comm': comm,
'int': interface,
})
add_makeopts = ' MPICC="%(mpicc)s" MPIF77="%(mpif77)s" %(comm)s ' % opts
add_makeopts += ' INTERFACE=%(int)s MPIdir=%(mpidir)s BTOPdir=%(builddir)s mpi ' % opts
self.cfg.update('buildopts', add_makeopts)
super(EB_BLACS, self).build_step()
def install_step(self):
"""Install by copying files to install dir."""
# include files and libraries
for (srcdir, destdir, ext) in [
(os.path.join("SRC", "MPI"), "include", ".h"), # include files
("LIB", "lib", ".a"), # libraries
]:
src = os.path.join(self.cfg['start_dir'], srcdir)
dest = os.path.join(self.installdir, destdir)
try:
os.makedirs(dest)
os.chdir(src)
for lib in glob.glob('*%s' % ext):
# copy file
shutil.copy2(os.path.join(src, lib), dest)
self.log.debug("Copied %s to %s" % (lib, dest))
if destdir == 'lib':
# create symlink with more standard name for libraries
symlink_name = "lib%s.a" % lib.split('_')[0]
os.symlink(os.path.join(dest, lib), os.path.join(dest, symlink_name))
self.log.debug("Symlinked %s/%s to %s" % (dest, lib, symlink_name))
except OSError as err:
raise EasyBuildError("Copying %s/*.%s to installation dir %s failed: %s", src, ext, dest, err)
# utilities
src = os.path.join(self.cfg['start_dir'], 'INSTALL', 'EXE', 'xintface')
dest = os.path.join(self.installdir, 'bin')
try:
os.makedirs(dest)
shutil.copy2(src, dest)
self.log.debug("Copied %s to %s" % (src, dest))
except OSError as err:
raise EasyBuildError("Copying %s to installation dir %s failed: %s", src, dest, err)
def sanity_check_step(self):
"""Custom sanity check for BLACS."""
custom_paths = {
'files': [fil for filptrn in ["blacs", "blacsCinit", "blacsF77init"]
for fil in ["lib/lib%s.a" % filptrn,
"lib/%s_MPI-LINUX-0.a" % filptrn]] +
["bin/xintface"],
'dirs': []
}
super(EB_BLACS, self).sanity_check_step(custom_paths=custom_paths)
| gpl-2.0 | -2,881,020,283,697,042,400 | 35.430493 | 119 | 0.555268 | false |
cjayb/mne-python | tutorials/source-modeling/plot_eeg_no_mri.py | 4 | 2723 | # -*- coding: utf-8 -*-
"""
.. _tut-eeg-fsaverage-source-modeling:
EEG forward operator with a template MRI
========================================
This tutorial explains how to compute the forward operator from EEG data
using the standard template MRI subject ``fsaverage``.
.. caution:: Source reconstruction without an individual T1 MRI from the
             subject will be less accurate. Do not over-interpret
             activity locations, which can be off by multiple centimeters.
.. contents:: This tutorial covers:
:local:
:depth: 2
"""
# Authors: Alexandre Gramfort <[email protected]>
# Joan Massich <[email protected]>
#
# License: BSD Style.
import os.path as op
import mne
from mne.datasets import eegbci
from mne.datasets import fetch_fsaverage
# Download fsaverage files
fs_dir = fetch_fsaverage(verbose=True)
subjects_dir = op.dirname(fs_dir)
# The files live in:
subject = 'fsaverage'
trans = 'fsaverage' # MNE has a built-in fsaverage transformation
src = op.join(fs_dir, 'bem', 'fsaverage-ico-5-src.fif')
bem = op.join(fs_dir, 'bem', 'fsaverage-5120-5120-5120-bem-sol.fif')
##############################################################################
# Load the data
# -------------
#
# We use here EEG data from the BCI dataset.
#
# .. note:: See :ref:`plot_montage` to view all the standard EEG montages
# available in MNE-Python.
raw_fname, = eegbci.load_data(subject=1, runs=[6])
raw = mne.io.read_raw_edf(raw_fname, preload=True)
# Clean channel names to be able to use a standard 1005 montage
new_names = dict(
(ch_name,
ch_name.rstrip('.').upper().replace('Z', 'z').replace('FP', 'Fp'))
for ch_name in raw.ch_names)
raw.rename_channels(new_names)
# Read and set the EEG electrode locations
montage = mne.channels.make_standard_montage('standard_1005')
raw.set_montage(montage)
raw.set_eeg_reference(projection=True) # needed for inverse modeling
# Check that the locations of EEG electrodes is correct with respect to MRI
mne.viz.plot_alignment(
raw.info, src=src, eeg=['original', 'projected'], trans=trans,
show_axes=True, mri_fiducials=True, dig='fiducials')
##############################################################################
# Setup source space and compute forward
# --------------------------------------
fwd = mne.make_forward_solution(raw.info, trans=trans, src=src,
bem=bem, eeg=True, mindist=5.0, n_jobs=1)
print(fwd)
# for illustration purposes use fwd to compute the sensitivity map
eeg_map = mne.sensitivity_map(fwd, ch_type='eeg', mode='fixed')
eeg_map.plot(time_label='EEG sensitivity', subjects_dir=subjects_dir,
clim=dict(lims=[5, 50, 100]))
| bsd-3-clause | -7,828,864,551,540,037,000 | 32.207317 | 78 | 0.63643 | false |
artas360/pythran | pythran/tests/cases/periodic_dist.py | 5 | 1259 | #pythran export dist(float [], float[], float[], int, bool, bool, bool)
#runas import numpy as np ; N = 20 ; x = np.arange(0., N, 0.1) ; L = 4 ; periodic = True ; dist(x, x, x, L,periodic, periodic, periodic)
#bench import numpy as np ; N = 300 ; x = np.arange(0., N, 0.1) ; L = 4 ; periodic = True ; dist(x, x, x, L,periodic, periodic, periodic)
import numpy as np
def dist(x, y, z, L, periodicX, periodicY, periodicZ):
    """Computes distances between all particles and places the result in a
    matrix such that the ij-th matrix entry corresponds to the distance
    between particle i and j."""
N = len(x)
xtemp = np.tile(x,(N,1))
dx = xtemp - xtemp.T
ytemp = np.tile(y,(N,1))
dy = ytemp - ytemp.T
ztemp = np.tile(z,(N,1))
dz = ztemp - ztemp.T
# Particles 'feel' each other across the periodic boundaries
if periodicX:
dx[dx>L/2]=dx[dx > L/2]-L
dx[dx<-L/2]=dx[dx < -L/2]+L
if periodicY:
dy[dy>L/2]=dy[dy>L/2]-L
dy[dy<-L/2]=dy[dy<-L/2]+L
if periodicZ:
dz[dz>L/2]=dz[dz>L/2]-L
dz[dz<-L/2]=dz[dz<-L/2]+L
# Total Distances
d = np.sqrt(dx**2+dy**2+dz**2)
# Mark zero entries with negative 1 to avoid divergences
d[d==0] = -1
return d, dx, dy, dz
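# Illustration of the minimum-image wrapping above (assumed values): with L = 4,
# a raw separation dx = 3.5 exceeds L/2 and is mapped to 3.5 - 4 = -0.5, so the
# particles interact across the periodic boundary.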
| bsd-3-clause | 1,999,330,602,204,667,100 | 34.971429 | 172 | 0.585385 | false |
emedvedev/st2 | st2actions/tests/unit/test_async_runner.py | 8 | 1802 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import simplejson as json
except ImportError:
import json
from st2actions.runners import AsyncActionRunner
from st2common.constants.action import (LIVEACTION_STATUS_RUNNING)
RAISE_PROPERTY = 'raise'
def get_runner():
return AsyncTestRunner()
class AsyncTestRunner(AsyncActionRunner):
def __init__(self):
super(AsyncTestRunner, self).__init__(runner_id='1')
self.pre_run_called = False
self.run_called = False
self.post_run_called = False
def pre_run(self):
self.pre_run_called = True
def run(self, action_params):
self.run_called = True
result = {}
if self.runner_parameters.get(RAISE_PROPERTY, False):
raise Exception('Raise required.')
else:
result = {
'ran': True,
'action_params': action_params
}
return (LIVEACTION_STATUS_RUNNING, json.dumps(result), {'id': 'foo'})
def post_run(self, status, result):
self.post_run_called = True
| apache-2.0 | -2,797,841,502,659,849,000 | 31.763636 | 77 | 0.68091 | false |
SectorLabs/django-postgres-extra | psqlextra/partitioning/manager.py | 1 | 4538 | from typing import List, Optional, Tuple
from django.db import connections
from psqlextra.models import PostgresPartitionedModel
from .config import PostgresPartitioningConfig
from .constants import AUTO_PARTITIONED_COMMENT
from .error import PostgresPartitioningError
from .partition import PostgresPartition
from .plan import PostgresModelPartitioningPlan, PostgresPartitioningPlan
PartitionList = List[Tuple[PostgresPartitionedModel, List[PostgresPartition]]]
class PostgresPartitioningManager:
    """Helps manage partitions by automatically creating new partitions and
    deleting old ones according to the configuration."""
def __init__(self, configs: List[PostgresPartitioningConfig]) -> None:
self.configs = configs
self._validate_configs(self.configs)
def plan(
self,
skip_create: bool = False,
skip_delete: bool = False,
using: Optional[str] = None,
) -> PostgresPartitioningPlan:
"""Plans which partitions should be deleted/created.
Arguments:
skip_create:
If set to True, no partitions will be marked
for creation, regardless of the configuration.
skip_delete:
If set to True, no partitions will be marked
for deletion, regardless of the configuration.
using:
Name of the database connection to use.
Returns:
A plan describing what partitions would be created
and deleted if the plan is applied.
"""
model_plans = []
for config in self.configs:
model_plan = self._plan_for_config(
config,
skip_create=skip_create,
skip_delete=skip_delete,
using=using,
)
if not model_plan:
continue
model_plans.append(model_plan)
return PostgresPartitioningPlan(model_plans)
def find_config_for_model(
self, model: PostgresPartitionedModel
) -> Optional[PostgresPartitioningConfig]:
"""Finds the partitioning config for the specified model."""
return next(
(config for config in self.configs if config.model == model), None
)
def _plan_for_config(
self,
config: PostgresPartitioningConfig,
skip_create: bool = False,
skip_delete: bool = False,
using: Optional[str] = None,
) -> Optional[PostgresModelPartitioningPlan]:
"""Creates a partitioning plan for one partitioning config."""
connection = connections[using or "default"]
table = self._get_partitioned_table(connection, config.model)
model_plan = PostgresModelPartitioningPlan(config)
if not skip_create:
for partition in config.strategy.to_create():
if table.partition_by_name(name=partition.name()):
continue
model_plan.creations.append(partition)
if not skip_delete:
for partition in config.strategy.to_delete():
introspected_partition = table.partition_by_name(
name=partition.name()
)
if not introspected_partition:
break
if introspected_partition.comment != AUTO_PARTITIONED_COMMENT:
continue
model_plan.deletions.append(partition)
if len(model_plan.creations) == 0 and len(model_plan.deletions) == 0:
return None
return model_plan
@staticmethod
def _get_partitioned_table(connection, model: PostgresPartitionedModel):
with connection.cursor() as cursor:
table = connection.introspection.get_partitioned_table(
cursor, model._meta.db_table
)
if not table:
raise PostgresPartitioningError(
f"Model {model.__name__}, with table "
f"{model._meta.db_table} does not exists in the "
"database. Did you run `python manage.py migrate`?"
)
return table
@staticmethod
def _validate_configs(configs: List[PostgresPartitioningConfig]):
"""Ensures there is only one config per model."""
models = set([config.model.__name__ for config in configs])
if len(models) != len(configs):
raise PostgresPartitioningError(
"Only one partitioning config per model is allowed"
)
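# A minimal usage sketch. The config is assumed to come from the library's own
# helpers (e.g. psqlextra.partitioning.partition_by_current_time), and the plan
# methods shown are assumptions about the surrounding API rather than part of
# this module:
#
#   manager = PostgresPartitioningManager([config])
#   plan = manager.plan(using="default")
#   plan.apply(using="default")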
| mit | 6,080,695,867,918,829,000 | 32.124088 | 78 | 0.612164 | false |
nealegibson/Infer | src/GPKernelFunctions.py | 1 | 10701 | """
Useful (non-periodic) GP Kernel Functions
"""
import numpy as np
import scipy.spatial
from scipy.special import gamma,kv
###################################################################################################
#Exponential class
def SqExponentialRad(X,Y,theta,white_noise=False):
"""
Standard squared exponential function (just one length parameter).
k(x,x') = th0^2 * exp( - 1/2*th1^2 Sum_i * (x_i-x_i')^2 ) [+ sigma^2 delta_']
theta[0] - sqrt maximum covariance parameter - gives 1sigma prior dist size
theta[1] - inverse length scale (1/2l^2)
theta[2] - white noise standard deviation if white_noise=True
X,Y - input matricies
"""
#Calculate distance matrix without scaling
D2 = EuclideanDist2(X,Y)
#Calculate covariance matrix
K = theta[0]**2 * np.exp( - D2 / (2*(theta[1]**2)) )
#Add white noise
if white_noise == True: K += (np.identity(X[:,0].size) * (theta[2]**2))
return np.matrix(K)
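#A minimal usage sketch (illustrative values only): a 50 x 1 input matrix yields
#a 50 x 50 covariance matrix, e.g.
#  X = np.matrix(np.linspace(0., 1., 50)).T
#  K = SqExponentialRad(X, X, [1., 0.2, 1e-3], white_noise=True)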
def SqExponentialARD(X,Y,theta,white_noise=False):
"""
ARD squared exponential function
  (with an inverse length scale for each input vector in X).
k(x,x') = th0^2 * exp( -Sum_i n_i * (x_i-x_i')^2 ) [+ sigma^2 delta_']
theta[0] - sqrt maximum covariance parameter - gives 1sigma prior dist size
theta[1:-1] - inverse length scales (1/2l_i^2) for each input vector in X,Y
theta[-1] - white noise standard deviation if white_noise=True
X,Y - input matrices
"""
#Calculate distance matrix with scaling - multiply each coord by sqrt(eta)
#n(x_i-x_j)^2 = (sqrt(n)*x_i-sqrt(n)*x_j)^2
D2 = EuclideanDist2(X,Y,v=np.sqrt(np.abs(np.array(theta[1:-1]))))
#Calculate covariance matrix (leave out the factor of 1/2)
K = theta[0]**2 * np.exp( -D2 )
#Add white noise
if white_noise == True: K += np.identity(X[:,0].size) * (theta[-1]**2)
return np.matrix(K)
def SqExponential(X,Y,theta,white_noise=False):
"""
ARD squared exponential function
  (with a length scale for each input vector in X).
k(x,x') = th0^2 * exp( -Sum_i n_i * (x_i-x_i')^2 ) [+ sigma^2 delta_']
theta[0] - sqrt maximum covariance parameter - gives 1sigma prior dist size
theta[1:-1] - inverse length scales (1/2l_i^2) for each input vector in X,Y
theta[-1] - white noise standard deviation if white_noise=True
X,Y - input matricies
"""
#Calculate distance matrix with scaling - multiply each coord by sqrt(eta)
#n(x_i-x_j)^2 = (sqrt(n)*x_i-sqrt(n)*x_j)^2
D2 = EuclideanDist2(X,Y,v=1./(np.array(theta[1:-1])*np.sqrt(2.)))
#Calculate covariance matrix (leave out the factor of 1/2)
K = theta[0]**2 * np.exp( -D2 )
#Add white noise
if white_noise == True: K += np.identity(X[:,0].size) * (theta[-1]**2)
return np.matrix(K)
def ExponentialRad(X,Y,theta,white_noise=False):
"""
Standard Exponential function (with single length scale).
k(x,x') = th0^2 * exp( - 1/2*th1^2 Sum_i * (x_i-x_i') ) [+ sigma^2 delta_']
theta[0] - sqrt maximum covariance parameter - gives 1sigma prior dist size
theta[1] - inverse length scale (1/2l^2)
theta[2] - white noise standard deviation if white_noise=True
  X,Y - input matrices
"""
  #Calculate distance matrix without scaling (v=None)
D = EuclideanDist(X,Y,v=None)
#Calculate covariance matrix
K = theta[0]**2 * np.exp( - D / (2*(theta[1]**2)) )
#Add white noise
if white_noise == True: K += np.identity(X[:,0].size) * (theta[-1]**2)
return np.matrix(K)
def ExponentialARD(X,Y,theta,white_noise=False):
"""
  ARD exponential function
  (with an inverse length scale for each input vector in X).
k(x,x') = th0^2 * exp( -Sum_i n_i * (x_i-x_i') ) [+ sigma^2 delta_']
theta[0] - sqrt maximum covariance parameter - gives 1sigma prior dist size
theta[1:-1] - inverse length scales (1/2l_i^2) for each input vector in X,Y
theta[-1] - white noise standard deviation if white_noise=True
  X,Y - input matrices
"""
#Calculate distance matrix with scaling
D = EuclideanDist(X,Y,v=np.sqrt(np.abs(np.array(theta[1:-1]))))
#Calculate covariance matrix
K = theta[0]**2 * np.exp( - D / 2 )
#Add white noise
if white_noise == True: K += np.identity(X[:,0].size) * (theta[-1]**2)
return np.matrix(K)
####################################################################################################
def SqExponentialSum(X,Y,theta,white_noise=False):
"""
Squared exponential function with independent basis components
(with height scale and inverse length scale for each input in X vectors).
k(x,x') = Sum th_i^2 * exp( n_i * (x_i-x_i')^2 ) [+ sigma^2 delta_']
theta[0+n,1+n] - height scale, inverse length scale pairs
theta[-1] - white noise standard deviation if white_noise=True
  X,Y - input matrices
"""
#Calculate distance matrix with scaling - multiply each coord by sqrt(eta)
m,n = X.shape
#ensure inputs are matrices - otherwise EuclideanDist fails for 1D
assert type(X) is np.matrixlib.defmatrix.matrix
assert type(Y) is np.matrixlib.defmatrix.matrix
K = np.zeros((m,m))
#sum over the input vectors
for i in range(n):
D2 = EuclideanDist2( np.mat(X[:,i]),np.mat(Y[:,i]),v=[np.sqrt(np.abs(theta[2*i+1]))])
K += theta[2*i]**2 * np.exp( -D2 )
#Add white noise
if white_noise == True: K += np.identity(m) * (theta[-1]**2)
return np.matrix(K)
####################################################################################################
#Rational quadratic - not tested
def RationalQuadRad(X, Y, theta, white_noise = False):
"""
Rational quadratic kernel (radial) - not fully tested
k(x,x') = th0^2 * (1 + (x_i-x_i')^2/2th1*th2^2)^-th1) [+ th5^2 delta_']
theta[0] - sqrt maximum covariance parameter - gives 1sigma prior dist size
theta[1] - alpha
theta[2] - length scale
theta[3] - white noise standard deviation if white_noise=True
"""
# Calculate distance matrix without scaling
D2 = EuclideanDist2(X, Y)
# Calculate covariance matrix
K = theta[0]**2 * (1 + (D2 / (2.*theta[1]*(theta[2]**2.)) ) )**(-theta[1])
# Add white noise
if white_noise == True: K += (np.identity(X[:,0].size) * (theta[3]**2))
return np.matrix(K)
####################################################################################################
#Matern class of covariance functions - not tested
def MaternRad(X,Y,theta,white_noise=False):
"""
Matern covariance kernel - not properly tested!
Radial - ie same length scales in all inputs
"""
#Calculate distance matrix with (global) scaling
D = EuclideanDist(X,Y) / theta[2]
#Calculate covariance matrix from matern function
v = theta[1]
K = 2.**(1.-v) / gamma(v) * (np.sqrt(2.*v)*D)**v * kv(v,np.sqrt(2.*v)*D)
#diagonal terms should be set to one (when D2 = 0, kv diverges but full function = 1)
  #this only works for a square 'covariance' matrix...
  #i.e. fails for blocks...
  # K[np.where(np.identity(X[:,0].size)==1)] = 1.
  #this should work, but again needs to be tested properly...
K[np.where(D==0.)] = 1.
#now multiply by an overall scale function
K = K * theta[0]**2
#Add white noise
if white_noise == True: K += np.identity(X[:,0].size) * (theta[3]**2)
return np.matrix(K)
#matern kernel for v=3/2 fixed - rougher than sq exponential
def MAT_Kernel32(X,Y,theta,white_noise=False):
"""
Matern covariance kernel for 3/2 shape parameter
theta[0] - overall scale param - ie prior covariance
theta[1] - length scale
theta[2] - white noise
"""
D = EuclideanDist(X,Y) / theta[1]
K = theta[0]**2 * (1 + np.sqrt(3.)*D) * np.exp(-np.sqrt(3.)*D)
if white_noise == True: K += np.identity(X[:,0].size) * (theta[2]**2)
return np.matrix(K)
#matern kernel for v=5/2 fixed - rougher than sq exponential, smoother than above
#3/2 process
def MAT_Kernel52(X,Y,theta,white_noise=False):
"""
Matern covariance kernel for 5/2 shape parameter
theta[0] - overall scale param - ie prior covariance
theta[1] - length scale
theta[2] - white noise
"""
D = EuclideanDist(X,Y) / theta[1]
K = theta[0]**2 * (1 + np.sqrt(5.)*D + 5./3.*(D**2)) * np.exp(-np.sqrt(5.)*D)
if white_noise == True: K += np.identity(X[:,0].size) * (theta[2]**2)
return np.matrix(K)
def MaternARD(X,Y,theta,white_noise=False):
"""
Matern covariance kernel - not fully tested!
different length scales in all inputs
theta[0] - overall scale param - ie prior covariance
theta[1] - shape parameter
theta[2:-1] - length scales
theta[-1] - white noise
"""
#Calculate distance matrix with scaling
D = EuclideanDist(X,Y,v=theta[2:-1])
#Calculate covariance matrix from matern function
v = theta[1]
K = 2**(1.-v) / gamma(v) * (np.sqrt(2*v)*D)**v * kv(v,np.sqrt(2*v)*D)
#diagonal terms should be set to one (when D2 = 0, kv diverges but full function = 1)
  #this only works for a square 'covariance' matrix...
  #i.e. fails for blocks...
  # K[np.where(np.identity(X[:,0].size)==1)] = 1.
  #this should work, but again needs to be tested properly...
K[np.where(D==0.)] = 1.
#now multiply by an overall scale function
K = K * theta[0]
#Add white noise
if white_noise == True: K += np.identity(X[:,0].size) * (theta[-1]**2)
return np.matrix(K)
####################################################################################################
#Auxiliary functions to compute Euclidean distances
def EuclideanDist(X1,X2,v=None):
"""
  Calculate the distance matrix for 2 data matrices
X1 - n x D input matrix
X2 - m x D input matrix
v - weight vector
D - output an n x m matrix of dist = sqrt( Sum_i (1/l_i^2) * (x_i - x'_i)^2 )
"""
#ensure inputs are in matrix form
X1,X2 = np.matrix(X1), np.matrix(X2)
if v is not None: #scale each coord in Xs by the weight vector
V = np.abs(np.matrix( np.diag(v) ))
X1 = X1 * V
X2 = X2 * V
  #calculate euclidean distance (after weighting)
D = scipy.spatial.distance.cdist( X1, X2, 'euclidean')
return D
def EuclideanDist2(X1,X2,v=None):
"""
  Calculate the distance matrix squared for 2 data matrices
X1 - n x D input matrix
X2 - m x D input matrix
v - weight vector
D2 - output an n x m matrix of dist^2 = Sum_i (1/l_i^2) * (x_i - x'_i)^2
"""
#ensure inputs are in matrix form
X1,X2 = np.matrix(X1), np.matrix(X2)
if v is not None: #scale each coord in Xs by the weight vector
V = np.abs(np.matrix( np.diag(v) ))
X1 = X1 * V
X2 = X2 * V
  #calculate squared euclidean distance (after weighting)
D2 = scipy.spatial.distance.cdist( X1, X2, 'sqeuclidean' )
return D2
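#For example (illustrative), with v=[2.] each 1D coordinate is scaled by 2 before
#the pairwise distance is taken, so EuclideanDist2 returns 4*(x - x')**2.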
####################################################################################################
| gpl-3.0 | -2,814,045,030,075,659,300 | 30.289474 | 100 | 0.605271 | false |
dsajkl/reqiop | common/lib/xmodule/xmodule/progress.py | 127 | 4896 | '''
Progress class for modules. Represents where a student is in a module.
Useful things to know:
- Use Progress.to_js_status_str() to convert a progress into a simple
status string to pass to js.
- Use Progress.to_js_detail_str() to convert a progress into a more detailed
string to pass to js.
In particular, these functions have a canonical handing of None.
For most subclassing needs, you should only need to reimplement
frac() and __str__().
'''
import numbers
class Progress(object):
'''Represents a progress of a/b (a out of b done)
a and b must be numeric, but not necessarily integer, with
0 <= a <= b and b > 0.
Progress can only represent Progress for modules where that makes sense. Other
modules (e.g. html) should return None from get_progress().
TODO: add tag for module type? Would allow for smarter merging.
'''
def __init__(self, a, b):
'''Construct a Progress object. a and b must be numbers, and must have
0 <= a <= b and b > 0
'''
# Want to do all checking at construction time, so explicitly check types
if not (isinstance(a, numbers.Number) and
isinstance(b, numbers.Number)):
raise TypeError('a and b must be numbers. Passed {0}/{1}'.format(a, b))
if a > b:
a = b
if a < 0:
a = 0
if b <= 0:
raise ValueError('fraction a/b = {0}/{1} must have b > 0'.format(a, b))
self._a = a
self._b = b
def frac(self):
''' Return tuple (a,b) representing progress of a/b'''
return (self._a, self._b)
def percent(self):
''' Returns a percentage progress as a float between 0 and 100.
subclassing note: implemented in terms of frac(), assumes sanity
checking is done at construction time.
'''
(a, b) = self.frac()
return 100.0 * a / b
def started(self):
''' Returns True if fractional progress is greater than 0.
subclassing note: implemented in terms of frac(), assumes sanity
checking is done at construction time.
'''
return self.frac()[0] > 0
def inprogress(self):
''' Returns True if fractional progress is strictly between 0 and 1.
subclassing note: implemented in terms of frac(), assumes sanity
checking is done at construction time.
'''
(a, b) = self.frac()
return a > 0 and a < b
def done(self):
''' Return True if this represents done.
subclassing note: implemented in terms of frac(), assumes sanity
checking is done at construction time.
'''
(a, b) = self.frac()
return a == b
def ternary_str(self):
''' Return a string version of this progress: either
"none", "in_progress", or "done".
subclassing note: implemented in terms of frac()
'''
(a, b) = self.frac()
if a == 0:
return "none"
if a < b:
return "in_progress"
return "done"
def __eq__(self, other):
''' Two Progress objects are equal if they have identical values.
Implemented in terms of frac()'''
if not isinstance(other, Progress):
return False
(a, b) = self.frac()
(a2, b2) = other.frac()
return a == a2 and b == b2
def __ne__(self, other):
''' The opposite of equal'''
return not self.__eq__(other)
def __str__(self):
''' Return a string representation of this string.
subclassing note: implemented in terms of frac().
'''
(a, b) = self.frac()
return "{0}/{1}".format(a, b)
@staticmethod
def add_counts(a, b):
'''Add two progress indicators, assuming that each represents items done:
(a / b) + (c / d) = (a + c) / (b + d).
If either is None, returns the other.
'''
if a is None:
return b
if b is None:
return a
# get numerators + denominators
(n, d) = a.frac()
(n2, d2) = b.frac()
return Progress(n + n2, d + d2)
@staticmethod
def to_js_status_str(progress):
'''
Return the "status string" version of the passed Progress
object that should be passed to js. Use this function when
sending Progress objects to js to limit dependencies.
'''
if progress is None:
return "0"
return progress.ternary_str()
@staticmethod
def to_js_detail_str(progress):
'''
Return the "detail string" version of the passed Progress
object that should be passed to js. Use this function when
passing Progress objects to js to limit dependencies.
'''
if progress is None:
return "0"
return str(progress)
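# A few illustrative values (not part of the module; shown here as a sketch):
#   Progress(2, 5).percent() -> 40.0
#   Progress.add_counts(Progress(1, 2), Progress(3, 5)).frac() -> (4, 7)
#   Progress.to_js_status_str(None) -> "0"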
| agpl-3.0 | -8,059,264,056,538,215,000 | 29.409938 | 84 | 0.571078 | false |
tdr130/pupy | pupy/modules/socks5proxy.py | 6 | 7573 | # -*- coding: UTF8 -*-
# --------------------------------------------------------------
# Copyright (c) 2015, Nicolas VERDIER ([email protected])
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
# --------------------------------------------------------------
#RFC @https://www.ietf.org/rfc/rfc1928.txt
from pupylib.PupyModule import *
import StringIO
import pupylib.utils
import SocketServer
import threading
import socket
import logging
import struct
import traceback
import time
__class_name__="Socks5Proxy"
CODE_SUCCEEDED='\x00'
CODE_GENERAL_SRV_FAILURE='\x01'
CODE_CONN_NOT_ALLOWED='\x02'
CODE_NET_NOT_REACHABLE='\x03'
CODE_HOST_UNREACHABLE='\x04'
CODE_CONN_REFUSED='\x05'
CODE_TTL_EXPIRED='\x06'
CODE_COMMAND_NOT_SUPPORTED='\x07'
CODE_ADDRESS_TYPE_NOT_SUPPORTED='\x08'
CODE_UNASSIGNED='\x09'
class SocketPiper(threading.Thread):
def __init__(self, read_sock, write_sock):
threading.Thread.__init__(self)
self.daemon=True
self.read_sock=read_sock
self.write_sock=write_sock
def run(self):
try:
self.read_sock.setblocking(0)
while True:
data=""
try:
data+=self.read_sock.recv(1000000)
if not data:
break
except Exception as e:
if e[0]==9:#errno connection closed
break
if not data:
time.sleep(0.05)
continue
self.write_sock.sendall(data)
except Exception as e:
logging.debug("error in socket piper: %s"%str(traceback.format_exc()))
finally:
try:
self.write_sock.shutdown(socket.SHUT_RDWR)
self.write_sock.close()
except Exception:
pass
try:
self.read_sock.shutdown(socket.SHUT_RDWR)
self.read_sock.close()
except Exception:
pass
logging.debug("piper finished")
class Socks5RequestHandler(SocketServer.BaseRequestHandler):
def _socks_response(self, code, terminate=False):
ip="".join([chr(int(i)) for i in self.server.server_address[0].split(".")])
port=struct.pack("!H",self.server.server_address[1])
self.request.sendall("\x05"+code+"\x00"+"\x01"+ip+port)
if terminate:
self.request.shutdown(socket.SHUT_RDWR)
self.request.close()
def handle(self):
self.request.settimeout(5)
VER=self.request.recv(1)
NMETHODS=self.request.recv(1)
METHODS=self.request.recv(int(struct.unpack("!B",NMETHODS)[0]))
"""
o X'00' NO AUTHENTICATION REQUIRED
o X'01' GSSAPI
o X'02' USERNAME/PASSWORD
o X'03' to X'7F' IANA ASSIGNED
o X'80' to X'FE' RESERVED FOR PRIVATE METHODS
o X'FF' NO ACCEPTABLE METHODS
"""
		#for now, only "no authentication" is supported:
self.request.sendall("\x05\x00")
VER=self.request.recv(1)
if VER!="\x05":
			logging.debug("receiving unsupported socks version: %s"%VER.encode('hex'))
self._socks_response(CODE_GENERAL_SRV_FAILURE, terminate=True)
return
CMD=self.request.recv(1)
if CMD!="\x01": # we only support CONNECT for now
			logging.debug("receiving unsupported socks CMD: %s"%CMD.encode('hex'))
self._socks_response(CODE_COMMAND_NOT_SUPPORTED, terminate=True)
return
RSV=self.request.recv(1)
DST_ADDR=None
DST_PORT=None
ATYP=self.request.recv(1)
if ATYP=="\x01":
DST_ADDR=".".join([str(ord(x)) for x in self.request.recv(4)])
DST_PORT=struct.unpack("!H",self.request.recv(2))[0]
elif ATYP=="\x03":
DOMAIN_LEN=int(struct.unpack("!B",self.request.recv(1))[0])
DST_ADDR=self.request.recv(DOMAIN_LEN)
DST_PORT=struct.unpack("!H",self.request.recv(2))[0]
else: #TODO: ipv6
logging.debug("atyp not supported: %s"%ATYP.encode('hex'))
self._socks_response(CODE_ADDRESS_TYPE_NOT_SUPPORTED, terminate=True)
return
		#now we have all we need, we can open the socket proxied through rpyc :)
logging.debug("connecting to %s:%s through the rpyc client"%(DST_ADDR,DST_PORT))
rsocket_mod=self.server.rpyc_client.conn.modules.socket
rsocket=rsocket_mod.socket(rsocket_mod.AF_INET,rsocket_mod.SOCK_STREAM)
rsocket.settimeout(5)
try:
rsocket.connect((DST_ADDR, DST_PORT))
except Exception as e:
logging.debug("error: %s"%e)
if e[0]==10060:
logging.debug("unreachable !")
self._socks_response(CODE_HOST_UNREACHABLE, terminate=True)
else:
self._socks_response(CODE_NET_NOT_REACHABLE, terminate=True)
return
self._socks_response(CODE_SUCCEEDED)
logging.debug("connection succeeded !")
#self.request.settimeout(30)
#rsocket.settimeout(30)
sp1=SocketPiper(self.request, rsocket)
sp2=SocketPiper(rsocket, self.request)
sp1.start()
sp2.start()
sp1.join()
sp2.join()
logging.debug("conn to %s:%s closed"%(DST_ADDR,DST_PORT))
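# Once the listener is running, any SOCKS5-aware client can be pointed at it,
# e.g. (hypothetical host/port matching the module's defaults):
#   curl --socks5 127.0.0.1:1080 http://example.com/
# Only the CONNECT command with the "no authentication" method is handled here.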
class Socks5Server(SocketServer.TCPServer):
allow_reuse_address = True
def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True, rpyc_client=None):
self.rpyc_client=rpyc_client
SocketServer.TCPServer.__init__(self, server_address, RequestHandlerClass, bind_and_activate)
class ThreadedSocks5Server(SocketServer.ThreadingMixIn, Socks5Server):
pass
class Socks5Proxy(PupyModule):
	""" start a socks5 proxy going through a client """
max_clients=1
unique_instance=True
daemon=True
server=None
def init_argparse(self):
self.arg_parser = PupyArgumentParser(prog='socks5proxy', description=self.__doc__)
self.arg_parser.add_argument('-p', '--port', default='1080')
self.arg_parser.add_argument('action', choices=['start','stop'])
def stop_daemon(self):
		self.success("shutting down socks server ...")
if self.server:
self.server.shutdown()
del self.server
self.success("socks server shut down")
else:
self.error("server is None")
def run(self, args):
if args.action=="start":
if self.server is None:
self.success("starting server ...")
self.server = ThreadedSocks5Server(("127.0.0.1", int(args.port)), Socks5RequestHandler, rpyc_client=self.client)
t=threading.Thread(target=self.server.serve_forever)
t.daemon=True
t.start()
self.success("socks5 server started on 127.0.0.1:%s"%args.port)
else:
self.error("socks5 server is already started !")
elif args.action=="stop":
if self.server:
self.job.stop()
del self.job
self.success("socks5 server stopped !")
else:
self.error("socks5 server is already stopped")
| bsd-3-clause | -2,831,836,035,528,928,000 | 34.890995 | 756 | 0.710022 | false |
darshanthaker/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/afm.py | 69 | 15057 | """
This is a python interface to Adobe Font Metrics Files. Although a
number of other python implementations exist (and may be more complete
than mine) I decided not to go with them because they were either
1) copyrighted or used a non-BSD compatible license
2) had too many dependencies and I wanted a free standing lib
3) Did more than I needed and it was easier to write my own than
figure out how to just get what I needed from theirs
It is pretty easy to use, and requires only built-in python libs::
>>> from afm import AFM
>>> fh = file('ptmr8a.afm')
>>> afm = AFM(fh)
>>> afm.string_width_height('What the heck?')
(6220.0, 683)
>>> afm.get_fontname()
'Times-Roman'
>>> afm.get_kern_dist('A', 'f')
0
>>> afm.get_kern_dist('A', 'y')
-92.0
>>> afm.get_bbox_char('!')
[130, -9, 238, 676]
>>> afm.get_bbox_font()
[-168, -218, 1000, 898]
AUTHOR:
John D. Hunter <[email protected]>
"""
import sys, os, re
from _mathtext_data import uni2type1
#Convert string to a python type
_to_int = int
_to_float = float
_to_str = str
def _to_list_of_ints(s):
s = s.replace(',', ' ')
return [_to_int(val) for val in s.split()]
def _to_list_of_floats(s):
return [_to_float(val) for val in s.split()]
def _to_bool(s):
if s.lower().strip() in ('false', '0', 'no'): return False
else: return True
def _sanity_check(fh):
"""
Check if the file at least looks like AFM.
If not, raise :exc:`RuntimeError`.
"""
# Remember the file position in case the caller wants to
# do something else with the file.
pos = fh.tell()
try:
line = fh.readline()
finally:
fh.seek(pos, 0)
# AFM spec, Section 4: The StartFontMetrics keyword [followed by a
# version number] must be the first line in the file, and the
# EndFontMetrics keyword must be the last non-empty line in the
# file. We just check the first line.
if not line.startswith('StartFontMetrics'):
raise RuntimeError('Not an AFM file')
def _parse_header(fh):
"""
Reads the font metrics header (up to the char metrics) and returns
a dictionary mapping *key* to *val*. *val* will be converted to the
appropriate python type as necessary; eg:
* 'False'->False
* '0'->0
* '-168 -218 1000 898'-> [-168, -218, 1000, 898]
Dictionary keys are
StartFontMetrics, FontName, FullName, FamilyName, Weight,
ItalicAngle, IsFixedPitch, FontBBox, UnderlinePosition,
UnderlineThickness, Version, Notice, EncodingScheme, CapHeight,
XHeight, Ascender, Descender, StartCharMetrics
"""
headerConverters = {
'StartFontMetrics': _to_float,
'FontName': _to_str,
'FullName': _to_str,
'FamilyName': _to_str,
'Weight': _to_str,
'ItalicAngle': _to_float,
'IsFixedPitch': _to_bool,
'FontBBox': _to_list_of_ints,
'UnderlinePosition': _to_int,
'UnderlineThickness': _to_int,
'Version': _to_str,
'Notice': _to_str,
'EncodingScheme': _to_str,
'CapHeight': _to_float, # Is the second version a mistake, or
'Capheight': _to_float, # do some AFM files contain 'Capheight'? -JKS
'XHeight': _to_float,
'Ascender': _to_float,
'Descender': _to_float,
'StdHW': _to_float,
'StdVW': _to_float,
'StartCharMetrics': _to_int,
'CharacterSet': _to_str,
'Characters': _to_int,
}
d = {}
while 1:
line = fh.readline()
if not line: break
line = line.rstrip()
if line.startswith('Comment'): continue
lst = line.split( ' ', 1 )
#print '%-s\t%-d line :: %-s' % ( fh.name, len(lst), line )
key = lst[0]
if len( lst ) == 2:
val = lst[1]
else:
val = ''
#key, val = line.split(' ', 1)
try: d[key] = headerConverters[key](val)
except ValueError:
print >>sys.stderr, 'Value error parsing header in AFM:', key, val
continue
except KeyError:
print >>sys.stderr, 'Found an unknown keyword in AFM header (was %s)' % key
continue
if key=='StartCharMetrics': return d
raise RuntimeError('Bad parse')
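# For example, a header line such as "FontBBox -168 -218 1000 898" becomes
# d['FontBBox'] = [-168, -218, 1000, 898], and "IsFixedPitch false" becomes
# d['IsFixedPitch'] = False.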
def _parse_char_metrics(fh):
"""
Return a character metric dictionary. Keys are the ASCII num of
the character, values are a (*wx*, *name*, *bbox*) tuple, where
*wx* is the character width, *name* is the postscript language
name, and *bbox* is a (*llx*, *lly*, *urx*, *ury*) tuple.
This function is incomplete per the standard, but thus far parses
all the sample afm files tried.
"""
ascii_d = {}
name_d = {}
while 1:
line = fh.readline()
if not line: break
line = line.rstrip()
if line.startswith('EndCharMetrics'): return ascii_d, name_d
vals = line.split(';')[:4]
if len(vals) !=4 : raise RuntimeError('Bad char metrics line: %s' % line)
num = _to_int(vals[0].split()[1])
wx = _to_float(vals[1].split()[1])
name = vals[2].split()[1]
bbox = _to_list_of_ints(vals[3][2:])
# Workaround: If the character name is 'Euro', give it the corresponding
# character code, according to WinAnsiEncoding (see PDF Reference).
if name == 'Euro':
num = 128
if num != -1:
ascii_d[num] = (wx, name, bbox)
name_d[name] = (wx, bbox)
raise RuntimeError('Bad parse')
def _parse_kern_pairs(fh):
"""
Return a kern pairs dictionary; keys are (*char1*, *char2*) tuples and
values are the kern pair value. For example, a kern pairs line like
``KPX A y -50``
will be represented as::
d[ ('A', 'y') ] = -50
"""
line = fh.readline()
if not line.startswith('StartKernPairs'):
raise RuntimeError('Bad start of kern pairs data: %s'%line)
d = {}
while 1:
line = fh.readline()
if not line: break
line = line.rstrip()
if len(line)==0: continue
if line.startswith('EndKernPairs'):
fh.readline() # EndKernData
return d
vals = line.split()
if len(vals)!=4 or vals[0]!='KPX':
raise RuntimeError('Bad kern pairs line: %s'%line)
c1, c2, val = vals[1], vals[2], _to_float(vals[3])
d[(c1,c2)] = val
raise RuntimeError('Bad kern pairs parse')
def _parse_composites(fh):
"""
Return a composites dictionary. Keys are the names of the
composites. Values are a num parts list of composite information,
with each element being a (*name*, *dx*, *dy*) tuple. Thus a
composites line reading:
CC Aacute 2 ; PCC A 0 0 ; PCC acute 160 170 ;
will be represented as::
d['Aacute'] = [ ('A', 0, 0), ('acute', 160, 170) ]
"""
d = {}
while 1:
line = fh.readline()
if not line: break
line = line.rstrip()
if len(line)==0: continue
if line.startswith('EndComposites'):
return d
vals = line.split(';')
cc = vals[0].split()
name, numParts = cc[1], _to_int(cc[2])
pccParts = []
for s in vals[1:-1]:
pcc = s.split()
name, dx, dy = pcc[1], _to_float(pcc[2]), _to_float(pcc[3])
pccParts.append( (name, dx, dy) )
d[name] = pccParts
raise RuntimeError('Bad composites parse')
def _parse_optional(fh):
"""
Parse the optional fields for kern pair data and composites
return value is a (*kernDict*, *compositeDict*) which are the
return values from :func:`_parse_kern_pairs`, and
:func:`_parse_composites` if the data exists, or empty dicts
otherwise
"""
optional = {
'StartKernData' : _parse_kern_pairs,
'StartComposites' : _parse_composites,
}
d = {'StartKernData':{}, 'StartComposites':{}}
while 1:
line = fh.readline()
if not line: break
line = line.rstrip()
if len(line)==0: continue
key = line.split()[0]
if key in optional: d[key] = optional[key](fh)
l = ( d['StartKernData'], d['StartComposites'] )
return l
def parse_afm(fh):
"""
    Parse the Adobe Font Metrics file in file handle *fh*. Return value
    is a (*dhead*, *dcmetrics_ascii*, *dcmetrics_name*, *dkernpairs*,
    *dcomposite*) tuple where *dhead* is a :func:`_parse_header` dict,
    *dcmetrics_ascii* and *dcmetrics_name* are the two
    :func:`_parse_char_metrics` dicts, *dkernpairs* is a
    :func:`_parse_kern_pairs` dict (possibly {}), and *dcomposite* is a
    :func:`_parse_composites` dict (possibly {})
"""
_sanity_check(fh)
dhead = _parse_header(fh)
dcmetrics_ascii, dcmetrics_name = _parse_char_metrics(fh)
doptional = _parse_optional(fh)
return dhead, dcmetrics_ascii, dcmetrics_name, doptional[0], doptional[1]
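# Illustrative call (added for exposition; the .afm path below is an assumption
# and is not shipped with this module):
#   fh = open('/usr/local/share/fonts/afms/adobe/ptmr8a.afm')
#   dhead, dcm_ascii, dcm_name, dkern, dcomp = parse_afm(fh)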
class AFM:
def __init__(self, fh):
"""
Parse the AFM file in file object *fh*
"""
(dhead, dcmetrics_ascii, dcmetrics_name, dkernpairs, dcomposite) = \
parse_afm(fh)
self._header = dhead
self._kern = dkernpairs
self._metrics = dcmetrics_ascii
self._metrics_by_name = dcmetrics_name
self._composite = dcomposite
def get_bbox_char(self, c, isord=False):
if not isord: c=ord(c)
wx, name, bbox = self._metrics[c]
return bbox
def string_width_height(self, s):
"""
Return the string width (including kerning) and string height
as a (*w*, *h*) tuple.
"""
if not len(s): return 0,0
totalw = 0
namelast = None
miny = 1e9
maxy = 0
for c in s:
if c == '\n': continue
wx, name, bbox = self._metrics[ord(c)]
l,b,w,h = bbox
# find the width with kerning
try: kp = self._kern[ (namelast, name) ]
except KeyError: kp = 0
totalw += wx + kp
# find the max y
thismax = b+h
if thismax>maxy: maxy = thismax
# find the min y
thismin = b
if thismin<miny: miny = thismin
return totalw, maxy-miny
def get_str_bbox_and_descent(self, s):
"""
        Return the string bounding box and descent as a
        (*left*, *miny*, *total_width*, *height*, *descent*) tuple.
        """
        if not len(s): return 0, 0, 0, 0, 0
totalw = 0
namelast = None
miny = 1e9
maxy = 0
left = 0
if not isinstance(s, unicode):
s = s.decode()
for c in s:
if c == '\n': continue
name = uni2type1.get(ord(c), 'question')
try:
wx, bbox = self._metrics_by_name[name]
except KeyError:
name = 'question'
wx, bbox = self._metrics_by_name[name]
l,b,w,h = bbox
if l<left: left = l
# find the width with kerning
try: kp = self._kern[ (namelast, name) ]
except KeyError: kp = 0
totalw += wx + kp
# find the max y
thismax = b+h
if thismax>maxy: maxy = thismax
# find the min y
thismin = b
if thismin<miny: miny = thismin
return left, miny, totalw, maxy-miny, -miny
def get_str_bbox(self, s):
"""
Return the string bounding box
"""
return self.get_str_bbox_and_descent(s)[:4]
def get_name_char(self, c, isord=False):
"""
Get the name of the character, ie, ';' is 'semicolon'
"""
if not isord: c=ord(c)
wx, name, bbox = self._metrics[c]
return name
def get_width_char(self, c, isord=False):
"""
Get the width of the character from the character metric WX
field
"""
if not isord: c=ord(c)
wx, name, bbox = self._metrics[c]
return wx
def get_width_from_char_name(self, name):
"""
Get the width of the character from a type1 character name
"""
wx, bbox = self._metrics_by_name[name]
return wx
def get_height_char(self, c, isord=False):
"""
Get the height of character *c* from the bounding box. This
is the ink height (space is 0)
"""
if not isord: c=ord(c)
wx, name, bbox = self._metrics[c]
return bbox[-1]
def get_kern_dist(self, c1, c2):
"""
Return the kerning pair distance (possibly 0) for chars *c1*
and *c2*
"""
name1, name2 = self.get_name_char(c1), self.get_name_char(c2)
return self.get_kern_dist_from_name(name1, name2)
def get_kern_dist_from_name(self, name1, name2):
"""
Return the kerning pair distance (possibly 0) for chars
*name1* and *name2*
"""
try: return self._kern[ (name1, name2) ]
        except KeyError: return 0
def get_fontname(self):
"Return the font name, eg, 'Times-Roman'"
return self._header['FontName']
def get_fullname(self):
"Return the font full name, eg, 'Times-Roman'"
name = self._header.get('FullName')
if name is None: # use FontName as a substitute
name = self._header['FontName']
return name
def get_familyname(self):
"Return the font family name, eg, 'Times'"
name = self._header.get('FamilyName')
if name is not None:
return name
# FamilyName not specified so we'll make a guess
name = self.get_fullname()
extras = r'(?i)([ -](regular|plain|italic|oblique|bold|semibold|light|ultralight|extra|condensed))+$'
return re.sub(extras, '', name)
def get_weight(self):
"Return the font weight, eg, 'Bold' or 'Roman'"
return self._header['Weight']
def get_angle(self):
"Return the fontangle as float"
return self._header['ItalicAngle']
def get_capheight(self):
"Return the cap height as float"
return self._header['CapHeight']
def get_xheight(self):
"Return the xheight as float"
return self._header['XHeight']
def get_underline_thickness(self):
"Return the underline thickness as float"
return self._header['UnderlineThickness']
def get_horizontal_stem_width(self):
"""
Return the standard horizontal stem width as float, or *None* if
not specified in AFM file.
"""
return self._header.get('StdHW', None)
def get_vertical_stem_width(self):
"""
Return the standard vertical stem width as float, or *None* if
not specified in AFM file.
"""
return self._header.get('StdVW', None)
if __name__=='__main__':
#pathname = '/usr/local/lib/R/afm/'
pathname = '/usr/local/share/fonts/afms/adobe'
for fname in os.listdir(pathname):
fh = file(os.path.join(pathname,fname))
afm = AFM(fh)
w,h = afm.string_width_height('John Hunter is the Man!')
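        # Illustrative follow-up queries (added for context; the values shown
        # are typical for Times-Roman and are assumptions, not script output):
        #   afm.get_fontname()          -> 'Times-Roman'
        #   afm.get_width_char('A')     -> width in 1/1000 em units, e.g. 722.0
        #   afm.get_kern_dist('A', 'V') -> a negative kern value such as -135.0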
| agpl-3.0 | 8,105,868,437,981,154,000 | 29.479757 | 109 | 0.561998 | false |
nejucomo/sgg | spiralgalaxygame/demiurge.py | 1 | 4151 | import random
from math import sin, pi
from spiralgalaxygame.body import Body, BodyKind
from spiralgalaxygame.geometry import Vector, Circle
from spiralgalaxygame.discdist import DiscreteDistribution
def generate_galaxy_bodies(randgen = random.random,
parentmu = 1000,
parentsigma = 100,
galacticradius = 1000,
spokes = 4,
spin = 3.8,
diffusion = 13.5,
tightness = 0.35):
for i in range(int(random.lognormvariate(parentmu, parentsigma))):
for body in generate_star(randgen, galacticradius, spokes, spin, diffusion, tightness):
yield body
# Notes:
#
# Parameters named u are expected to be uniform random samples on [0, 1).
#
# The "Standard Length Unit" is a convenient unit of measurement of
# game objects, where the smallest ships have a radius close to 1 SLU.
def generate_star(randgen, galacticradius, spokes, spin, diffusion, tightness):
"""randgen is a function which generates uniform random samples [0, 1)."""
(kind, tightnessfactor, minrad, radrange, childmu, childsigma) = select_star_info(randgen())
adjustedtightness = tightness * tightnessfactor
bdfc = select_body_distance(galacticradius, adjustedtightness, randgen())
angle = select_angle(spokes, diffusion, spin, bdfc, randgen())
bodyradius = select_star_radius(minrad, radrange, randgen())
circle = Circle(Vector.from_angle_and_radius(angle, bdfc), bodyradius)
parent = Body(kind, circle)
yield parent
for i in range(int(random.lognormvariate(childmu, childsigma))):
yield generate_child(randgen, circle)
def generate_child(randgen, pcircle):
(kind, solarradius, minrad, radrange) = select_child_info(randgen())
    bdfp = pcircle.radius + select_body_distance(solarradius, tightness=0.5, u=randgen())
    angle = randgen() * 2 * pi
    center = pcircle.center + Vector.from_angle_and_radius(angle, bdfp)
    bodyradius = select_star_radius(minrad, radrange, randgen())
return Body(kind, Circle(center, bodyradius))
def select_body_distance(galacticradius, tightness, u):
"""Given galacticradius in SLUs, a tightness parameter, and a u sample, return a distance in SLUs."""
t = sin(0.5 * pi * u) ** tightness
k = (t + u**4) / 2
return galacticradius * k
def select_angle(spokes, diffusion, spin, bdfc, u):
"""Given spokes, diffusion, spin, and bdfc (body distance from core) and a u, return galactic angle."""
    return select_base_angle(spokes, diffusion, u) + spin * bdfc
def select_base_angle(spokes, diffusion, u):
factor = spokes * pi
a = sin(factor * u)
b = abs(a) ** diffusion
return u - b / factor
def select_star_radius(minradius, radiusrange, u):
return minradius + radiusrange * u**2
select_star_info = DiscreteDistribution(
# items: (kind, tightnessfactor, minrad, radrange, childmu, childsigma)
# Note: blue and green planets are never parent bodies.
(99, (BodyKind.star_white, 0.9, 100, 100, 0.35, 0.55)),
(60, (BodyKind.star_yellow, 0.85, 80, 200, 0.50, 0.50)),
(40, (BodyKind.star_red, 0.67, 40, 120, 0.20, 0.30)),
(7, (BodyKind.planet_grey, 1.0, 10, 80, -0.50, 0.11)),
(1, (BodyKind.planet_brown, 0.9, 30, 40, -0.40, 0.15)),
(10, (BodyKind.black_hole, 0.4, 1, 10, -0.30, 0.30)),
(17, (BodyKind.dust_cloud, 1.0, 80, 400, -1.00, 0.00)),
(13, (BodyKind.gas_cloud, 0.6, 80, 800, -1.00, 0.00)),
)
select_child_info = DiscreteDistribution(
# items: (kind, solarradius, minrad, radrange)
# Note: dust clouds and gas clouds are never children.
(1, (BodyKind.star_white, 1000, 60, 100)),
(1, (BodyKind.star_yellow, 1100, 50, 130)),
(2, (BodyKind.star_red, 1300, 20, 50)),
(100, (BodyKind.planet_blue, 1400, 10, 80)),
(120, (BodyKind.planet_grey, 2500, 10, 60)),
(90, (BodyKind.planet_green, 1200, 15, 60)),
(80, (BodyKind.planet_brown, 1800, 25, 30)),
(5, (BodyKind.black_hole, 700, 1, 6)),
)
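# Illustrative sketch (added for exposition; not part of the original module).
# The keyword values mirror generate_galaxy_bodies' defaults and the seed is
# arbitrary:
#   rng = random.Random(42).random
#   bodies = list(generate_star(rng, galacticradius=1000, spokes=4,
#                               spin=3.8, diffusion=13.5, tightness=0.35))
#   parent = bodies[0]  # the first yielded Body is the parent star/cloud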
| agpl-3.0 | 2,978,818,440,894,455,000 | 37.794393 | 107 | 0.634064 | false |
badjr/pysal | pysal/core/IOHandlers/mat.py | 20 | 4377 | import pysal
import os.path
import scipy.io as sio
import pysal.core.FileIO as FileIO
from pysal.weights import W
from pysal.weights.util import full, full2W
from warnings import warn
__author__ = "Myunghwa Hwang <[email protected]>"
__all__ = ["MatIO"]
class MatIO(FileIO.FileIO):
"""
Opens, reads, and writes weights file objects in MATLAB Level 4-5 MAT format.
MAT files are used in Dr. LeSage's MATLAB Econometrics library.
The MAT file format can handle both full and sparse matrices,
and it allows for a matrix dimension greater than 256.
In PySAL, row and column headers of a MATLAB array are ignored.
PySAL uses matlab io tools in scipy.
Thus, it is subject to all limits that loadmat and savemat in scipy have.
Notes
-----
If a given weights object contains too many observations to
write it out as a full matrix,
PySAL writes out the object as a sparse matrix.
References
----------
MathWorks (2011) "MATLAB 7 MAT-File Format" at
http://www.mathworks.com/help/pdf_doc/matlab/matfile_format.pdf.
scipy matlab io
http://docs.scipy.org/doc/scipy/reference/tutorial/io.html
"""
FORMATS = ['mat']
MODES = ['r', 'w']
def __init__(self, *args, **kwargs):
self._varName = 'Unknown'
FileIO.FileIO.__init__(self, *args, **kwargs)
self.file = open(self.dataPath, self.mode + 'b')
def _set_varName(self, val):
if issubclass(type(val), basestring):
self._varName = val
def _get_varName(self):
return self._varName
varName = property(fget=_get_varName, fset=_set_varName)
def read(self, n=-1):
self._complain_ifclosed(self.closed)
return self._read()
def seek(self, pos):
if pos == 0:
self.file.seek(0)
self.pos = 0
def _read(self):
"""Reads MATLAB mat file
Returns a pysal.weights.weights.W object
Examples
--------
Type 'dir(w)' at the interpreter to see what methods are supported.
Open a MATLAB mat file and read it into a pysal weights object
>>> w = pysal.open(pysal.examples.get_path('spat-sym-us.mat'),'r').read()
Get the number of observations from the header
>>> w.n
46
Get the mean number of neighbors
>>> w.mean_neighbors
4.0869565217391308
Get neighbor distances for a single observation
>>> w[1]
{25: 1, 3: 1, 28: 1, 39: 1}
"""
if self.pos > 0:
raise StopIteration
mat = sio.loadmat(self.file)
mat_keys = [k for k in mat if not k.startswith("_")]
full_w = mat[mat_keys[0]]
self.pos += 1
return full2W(full_w)
def write(self, obj):
"""
Parameters
----------
.write(weightsObject)
accepts a weights object
Returns
        -------
a MATLAB mat file
write a weights object to the opened mat file.
Examples
--------
>>> import tempfile, pysal, os
>>> testfile = pysal.open(pysal.examples.get_path('spat-sym-us.mat'),'r')
>>> w = testfile.read()
Create a temporary file for this example
>>> f = tempfile.NamedTemporaryFile(suffix='.mat')
Reassign to new var
>>> fname = f.name
Close the temporary named file
>>> f.close()
Open the new file in write mode
>>> o = pysal.open(fname,'w')
Write the Weights object into the open file
>>> o.write(w)
>>> o.close()
Read in the newly created mat file
>>> wnew = pysal.open(fname,'r').read()
Compare values from old to new
>>> wnew.pct_nonzero == w.pct_nonzero
True
Clean up temporary file created for this example
>>> os.remove(fname)
"""
self._complain_ifclosed(self.closed)
if issubclass(type(obj), W):
try:
w = full(obj)[0]
except ValueError:
w = obj.sparse
sio.savemat(self.file, {'WEIGHT': w})
self.pos += 1
else:
raise TypeError("Expected a pysal weights object, got: %s" % (
type(obj)))
def close(self):
self.file.close()
FileIO.FileIO.close(self)
| bsd-3-clause | 425,294,960,405,319,200 | 24.011429 | 81 | 0.570939 | false |
rpoleski/MulensModel | examples/use_cases/use_case_14_coordinate_system.py | 1 | 1988 | """
Use Case 14 - jcy
MulensModel will be written assuming the coordinate system
(t0,u0,alpha) is defined relative to the center of mass. This is not
always the most efficient choice for fitting. This use case covers the
conversion from center of magnification coordinates to center of mass
coordinates. Essentially, it is the responsibility of the user to
convert from their MCMC coordinate system to the center of mass
coordinate system needed for the magnification calculation.
"""
import astropy.units as u
import numpy as np
import MulensModel as mm
raise NotImplementedError('frame_origin not implemented for Model')
def convert_cof_mag2mass(t0, te, u0, alpha, s, q):
"""
function to convert from center of magnification to center of mass
coordinates. Note that this function is for illustration only. It has
not been tested and may have sign errors.
"""
if s <= 1.0:
return t0, u0
else:
delta = q / (1. + q) / s
delta_u0 = delta * np.sin(alpha * np.pi / 180.)
delta_tau = delta * np.cos(alpha * np.pi / 180.)
t0_prime = t0 + delta_tau * te
u0_prime = u0 + delta_u0
return t0_prime, u0_prime
# Define model parameters in CoMAGN system
t0_center_of_mag = 7000.
u0_center_of_mag = 0.1
alpha_center_of_mag = 30.*u.deg
te = 30.
print('Center of magnification: {0}, {1}'.format(
t0_center_of_mag, u0_center_of_mag))
s = 1.1
q = 0.001
# Get parameters in CoMASS system
(t0_center_of_mass, u0_center_of_mass) = convert_cof_mag2mass(
t0_center_of_mag, te, u0_center_of_mag, alpha_center_of_mag, s, q)
print('Center of mass: {0}, {1}'.format(t0_center_of_mass, u0_center_of_mass))
# How does this get passed to a minimizer?
# Alternatively,
model = mm.Model(
{'t_0': 2457000., 'u_0': 0.1, 't_E': 30., 'rho': 0.001,
'alpha': 30*u.deg, 's': 1.1, 'q': 0.001},
frame_origin='magnification')
print(model.parameters.t_0, model.parameters.u_0)
| mit | 4,294,409,651,524,645,000 | 29.584615 | 78 | 0.666499 | false |
kmoocdev2/edx-platform | cms/djangoapps/models/settings/course_metadata.py | 1 | 8977 | """
Django module for Course Metadata class -- manages advanced settings and related parameters
"""
from django.conf import settings
from django.utils.translation import ugettext as _
from six import text_type
from xblock.fields import Scope
from xblock_django.models import XBlockStudioConfigurationFlag
from xmodule.modulestore.django import modulestore
class CourseMetadata(object):
'''
For CRUD operations on metadata fields which do not have specific editors
on the other pages including any user generated ones.
The objects have no predefined attrs but instead are obj encodings of the
editable metadata.
'''
# The list of fields that wouldn't be shown in Advanced Settings.
# Should not be used directly. Instead the filtered_list method should
# be used if the field needs to be filtered depending on the feature flag.
FILTERED_LIST = [
'cohort_config',
'xml_attributes',
'start',
'end',
'enrollment_start',
'enrollment_end',
'certificate_available_date',
'tabs',
'graceperiod',
'show_timezone',
'format',
'graded',
'hide_from_toc',
'pdf_textbooks',
'user_partitions',
'name', # from xblock
'tags', # from xblock
'visible_to_staff_only',
'group_access',
'pre_requisite_courses',
'entrance_exam_enabled',
'entrance_exam_minimum_score_pct',
'entrance_exam_id',
'is_entrance_exam',
'in_entrance_exam',
'language',
'certificates',
'minimum_grade_credit',
'default_time_limit_minutes',
'is_proctored_enabled',
'is_time_limited',
'is_practice_exam',
'exam_review_rules',
'hide_after_due',
'self_paced',
'show_correctness',
'chrome',
'default_tab',
'highlights_enabled_for_messaging',
]
@classmethod
def filtered_list(cls):
"""
Filter fields based on feature flag, i.e. enabled, disabled.
"""
# Copy the filtered list to avoid permanently changing the class attribute.
filtered_list = list(cls.FILTERED_LIST)
# Do not show giturl if feature is not enabled.
if not settings.FEATURES.get('ENABLE_EXPORT_GIT'):
filtered_list.append('giturl')
# Do not show edxnotes if the feature is disabled.
if not settings.FEATURES.get('ENABLE_EDXNOTES'):
filtered_list.append('edxnotes')
# Do not show video_upload_pipeline if the feature is disabled.
if not settings.FEATURES.get('ENABLE_VIDEO_UPLOAD_PIPELINE'):
filtered_list.append('video_upload_pipeline')
# Do not show video auto advance if the feature is disabled
if not settings.FEATURES.get('ENABLE_AUTOADVANCE_VIDEOS'):
filtered_list.append('video_auto_advance')
# Do not show social sharing url field if the feature is disabled.
if (not hasattr(settings, 'SOCIAL_SHARING_SETTINGS') or
not getattr(settings, 'SOCIAL_SHARING_SETTINGS', {}).get("CUSTOM_COURSE_URLS")):
filtered_list.append('social_sharing_url')
# Do not show teams configuration if feature is disabled.
if not settings.FEATURES.get('ENABLE_TEAMS'):
filtered_list.append('teams_configuration')
if not settings.FEATURES.get('ENABLE_VIDEO_BUMPER'):
filtered_list.append('video_bumper')
# Do not show enable_ccx if feature is not enabled.
if not settings.FEATURES.get('CUSTOM_COURSES_EDX'):
filtered_list.append('enable_ccx')
filtered_list.append('ccx_connector')
# Do not show "Issue Open Badges" in Studio Advanced Settings
# if the feature is disabled.
if not settings.FEATURES.get('ENABLE_OPENBADGES'):
filtered_list.append('issue_badges')
# If the XBlockStudioConfiguration table is not being used, there is no need to
# display the "Allow Unsupported XBlocks" setting.
if not XBlockStudioConfigurationFlag.is_enabled():
filtered_list.append('allow_unsupported_xblocks')
return filtered_list
@classmethod
def fetch(cls, descriptor):
"""
Fetch the key:value editable course details for the given course from
persistence and return a CourseMetadata model.
"""
result = {}
metadata = cls.fetch_all(descriptor)
for key, value in metadata.iteritems():
if key in cls.filtered_list():
continue
result[key] = value
return result
@classmethod
def fetch_all(cls, descriptor):
"""
Fetches all key:value pairs from persistence and returns a CourseMetadata model.
"""
result = {}
for field in descriptor.fields.values():
if field.scope != Scope.settings:
continue
field_help = _(field.help) # pylint: disable=translation-of-non-string
help_args = field.runtime_options.get('help_format_args')
if help_args is not None:
field_help = field_help.format(**help_args)
result[field.name] = {
'value': field.read_json(descriptor),
'display_name': _(field.display_name), # pylint: disable=translation-of-non-string
'help': field_help,
'deprecated': field.runtime_options.get('deprecated', False),
'hidden': field.runtime_options.get('hidden', False)
}
return result
@classmethod
def update_from_json(cls, descriptor, jsondict, user, filter_tabs=True):
"""
Decode the json into CourseMetadata and save any changed attrs to the db.
Ensures none of the fields are in the blacklist.
"""
filtered_list = cls.filtered_list()
# Don't filter on the tab attribute if filter_tabs is False.
if not filter_tabs:
filtered_list.remove("tabs")
# Validate the values before actually setting them.
key_values = {}
for key, model in jsondict.iteritems():
# should it be an error if one of the filtered list items is in the payload?
if key in filtered_list:
continue
try:
val = model['value']
if hasattr(descriptor, key) and getattr(descriptor, key) != val:
key_values[key] = descriptor.fields[key].from_json(val)
except (TypeError, ValueError) as err:
raise ValueError(_("Incorrect format for field '{name}'. {detailed_message}").format(
name=model['display_name'], detailed_message=text_type(err)))
return cls.update_from_dict(key_values, descriptor, user)
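    # Illustrative shape of `jsondict` as consumed above (an assumption drawn
    # from how each entry's 'value' and 'display_name' are read; the field
    # names are examples only):
    #   {
    #       "advanced_modules": {"value": ["word_cloud"], "display_name": "Advanced Module List"},
    #       "days_early_for_beta": {"value": 2, "display_name": "Days Early for Beta Users"},
    #   }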
@classmethod
def validate_and_update_from_json(cls, descriptor, jsondict, user, filter_tabs=True):
"""
Validate the values in the json dict (validated by xblock fields from_json method)
If all fields validate, go ahead and update those values on the object and return it without
persisting it to the DB.
If not, return the error objects list.
Returns:
did_validate: whether values pass validation or not
errors: list of error objects
result: the updated course metadata or None if error
"""
filtered_list = cls.filtered_list()
if not filter_tabs:
filtered_list.remove("tabs")
filtered_dict = dict((k, v) for k, v in jsondict.iteritems() if k not in filtered_list)
did_validate = True
errors = []
key_values = {}
updated_data = None
for key, model in filtered_dict.iteritems():
try:
if key == 'need_lock':
continue
val = model['value']
if hasattr(descriptor, key) and getattr(descriptor, key) != val:
key_values[key] = descriptor.fields[key].from_json(val)
except (TypeError, ValueError) as err:
did_validate = False
errors.append({'message': text_type(err), 'model': model})
# If did validate, go ahead and update the metadata
if did_validate:
updated_data = cls.update_from_dict(key_values, descriptor, user, save=False)
return did_validate, errors, updated_data
@classmethod
def update_from_dict(cls, key_values, descriptor, user, save=True):
"""
Update metadata descriptor from key_values. Saves to modulestore if save is true.
"""
for key, value in key_values.iteritems():
setattr(descriptor, key, value)
if save and len(key_values):
modulestore().update_item(descriptor, user.id)
return cls.fetch(descriptor)
| agpl-3.0 | 1,728,035,875,643,045,600 | 36.560669 | 101 | 0.605993 | false |
rebstar6/servo | python/servo/devenv_commands.py | 5 | 6435 | # Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
from __future__ import print_function, unicode_literals
from os import path, getcwd, listdir
import subprocess
import sys
from mach.decorators import (
CommandArgument,
CommandProvider,
Command,
)
from servo.command_base import CommandBase, cd, call
@CommandProvider
class MachCommands(CommandBase):
@Command('cargo',
description='Run Cargo',
category='devenv')
@CommandArgument(
'params', default=None, nargs='...',
help="Command-line arguments to be passed through to Cargo")
def cargo(self, params):
if not params:
params = []
if self.context.topdir == getcwd():
with cd(path.join('components', 'servo')):
return call(["cargo"] + params, env=self.build_env())
return call(['cargo'] + params, env=self.build_env())
@Command('cargo-update',
description='Same as update-cargo',
category='devenv')
@CommandArgument(
'params', default=None, nargs='...',
help='Command-line arguments to be passed through to cargo update')
@CommandArgument(
'--package', '-p', default=None,
help='Updates selected package')
@CommandArgument(
'--all-packages', '-a', action='store_true',
help='Updates all packages')
def cargo_update(self, params=None, package=None, all_packages=None):
self.update_cargo(params, package, all_packages)
@Command('update-cargo',
description='Update Cargo dependencies',
category='devenv')
@CommandArgument(
'params', default=None, nargs='...',
help='Command-line arguments to be passed through to cargo update')
@CommandArgument(
'--package', '-p', default=None,
help='Updates the selected package')
@CommandArgument(
'--all-packages', '-a', action='store_true',
help='Updates all packages. NOTE! This is very likely to break your ' +
'working copy, making it impossible to build servo. Only do ' +
'this if you really know what you are doing.')
def update_cargo(self, params=None, package=None, all_packages=None):
if not params:
params = []
if package:
params += ["-p", package]
elif all_packages:
params = []
else:
print("Please choose package to update with the --package (-p) ")
print("flag or update all packages with --all-packages (-a) flag")
sys.exit(1)
cargo_paths = [path.join('components', 'servo'),
path.join('ports', 'cef'),
path.join('ports', 'geckolib')]
for cargo_path in cargo_paths:
with cd(cargo_path):
print(cargo_path)
call(["cargo", "update"] + params,
env=self.build_env())
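    # Example invocations (illustrative; the package name is a placeholder):
    #   ./mach update-cargo -p style
    #   ./mach update-cargo --all-packages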
@Command('clippy',
description='Run Clippy',
category='devenv')
def clippy(self):
features = "--features=script/plugins/clippy"
with cd(path.join(self.context.topdir, "components", "servo")):
return subprocess.call(["cargo", "build", features],
env=self.build_env())
@Command('rustc',
description='Run the Rust compiler',
category='devenv')
@CommandArgument(
'params', default=None, nargs='...',
help="Command-line arguments to be passed through to rustc")
def rustc(self, params):
if params is None:
params = []
return call(["rustc"] + params, env=self.build_env())
@Command('rust-root',
description='Print the path to the root of the Rust compiler',
category='devenv')
def rust_root(self):
print(self.config["tools"]["rust-root"])
@Command('grep',
description='`git grep` for selected directories.',
category='devenv')
@CommandArgument(
'params', default=None, nargs='...',
help="Command-line arguments to be passed through to `git grep`")
def grep(self, params):
if not params:
params = []
# get all directories under tests/
tests_dirs = listdir('tests')
# Directories to be excluded under tests/
excluded_tests_dirs = ['wpt', 'jquery']
tests_dirs = filter(lambda dir: dir not in excluded_tests_dirs, tests_dirs)
# Set of directories in project root
root_dirs = ['components', 'ports', 'python', 'etc', 'resources']
# Generate absolute paths for directories in tests/ and project-root/
tests_dirs_abs = [path.join(self.context.topdir, 'tests', s) for s in tests_dirs]
root_dirs_abs = [path.join(self.context.topdir, s) for s in root_dirs]
# Absolute paths for all directories to be considered
grep_paths = root_dirs_abs + tests_dirs_abs
return call(
["git"] + ["grep"] + params + ['--'] + grep_paths + [':(exclude)*.min.js'],
env=self.build_env())
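    # Example invocation (illustrative; any extra arguments are passed straight
    # through to `git grep`):
    #   ./mach grep -n "TODO"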
@Command('wpt-upgrade',
description='upgrade wptrunner.',
category='devenv')
def upgrade_wpt_runner(self):
with cd(path.join(self.context.topdir, 'tests', 'wpt', 'harness')):
code = call(["git", "init"], env=self.build_env())
if code:
return code
code = call(
["git", "remote", "add", "upstream", "https://github.com/w3c/wptrunner.git"], env=self.build_env())
if code:
return code
code = call(["git", "fetch", "upstream"], env=self.build_env())
if code:
return code
code = call(["git", "reset", "--hard", "remotes/upstream/master"], env=self.build_env())
if code:
return code
code = call(["rm", "-rf", ".git"], env=self.build_env())
if code:
return code
return 0
| mpl-2.0 | 5,119,622,410,882,245,000 | 37.532934 | 115 | 0.573737 | false |
keyurpatel076/MissionPlannerGit | Lib/site-packages/scipy/fftpack/pseudo_diffs.py | 57 | 12479 | """
Differential and pseudo-differential operators.
"""
# Created by Pearu Peterson, September 2002
__all__ = ['diff',
'tilbert','itilbert','hilbert','ihilbert',
'cs_diff','cc_diff','sc_diff','ss_diff',
'shift']
from numpy import pi, asarray, sin, cos, sinh, cosh, tanh, iscomplexobj
import convolve
from scipy.fftpack.basic import _datacopied
import atexit
atexit.register(convolve.destroy_convolve_cache)
del atexit
_cache = {}
def diff(x,order=1,period=None,
_cache = _cache):
""" diff(x, order=1, period=2*pi) -> y
Return k-th derivative (or integral) of a periodic sequence x.
If x_j and y_j are Fourier coefficients of periodic functions x
and y, respectively, then
y_j = pow(sqrt(-1)*j*2*pi/period, order) * x_j
y_0 = 0 if order is not 0.
Optional input:
order
The order of differentiation. Default order is 1. If order is
negative, then integration is carried out under the assumption
that x_0==0.
period
The assumed period of the sequence. Default is 2*pi.
Notes:
If sum(x,axis=0)=0 then
diff(diff(x,k),-k)==x (within numerical accuracy)
For odd order and even len(x), the Nyquist mode is taken zero.
"""
tmp = asarray(x)
if order==0:
return tmp
if iscomplexobj(tmp):
return diff(tmp.real,order,period)+1j*diff(tmp.imag,order,period)
if period is not None:
c = 2*pi/period
else:
c = 1.0
n = len(x)
omega = _cache.get((n,order,c))
if omega is None:
if len(_cache)>20:
while _cache: _cache.popitem()
def kernel(k,order=order,c=c):
if k:
return pow(c*k,order)
return 0
omega = convolve.init_convolution_kernel(n,kernel,d=order,
zero_nyquist=1)
_cache[(n,order,c)] = omega
overwrite_x = _datacopied(tmp, x)
return convolve.convolve(tmp,omega,swap_real_imag=order%2,
overwrite_x=overwrite_x)
del _cache
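# Illustrative check (added for exposition, not part of the original API): the
# spectral derivative of a sampled sine matches the corresponding cosine.
def _example_diff_sine(n=64):
    """Return True if diff() of sin(t) equals cos(t) on an n-point grid."""
    from numpy import allclose, arange
    t = 2*pi*arange(n)/n
    return allclose(diff(sin(t)), cos(t))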
_cache = {}
def tilbert(x,h,period=None,
_cache = _cache):
""" tilbert(x, h, period=2*pi) -> y
Return h-Tilbert transform of a periodic sequence x.
If x_j and y_j are Fourier coefficients of periodic functions x
and y, respectively, then
y_j = sqrt(-1)*coth(j*h*2*pi/period) * x_j
y_0 = 0
Input:
h
Defines the parameter of the Tilbert transform.
period
The assumed period of the sequence. Default period is 2*pi.
Notes:
If sum(x,axis=0)==0 and n=len(x) is odd then
tilbert(itilbert(x)) == x
If 2*pi*h/period is approximately 10 or larger then numerically
tilbert == hilbert
(theoretically oo-Tilbert == Hilbert).
For even len(x), the Nyquist mode of x is taken zero.
"""
tmp = asarray(x)
if iscomplexobj(tmp):
return tilbert(tmp.real,h,period)+\
1j*tilbert(tmp.imag,h,period)
if period is not None:
h = h*2*pi/period
n = len(x)
omega = _cache.get((n,h))
if omega is None:
if len(_cache)>20:
while _cache: _cache.popitem()
def kernel(k,h=h):
if k: return 1.0/tanh(h*k)
return 0
omega = convolve.init_convolution_kernel(n,kernel,d=1)
_cache[(n,h)] = omega
overwrite_x = _datacopied(tmp, x)
return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x)
del _cache
_cache = {}
def itilbert(x,h,period=None,
_cache = _cache):
""" itilbert(x, h, period=2*pi) -> y
Return inverse h-Tilbert transform of a periodic sequence x.
If x_j and y_j are Fourier coefficients of periodic functions x
and y, respectively, then
y_j = -sqrt(-1)*tanh(j*h*2*pi/period) * x_j
y_0 = 0
Optional input: see tilbert.__doc__
"""
tmp = asarray(x)
if iscomplexobj(tmp):
return itilbert(tmp.real,h,period)+\
1j*itilbert(tmp.imag,h,period)
if period is not None:
h = h*2*pi/period
n = len(x)
omega = _cache.get((n,h))
if omega is None:
if len(_cache)>20:
while _cache: _cache.popitem()
def kernel(k,h=h):
if k: return -tanh(h*k)
return 0
omega = convolve.init_convolution_kernel(n,kernel,d=1)
_cache[(n,h)] = omega
overwrite_x = _datacopied(tmp, x)
return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x)
del _cache
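# Illustrative round-trip (added for exposition; relies on the docstring note
# that tilbert(itilbert(x)) == x when sum(x) == 0 and len(x) is odd):
def _example_tilbert_roundtrip(n=65, h=1.0):
    from numpy import allclose, arange
    x = sin(2*pi*arange(n)/n)  # zero-mean sequence of odd length
    return allclose(tilbert(itilbert(x, h), h), x)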
_cache = {}
def hilbert(x,
_cache=_cache):
""" hilbert(x) -> y
Return Hilbert transform of a periodic sequence x.
If x_j and y_j are Fourier coefficients of periodic functions x
and y, respectively, then
y_j = sqrt(-1)*sign(j) * x_j
y_0 = 0
Notes:
If sum(x,axis=0)==0 then
hilbert(ihilbert(x)) == x
For even len(x), the Nyquist mode of x is taken zero.
"""
tmp = asarray(x)
if iscomplexobj(tmp):
return hilbert(tmp.real)+1j*hilbert(tmp.imag)
n = len(x)
omega = _cache.get(n)
if omega is None:
if len(_cache)>20:
while _cache: _cache.popitem()
def kernel(k):
if k>0: return 1.0
elif k<0: return -1.0
return 0.0
omega = convolve.init_convolution_kernel(n,kernel,d=1)
_cache[n] = omega
overwrite_x = _datacopied(tmp, x)
return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x)
del _cache
def ihilbert(x):
""" ihilbert(x) -> y
Return inverse Hilbert transform of a periodic sequence x.
If x_j and y_j are Fourier coefficients of periodic functions x
and y, respectively, then
y_j = -sqrt(-1)*sign(j) * x_j
y_0 = 0
"""
return -hilbert(x)
_cache = {}
def cs_diff(x, a, b, period=None,
_cache = _cache):
""" cs_diff(x, a, b, period=2*pi) -> y
Return (a,b)-cosh/sinh pseudo-derivative of a periodic sequence x.
If x_j and y_j are Fourier coefficients of periodic functions x
and y, respectively, then
y_j = -sqrt(-1)*cosh(j*a*2*pi/period)/sinh(j*b*2*pi/period) * x_j
y_0 = 0
Input:
a,b
Defines the parameters of the cosh/sinh pseudo-differential
operator.
period
The period of the sequence. Default period is 2*pi.
Notes:
For even len(x), the Nyquist mode of x is taken zero.
"""
tmp = asarray(x)
if iscomplexobj(tmp):
return cs_diff(tmp.real,a,b,period)+\
1j*cs_diff(tmp.imag,a,b,period)
if period is not None:
a = a*2*pi/period
b = b*2*pi/period
n = len(x)
omega = _cache.get((n,a,b))
if omega is None:
if len(_cache)>20:
while _cache: _cache.popitem()
def kernel(k,a=a,b=b):
if k: return -cosh(a*k)/sinh(b*k)
return 0
omega = convolve.init_convolution_kernel(n,kernel,d=1)
_cache[(n,a,b)] = omega
overwrite_x = _datacopied(tmp, x)
return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x)
del _cache
_cache = {}
def sc_diff(x, a, b, period=None,
_cache = _cache):
"""
Return (a,b)-sinh/cosh pseudo-derivative of a periodic sequence x.
If x_j and y_j are Fourier coefficients of periodic functions x
and y, respectively, then::
y_j = sqrt(-1)*sinh(j*a*2*pi/period)/cosh(j*b*2*pi/period) * x_j
y_0 = 0
Parameters
----------
x : array_like
Input array.
a,b : float
Defines the parameters of the sinh/cosh pseudo-differential
operator.
period : float, optional
The period of the sequence x. Default is 2*pi.
Notes
-----
``sc_diff(cs_diff(x,a,b),b,a) == x``
For even ``len(x)``, the Nyquist mode of x is taken as zero.
"""
tmp = asarray(x)
if iscomplexobj(tmp):
return sc_diff(tmp.real,a,b,period)+\
1j*sc_diff(tmp.imag,a,b,period)
if period is not None:
a = a*2*pi/period
b = b*2*pi/period
n = len(x)
omega = _cache.get((n,a,b))
if omega is None:
if len(_cache)>20:
while _cache: _cache.popitem()
def kernel(k,a=a,b=b):
if k: return sinh(a*k)/cosh(b*k)
return 0
omega = convolve.init_convolution_kernel(n,kernel,d=1)
_cache[(n,a,b)] = omega
overwrite_x = _datacopied(tmp, x)
return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x)
del _cache
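# Illustrative inverse pair (added for exposition; mirrors the docstring note
# that sc_diff(cs_diff(x, a, b), b, a) == x for a zero-mean sequence):
def _example_sc_cs_inverse(n=64, a=1.0, b=2.0):
    from numpy import allclose, arange
    x = sin(2*pi*arange(n)/n)
    return allclose(sc_diff(cs_diff(x, a, b), b, a), x)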
_cache = {}
def ss_diff(x, a, b, period=None,
_cache = _cache):
""" ss_diff(x, a, b, period=2*pi) -> y
Return (a,b)-sinh/sinh pseudo-derivative of a periodic sequence x.
If x_j and y_j are Fourier coefficients of periodic functions x
and y, respectively, then
y_j = sinh(j*a*2*pi/period)/sinh(j*b*2*pi/period) * x_j
y_0 = a/b * x_0
Input:
a,b
Defines the parameters of the sinh/sinh pseudo-differential
operator.
period
The period of the sequence x. Default is 2*pi.
Notes:
ss_diff(ss_diff(x,a,b),b,a) == x
"""
tmp = asarray(x)
if iscomplexobj(tmp):
return ss_diff(tmp.real,a,b,period)+\
1j*ss_diff(tmp.imag,a,b,period)
if period is not None:
a = a*2*pi/period
b = b*2*pi/period
n = len(x)
omega = _cache.get((n,a,b))
if omega is None:
if len(_cache)>20:
while _cache: _cache.popitem()
def kernel(k,a=a,b=b):
if k: return sinh(a*k)/sinh(b*k)
return float(a)/b
omega = convolve.init_convolution_kernel(n,kernel)
_cache[(n,a,b)] = omega
overwrite_x = _datacopied(tmp, x)
return convolve.convolve(tmp,omega,overwrite_x=overwrite_x)
del _cache
_cache = {}
def cc_diff(x, a, b, period=None,
_cache = _cache):
""" cc_diff(x, a, b, period=2*pi) -> y
Return (a,b)-cosh/cosh pseudo-derivative of a periodic sequence x.
If x_j and y_j are Fourier coefficients of periodic functions x
and y, respectively, then
y_j = cosh(j*a*2*pi/period)/cosh(j*b*2*pi/period) * x_j
Input:
a,b
Defines the parameters of the sinh/sinh pseudo-differential
operator.
Optional input:
period
The period of the sequence x. Default is 2*pi.
Notes:
cc_diff(cc_diff(x,a,b),b,a) == x
"""
tmp = asarray(x)
if iscomplexobj(tmp):
return cc_diff(tmp.real,a,b,period)+\
1j*cc_diff(tmp.imag,a,b,period)
if period is not None:
a = a*2*pi/period
b = b*2*pi/period
n = len(x)
omega = _cache.get((n,a,b))
if omega is None:
if len(_cache)>20:
while _cache: _cache.popitem()
def kernel(k,a=a,b=b):
return cosh(a*k)/cosh(b*k)
omega = convolve.init_convolution_kernel(n,kernel)
_cache[(n,a,b)] = omega
overwrite_x = _datacopied(tmp, x)
return convolve.convolve(tmp,omega,overwrite_x=overwrite_x)
del _cache
_cache = {}
def shift(x, a, period=None,
_cache = _cache):
""" shift(x, a, period=2*pi) -> y
Shift periodic sequence x by a: y(u) = x(u+a).
If x_j and y_j are Fourier coefficients of periodic functions x
and y, respectively, then
y_j = exp(j*a*2*pi/period*sqrt(-1)) * x_f
Optional input:
period
The period of the sequences x and y. Default period is 2*pi.
"""
tmp = asarray(x)
if iscomplexobj(tmp):
return shift(tmp.real,a,period)+1j*shift(tmp.imag,a,period)
if period is not None:
a = a*2*pi/period
n = len(x)
omega = _cache.get((n,a))
if omega is None:
if len(_cache)>20:
while _cache: _cache.popitem()
def kernel_real(k,a=a): return cos(a*k)
def kernel_imag(k,a=a): return sin(a*k)
omega_real = convolve.init_convolution_kernel(n,kernel_real,d=0,
zero_nyquist=0)
omega_imag = convolve.init_convolution_kernel(n,kernel_imag,d=1,
zero_nyquist=0)
_cache[(n,a)] = omega_real,omega_imag
else:
omega_real,omega_imag = omega
overwrite_x = _datacopied(tmp, x)
return convolve.convolve_z(tmp,omega_real,omega_imag,
overwrite_x=overwrite_x)
del _cache
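# Illustrative use of shift (added for exposition): shifting a sampled periodic
# sine by a radians reproduces the samples of sin(t + a).
def _example_shift(n=64, a=0.5):
    from numpy import allclose, arange
    t = 2*pi*arange(n)/n
    return allclose(shift(sin(t), a), sin(t + a))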
| gpl-3.0 | -1,442,905,106,283,307,800 | 27.886574 | 80 | 0.570639 | false |