rnd0101/urbanmediator | urbanmediator/fckeditor.py
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2007 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
This is the integration file for Python.
"""
import cgi
import os
import re
import string
def escape(text, replace=string.replace):
"""Converts the special characters '<', '>', and '&'.
RFC 1866 specifies that these characters be represented
in HTML as < > and & respectively. In Python
1.5 we use the new string.replace() function for speed.
"""
text = replace(text, '&', '&') # must be done 1st
text = replace(text, '<', '<')
text = replace(text, '>', '>')
text = replace(text, '"', '"')
text = replace(text, "'", ''')
return text
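# Illustrative use of escape() (a hedged sketch, not part of the original
# distribution): the returned string is safe to embed in an HTML attribute.
#
#     >>> escape('a < b & "c"')
#     'a &lt; b &amp; &quot;c&quot;'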
# The FCKeditor class
class FCKeditor(object):
def __init__(self, instanceName):
self.InstanceName = instanceName
self.BasePath = '/fckeditor/'
self.Width = '100%'
self.Height = '200'
self.ToolbarSet = 'Default'
self.Value = ''
self.Config = {}
def Create(self):
return self.CreateHtml()
def CreateHtml(self):
HtmlValue = escape(self.Value)
Html = "<div>"
if (self.IsCompatible()):
File = "fckeditor.html"
Link = "%seditor/%s?InstanceName=%s" % (
self.BasePath,
File,
self.InstanceName
)
if (self.ToolbarSet is not None):
Link += "&ToolBar=%s" % self.ToolbarSet
# Render the linked hidden field
Html += "<input type=\"hidden\" id=\"%s\" name=\"%s\" value=\"%s\" style=\"display:none\" />" % (
self.InstanceName,
self.InstanceName,
HtmlValue
)
# Render the configurations hidden field
Html += "<input type=\"hidden\" id=\"%s___Config\" value=\"%s\" style=\"display:none\" />" % (
self.InstanceName,
self.GetConfigFieldString()
)
# Render the editor iframe
Html += "<iframe id=\"%s___Frame\" src=\"%s\" width=\"%s\" height=\"%s\" frameborder=\"0\" scrolling=\"no\"></iframe>" % (
self.InstanceName,
Link,
self.Width,
self.Height
)
else:
if (self.Width.find("%") < 0):
WidthCSS = "%spx" % self.Width
else:
WidthCSS = self.Width
if (self.Height.find("%") < 0):
HeightCSS = "%spx" % self.Height
else:
HeightCSS = self.Height
Html += "<textarea name=\"%s\" rows=\"4\" cols=\"40\" style=\"width: %s; height: %s;\" wrap=\"virtual\">%s</textarea>" % (
self.InstanceName,
WidthCSS,
HeightCSS,
HtmlValue
)
Html += "</div>"
return Html
def IsCompatible(self):
import web
sAgent = web.ctx.environ.get("HTTP_USER_AGENT", "")
if (sAgent.find("MSIE") >= 0) and (sAgent.find("mac") < 0) and (sAgent.find("Opera") < 0):
i = sAgent.find("MSIE")
iVersion = float(sAgent[i+5:i+5+3])
if (iVersion >= 5.5):
return True
return False
elif (sAgent.find("Gecko/") >= 0):
i = sAgent.find("Gecko/")
iVersion = int(sAgent[i+6:i+6+8])
if (iVersion >= 20030210):
return True
return False
elif (sAgent.find("Opera/") >= 0):
i = sAgent.find("Opera/")
iVersion = float(sAgent[i+6:i+6+4])
if (iVersion >= 9.5):
return True
return False
elif (sAgent.find("AppleWebKit/") >= 0):
p = re.compile('AppleWebKit\/(\d+)', re.IGNORECASE)
m = p.search(sAgent)
if (int(m.group(1)) >= 522):
return True
return False
else:
return False
def GetConfigFieldString(self):
sParams = ""
bFirst = True
for sKey in self.Config.keys():
sValue = self.Config[sKey]
if (not bFirst):
sParams += "&"
else:
bFirst = False
if (sValue):
k = escape(sKey)
v = escape(sValue)
if (sValue == "true"):
sParams += "%s=true" % k
elif (sValue == "false"):
sParams += "%s=false" % k
else:
sParams += "%s=%s" % (k, v)
return sParams
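# Illustrative behaviour (a hedged sketch; the config key is arbitrary):
#
#     >>> ed = FCKeditor('editor1')
#     >>> ed.Config['AutoDetectLanguage'] = 'true'
#     >>> ed.GetConfigFieldString()
#     'AutoDetectLanguage=true'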
(license: bsd-3-clause)

skidzo/pydy | pydy/viz/shapes.py
#!/usr/bin/env python
__all__ = ['Cube',
'Cylinder',
'Cone',
'Sphere',
'Circle',
'Plane',
'Tetrahedron',
'Octahedron',
'Icosahedron',
'Torus',
'TorusKnot',
'Tube']
import numpy as np
# This is a list of ColorKeywords from THREE.js
THREE_COLORKEYWORDS = ['aliceblue', 'antiquewhite', 'aqua',
'aquamarine', 'azure', 'beige', 'bisque',
'black', 'blanchedalmond', 'blue', 'blueviolet',
'brown', 'burlywood', 'cadetblue', 'chartreuse',
'chocolate', 'coral', 'cornflowerblue',
'cornsilk', 'crimson', 'cyan', 'darkblue',
'darkcyan', 'darkgoldenrod', 'darkgray',
'darkgreen', 'darkgrey', 'darkkhaki',
'darkmagenta', 'darkolivegreen', 'darkorange',
'darkorchid', 'darkred', 'darksalmon',
'darkseagreen', 'darkslateblue', 'darkslategray',
'darkslategrey', 'darkturquoise', 'darkviolet',
'deeppink', 'deepskyblue', 'dimgray', 'dimgrey',
'dodgerblue', 'firebrick', 'floralwhite',
'forestgreen', 'fuchsia', 'gainsboro',
'ghostwhite', 'gold', 'goldenrod', 'gray',
'green', 'greenyellow', 'grey', 'honeydew',
'hotpink', 'indianred', 'indigo', 'ivory',
'khaki', 'lavender', 'lavenderblush',
'lawngreen', 'lemonchiffon', 'lightblue',
'lightcoral', 'lightcyan',
'lightgoldenrodyellow', 'lightgray',
'lightgreen', 'lightgrey', 'lightpink',
'lightsalmon', 'lightseagreen', 'lightskyblue',
'lightslategray', 'lightslategrey',
'lightsteelblue', 'lightyellow', 'lime',
'limegreen', 'linen', 'magenta', 'maroon',
'mediumaquamarine', 'mediumblue',
'mediumorchid', 'mediumpurple', 'mediumseagreen',
'mediumslateblue', 'mediumspringgreen',
'mediumturquoise', 'mediumvioletred',
'midnightblue', 'mintcream', 'mistyrose',
'moccasin', 'navajowhite', 'navy', 'oldlace',
'olive', 'olivedrab', 'orange', 'orangered',
'orchid', 'palegoldenrod', 'palegreen',
'paleturquoise', 'palevioletred', 'papayawhip',
'peachpuff', 'peru', 'pink', 'plum',
'powderblue', 'purple', 'red', 'rosybrown',
'royalblue', 'saddlebrown', 'salmon',
'sandybrown', 'seagreen', 'seashell', 'sienna',
'silver', 'skyblue', 'slateblue', 'slategray',
'slategrey', 'snow', 'springgreen', 'steelblue',
'tan', 'teal', 'thistle', 'tomato', 'turquoise',
'violet', 'wheat', 'white', 'whitesmoke',
'yellow', 'yellowgreen']
MATERIALS = ["default", "checkerboard", "metal", "dirt", "foil", "water",
"grass"]
class Shape(object):
"""Instantiates a shape. This is primarily used as a superclass for more
specific shapes like Cube, Cylinder, Sphere etc.
Shapes must be associated with a reference frame and a point using the
VisualizationFrame class.
Parameters
==========
name : str, optional
A name assigned to the shape.
color : str, optional
A color string from list of colors in THREE_COLORKEYWORDS
Examples
========
>>> from pydy.viz.shapes import Shape
>>> s = Shape()
>>> s.name
'unnamed'
>>> s.color
'grey'
>>> s.name = 'my-shape1'
>>> s.name
'my-shape1'
>>> s.color = 'blue'
>>> s.color
'blue'
>>> a = Shape(name='my-shape2', color='red')
>>> a.name
'my-shape2'
>>> a.color
'red'
"""
def __init__(self, name='unnamed', color='grey', material="default"):
self.name = name
self.color = color
self.material = material
self.geometry_attrs = []
def __str__(self):
attributes = ([self.__class__.__name__,
self.name,
'color:' + self.color,
'material:' + self.material] +
sorted([attr + ':{}'.format(getattr(self, attr)) for
attr in self.geometry_attrs]))
return ' '.join(['{}'] * len(attributes)).format(*attributes)
def __repr__(self):
return self.__class__.__name__
@property
def name(self):
"""Returns the name attribute of the shape."""
return self._name
@name.setter
def name(self, new_name):
"""Sets the name attribute of the shape."""
if not isinstance(new_name, str):
raise TypeError("'name' should be a valid str object.")
else:
self._name = new_name
@property
def color(self):
"""Returns the color attribute of the shape."""
return self._color
@color.setter
def color(self, new_color):
"""Sets the color attributes of the shape. This should be a valid
three.js color keyword string."""
if new_color not in THREE_COLORKEYWORDS:
msg = "'color' should be a valid Three.js colors string:\n{}"
raise ValueError(msg.format('\n'.join(THREE_COLORKEYWORDS)))
else:
self._color = new_color
@property
def material(self):
"""Returns the material attribute of the shape."""
return self._material
@material.setter
def material(self, new_material):
"""Sets the material attribute of the shape, i.e. its shine,
brightness, opacity, etc. The material should be a valid material
from the listed MATERIALS. If a shape is attributed as "red" color,
and "water" material, ideally it should have opacity and brightness
properties like that of a red fluid.
"""
if new_material.lower() not in MATERIALS:
msg = "'material' is not valid. Choose from:\n{}"
raise ValueError(msg.format('\n'.join(MATERIALS)))
else:
self._material = new_material
def generate_dict(self, constant_map={}):
"""Returns a dictionary containing all the data associated with the
Shape.
Parameters
==========
constant_map : dictionary
If any of the shape's geometry are defined as SymPy expressions,
then this dictionary should map all SymPy Symbol's found in the
expressions to floats.
"""
data_dict = {}
data_dict['name'] = self.name
data_dict['color'] = self.color
data_dict['material'] = self.material
data_dict['type'] = self.__repr__()
for geom in self.geometry_attrs:
atr = getattr(self, geom)
try:
data_dict[geom] = float(atr.subs(constant_map))
except AttributeError:
# not a SymPy expression
data_dict[geom] = atr
except TypeError:
# can't convert expression to float
raise TypeError('{} is an expression, you '.format(atr) +
'must provide a mapping to numerical values.')
return data_dict
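# A small illustrative sketch (the symbol name `l` is hypothetical): geometry
# defined with SymPy expressions is resolved through constant_map when the
# dictionary is generated.
#
#     >>> from sympy import symbols
#     >>> l = symbols('l')
#     >>> cube = Cube(2 * l, name='box')
#     >>> cube.generate_dict(constant_map={l: 5.0})['length']
#     10.0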
class Cube(Shape):
"""Instantiates a cube of a given size.
Parameters
==========
length : float or SymPy expression
The length of the cube.
Examples
========
>>> from pydy.viz.shapes import Cube
>>> s = Cube(10.0)
>>> s.name
'unnamed'
>>> s.color
'grey'
>>> s.length
10.0
>>> s.name = 'my-shape1'
>>> s.name
'my-shape1'
>>> s.color = 'blue'
>>> s.color
'blue'
>>> s.length = 12.0
>>> s.length
12.0
>>> a = Cube(10.0, name='my-shape2', color='red')
>>> a.name
'my-shape2'
>>> a.color
'red'
>>> a.length
10.0
"""
def __init__(self, length, **kwargs):
super(Cube, self).__init__(**kwargs)
self.geometry_attrs.append('length')
self.length = length
class Cylinder(Shape):
"""Instantiates a cylinder with given length and radius.
Parameters
==========
length : float or SymPy expression
The length of the cylinder.
radius : float or SymPy expression
The radius of the cylinder.
Examples
========
>>> from pydy.viz.shapes import Cylinder
>>> s = Cylinder(10.0, 5.0)
>>> s.name
'unnamed'
>>> s.color
'grey'
>>> s.length
10.0
>>> s.radius
5.0
>>> s.name = 'my-shape1'
>>> s.name
'my-shape1'
>>> s.color = 'blue'
>>> s.color
'blue'
>>> s.length = 12.0
>>> s.length
12.0
>>> s.radius = 6.0
>>> s.radius
6.0
>>> a = Cylinder(10.0, 5.0, name='my-shape2', color='red')
>>> a.name
'my-shape2'
>>> a.color
'red'
>>> a.length
10.0
>>> a.radius
5.0
"""
def __init__(self, length, radius, **kwargs):
super(Cylinder, self).__init__(**kwargs)
self.geometry_attrs += ['length', 'radius']
self.length = length
self.radius = radius
class Cone(Shape):
"""Instantiates a cone with given length and base radius.
Parameters
==========
length : float or SymPy expression
The length of the cone.
radius : float or SymPy expression
The base radius of the cone.
Examples
========
>>> from pydy.viz.shapes import Cone
>>> s = Cone(10.0, 5.0)
>>> s.name
'unnamed'
>>> s.color
'grey'
>>> s.length
10.0
>>> s.radius
5.0
>>> s.name = 'my-shape1'
>>> s.name
'my-shape1'
>>> s.color = 'blue'
>>> s.color
'blue'
>>> s.length = 12.0
>>> s.length
12.0
>>> s.radius = 6.0
>>> s.radius
6.0
>>> a = Cone(10.0, 5.0, name='my-shape2', color='red')
>>> a.name
'my-shape2'
>>> a.color
'red'
>>> a.length
10.0
>>> a.radius
5.0
"""
def __init__(self, length, radius, **kwargs):
super(Cone, self).__init__(**kwargs)
self.geometry_attrs += ['length', 'radius']
self.length = length
self.radius = radius
class Sphere(Shape):
"""Instantiates a sphere with a given radius.
Parameters
==========
radius : float or SymPy expression
The radius of the sphere.
Examples
========
>>> from pydy.viz.shapes import Sphere
>>> s = Sphere(10.0)
>>> s.name
'unnamed'
>>> s.color
'grey'
>>> s.radius
10.0
>>> s.name = 'my-shape1'
>>> s.name
'my-shape1'
>>> s.color = 'blue'
>>> s.color
'blue'
>>> s.radius = 12.0
>>> s.radius
12.0
>>> a = Sphere(10.0, name='my-shape2', color='red')
>>> a.name
'my-shape2'
>>> a.color
'red'
>>> a.radius
10.0
"""
def __init__(self, radius=10.0, **kwargs):
super(Sphere, self).__init__(**kwargs)
self.geometry_attrs += ['radius']
self.radius = radius
class Circle(Sphere):
"""Instantiates a circle with a given radius.
Parameters
==========
radius : float or SymPy Expression
The radius of the circle.
Examples
========
>>> from pydy.viz.shapes import Circle
>>> s = Circle(10.0)
>>> s.name
'unnamed'
>>> s.color
'grey'
>>> s.radius
10.0
>>> s.name = 'my-shape1'
>>> s.name
'my-shape1'
>>> s.color = 'blue'
>>> s.color
'blue'
>>> s.radius = 12.0
>>> s.radius
12.0
>>> a = Circle(10.0, name='my-shape2', color='red')
>>> a.name
'my-shape2'
>>> a.color
'red'
>>> a.radius
10.0
"""
class Plane(Shape):
"""Instantiates a plane with a given length and width.
Parameters
==========
length : float or SymPy expression
The length of the plane.
width : float or SymPy expression
The width of the plane.
Examples
========
>>> from pydy.viz.shapes import Plane
>>> s = Plane(10.0, 5.0)
>>> s.name
'unnamed'
>>> s.color
'grey'
>>> s.length
10.0
>>> s.width
5.0
>>> s.name = 'my-shape1'
>>> s.name
'my-shape1'
>>> s.color = 'blue'
>>> s.color
'blue'
>>> s.length = 12.0
>>> s.length
12.0
>>> s.width = 6.0
>>> s.width
6.0
>>> a = Plane(10.0, 5.0, name='my-shape2', color='red')
>>> a.name
'my-shape2'
>>> a.color
'red'
>>> a.length
10.0
>>> a.width
5.0
"""
def __init__(self, length=10.0, width=5.0, **kwargs):
super(Plane, self).__init__(**kwargs)
self.geometry_attrs += ['length', 'width']
self.length = length
self.width = width
class Tetrahedron(Sphere):
"""Instantiates a Tetrahedron inscribed in a given radius circle.
Parameters
==========
radius : float or SymPy expression
The radius of the circum-scribing sphere of around the tetrahedron.
Examples
========
>>> from pydy.viz.shapes import Tetrahedron
>>> s = Tetrahedron(10.0)
>>> s.name
'unnamed'
>>> s.color
'grey'
>>> s.radius
10.0
>>> s.name = 'my-shape1'
>>> s.name
'my-shape1'
>>> s.color = 'blue'
>>> s.color
'blue'
>>> s.radius = 12.0
>>> s.radius
12.0
>>> a = Tetrahedron(10.0, name='my-shape2', color='red')
>>> a.name
'my-shape2'
>>> a.color
'red'
>>> a.radius
10.0
"""
class Octahedron(Sphere):
"""Instantiaties an Octahedron inscribed in a circle of the given
radius.
Parameters
==========
radius : float or SymPy expression.
The radius of the circum-scribing sphere around the octahedron.
Examples
========
>>> from pydy.viz.shapes import Octahedron
>>> s = Octahedron(10.0)
>>> s.name
'unnamed'
>>> s.color
'grey'
>>> s.radius
10.0
>>> s.name = 'my-shape1'
>>> s.name
'my-shape1'
>>> s.color = 'blue'
>>> s.color
'blue'
>>> s.radius = 12.0
>>> s.radius
12.0
>>> a = Octahedron(10.0, name='my-shape2', color='red')
>>> a.name
'my-shape2'
>>> a.color
'red'
>>> a.radius
10.0
"""
class Icosahedron(Sphere):
"""Instantiates an icosahedron inscribed in a sphere of the given
radius.
Parameters
==========
radius : float or SymPy expression
The radius of the circumscribing sphere around the icosahedron.
Examples
========
>>> from pydy.viz.shapes import Icosahedron
>>> s = Icosahedron(10.0)
>>> s.name
'unnamed'
>>> s.color
'grey'
>>> s.radius
10.0
>>> # These can be changed later too.
>>> s.name = 'my-shape1'
>>> s.name
'my-shape1'
>>> s.color = 'blue'
>>> s.color
'blue'
>>> s.radius = 12.0
>>> s.radius
12.0
>>> a = Icosahedron(10.0, name='my-shape2', color='red')
>>> a.name
'my-shape2'
>>> a.color
'red'
>>> a.radius
10.0
"""
class Torus(Shape):
"""Instantiates a torus with a given radius and section radius.
Parameters
==========
radius : float or SymPy expression
The radius of the torus.
tube_radius : float or SymPy expression
The radius of the torus tube.
Examples
========
>>> from pydy.viz.shapes import Torus
>>> s = Torus(10.0, 5.0)
>>> s.name
'unnamed'
>>> s.color
'grey'
>>> s.radius
10.0
>>> s.tube_radius
5.0
>>> s.name = 'my-shape1'
>>> s.name
'my-shape1'
>>> s.color = 'blue'
>>> s.color
'blue'
>>> s.radius = 12.0
>>> s.radius
12.0
>>> s.tube_radius = 6.0
>>> s.tube_radius
6.0
>>> a = Torus(10.0, 5.0, name='my-shape2', color='red')
>>> a.name
'my-shape2'
>>> a.color
'red'
>>> a.radius
10.0
>>> a.tube_radius
5.0
"""
def __init__(self, radius, tube_radius, **kwargs):
super(Torus, self).__init__(**kwargs)
self.geometry_attrs += ['radius', 'tube_radius']
self.radius = radius
self.tube_radius = tube_radius
@property
def radius(self):
return self._radius
@radius.setter
def radius(self, new_radius):
self._radius = new_radius
@property
def tube_radius(self):
return self._tube_radius
@tube_radius.setter
def tube_radius(self, new_tube_radius):
self._tube_radius = new_tube_radius
class TorusKnot(Torus):
"""Instantiates a torus knot with given radius and section radius.
Parameters
==========
radius : float or SymPy expression
The radius of the torus knot.
tube_radius : float or SymPy expression
The radius of the torus knot tube.
Examples
========
>>> from pydy.viz.shapes import TorusKnot
>>> s = TorusKnot(10.0, 5.0)
>>> s.name
'unnamed'
>>> s.color
'grey'
>>> s.radius
10.0
>>> s.tube_radius
5.0
>>> s.name = 'my-shape1'
>>> s.name
'my-shape1'
>>> s.color = 'blue'
>>> s.color
'blue'
>>> s.radius = 12.0
>>> s.radius
12.0
>>> s.tube_radius = 6.0
>>> s.tube_radius
6.0
>>> a = TorusKnot(10.0, 5.0, name='my-shape2', color='red')
>>> a.name
'my-shape2'
>>> a.color
'red'
>>> a.radius
10.0
>>> a.tube_radius
5.0
"""
class Tube(Shape):
"""Instantiates a tube that sweeps along a path.
Parameters
==========
radius : float or SymPy expression
The radius of the tube.
points : array_like, shape(n, 3)
An array of n (x, y, z) coordinates representing points that the
tube's center line should follow.
Examples
========
>>> from pydy.viz.shapes import Tube
>>> points = [[1.0, 2.0, 1.0], [2.0, 1.0, 1.0], [2.0, 3.0, 4.0]]
>>> s = Tube(10.0, points)
>>> s.name
'unnamed'
>>> s.color
'grey'
>>> s.points
[[1.0, 2.0, 1.0], [2.0, 1.0, 1.0], [2.0, 3.0, 4.0]]
>>> s.name = 'my-shape1'
>>> s.name
'my-shape1'
>>> s.color = 'blue'
>>> s.color
'blue'
>>> s.radius = 14.0
>>> s.radius
14.0
>>> s.points = [[2.0, 1.0, 4.0], [1.0, 2.0, 4.0],
... [2.0, 3.0, 1.0], [1.0, 1.0, 3.0]]
>>> s.points
[[2.0, 1.0, 4.0], [1.0, 2.0, 4.0], [2.0, 3.0, 1.0], [1.0, 1.0, 3.0]]
>>> a = Tube(12.0, points, name='my-shape2', color='red')
>>> a.name
'my-shape2'
>>> a.color
'red'
>>> a.radius
12.0
>>> a.points
[[1.0, 2.0, 1.0], [2.0, 1.0, 1.0], [2.0, 3.0, 4.0]]
"""
def __init__(self, radius, points, **kwargs):
super(Tube, self).__init__(**kwargs)
self.geometry_attrs += ['radius', 'points']
self.radius = radius
self.points = points
@property
def points(self):
return self._points
@points.setter
def points(self, new_points):
self._points = np.asarray(new_points)
(license: bsd-3-clause)

ojengwa/oh-mainline | vendor/packages/oauthlib/oauthlib/oauth1/rfc5849/signature.py
# -*- coding: utf-8 -*-
"""
oauthlib.oauth1.rfc5849.signature
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module represents a direct implementation of `section 3.4`_ of the spec.
Terminology:
* Client: software interfacing with an OAuth API
* Server: the API provider
* Resource Owner: the user who is granting authorization to the client
Steps for signing a request:
1. Collect parameters from the uri query, auth header, & body
2. Normalize those parameters
3. Normalize the uri
4. Pass the normalized uri, normalized parameters, and http method to
construct the base string
5. Pass the base string and any keys needed to a signing function
.. _`section 3.4`: http://tools.ietf.org/html/rfc5849#section-3.4
"""
from __future__ import absolute_import, unicode_literals
import binascii
import hashlib
import hmac
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
from . import utils
from oauthlib.common import urldecode, extract_params, safe_string_equals
from oauthlib.common import bytes_type, unicode_type
def construct_base_string(http_method, base_string_uri,
normalized_encoded_request_parameters):
"""**String Construction**
Per `section 3.4.1.1`_ of the spec.
For example, the HTTP request::
POST /request?b5=%3D%253D&a3=a&c%40=&a2=r%20b HTTP/1.1
Host: example.com
Content-Type: application/x-www-form-urlencoded
Authorization: OAuth realm="Example",
oauth_consumer_key="9djdj82h48djs9d2",
oauth_token="kkk9d7dh3k39sjv7",
oauth_signature_method="HMAC-SHA1",
oauth_timestamp="137131201",
oauth_nonce="7d8f3e4a",
oauth_signature="bYT5CMsGcbgUdFHObYMEfcx6bsw%3D"

c2&a3=2+q
is represented by the following signature base string (line breaks
are for display purposes only)::
POST&http%3A%2F%2Fexample.com%2Frequest&a2%3Dr%2520b%26a3%3D2%2520q
%26a3%3Da%26b5%3D%253D%25253D%26c%2540%3D%26c2%3D%26oauth_consumer_
key%3D9djdj82h48djs9d2%26oauth_nonce%3D7d8f3e4a%26oauth_signature_m
ethod%3DHMAC-SHA1%26oauth_timestamp%3D137131201%26oauth_token%3Dkkk
9d7dh3k39sjv7
.. _`section 3.4.1.1`: http://tools.ietf.org/html/rfc5849#section-3.4.1.1
"""
# The signature base string is constructed by concatenating together,
# in order, the following HTTP request elements:
# 1. The HTTP request method in uppercase. For example: "HEAD",
# "GET", "POST", etc. If the request uses a custom HTTP method, it
# MUST be encoded (`Section 3.6`_).
#
# .. _`Section 3.6`: http://tools.ietf.org/html/rfc5849#section-3.6
base_string = utils.escape(http_method.upper())
# 2. An "&" character (ASCII code 38).
base_string += '&'
# 3. The base string URI from `Section 3.4.1.2`_, after being encoded
# (`Section 3.6`_).
#
# .. _`Section 3.4.1.2`: http://tools.ietf.org/html/rfc5849#section-3.4.1.2
# .. _`Section 3.4.6`: http://tools.ietf.org/html/rfc5849#section-3.4.6
base_string += utils.escape(base_string_uri)
# 4. An "&" character (ASCII code 38).
base_string += '&'
# 5. The request parameters as normalized in `Section 3.4.1.3.2`_, after
# being encoded (`Section 3.6`).
#
# .. _`Section 3.4.1.3.2`: http://tools.ietf.org/html/rfc5849#section-3.4.1.3.2
# .. _`Section 3.4.6`: http://tools.ietf.org/html/rfc5849#section-3.4.6
base_string += utils.escape(normalized_encoded_request_parameters)
return base_string
def normalize_base_string_uri(uri, host=None):
"""**Base String URI**
Per `section 3.4.1.2`_ of the spec.
For example, the HTTP request::
GET /r%20v/X?id=123 HTTP/1.1
Host: EXAMPLE.COM:80
is represented by the base string URI: "http://example.com/r%20v/X".
In another example, the HTTPS request::
GET /?q=1 HTTP/1.1
Host: www.example.net:8080
is represented by the base string URI: "https://www.example.net:8080/".
.. _`section 3.4.1.2`: http://tools.ietf.org/html/rfc5849#section-3.4.1.2
The host argument overrides the netloc part of the uri argument.
"""
if not isinstance(uri, unicode_type):
raise ValueError('uri must be a unicode object.')
# FIXME: urlparse does not support unicode
scheme, netloc, path, params, query, fragment = urlparse.urlparse(uri)
# The scheme, authority, and path of the request resource URI `RFC3986`
# are included by constructing an "http" or "https" URI representing
# the request resource (without the query or fragment) as follows:
#
# .. _`RFC3986`: http://tools.ietf.org/html/rfc3986
if not scheme or not netloc:
raise ValueError('uri must include a scheme and netloc')
# Per `RFC 2616 section 5.1.2`_:
#
# Note that the absolute path cannot be empty; if none is present in
# the original URI, it MUST be given as "/" (the server root).
#
# .. _`RFC 2616 section 5.1.2`: http://tools.ietf.org/html/rfc2616#section-5.1.2
if not path:
path = '/'
# 1. The scheme and host MUST be in lowercase.
scheme = scheme.lower()
netloc = netloc.lower()
# 2. The host and port values MUST match the content of the HTTP
# request "Host" header field.
if host is not None:
netloc = host.lower()
# 3. The port MUST be included if it is not the default port for the
# scheme, and MUST be excluded if it is the default. Specifically,
# the port MUST be excluded when making an HTTP request `RFC2616`_
# to port 80 or when making an HTTPS request `RFC2818`_ to port 443.
# All other non-default port numbers MUST be included.
#
# .. _`RFC2616`: http://tools.ietf.org/html/rfc2616
# .. _`RFC2818`: http://tools.ietf.org/html/rfc2818
default_ports = (
('http', '80'),
('https', '443'),
)
if ':' in netloc:
host, port = netloc.split(':', 1)
if (scheme, port) in default_ports:
netloc = host
return urlparse.urlunparse((scheme, netloc, path, params, '', ''))
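# Illustrative results, matching the docstring examples above (note that the
# uri argument must be a unicode object):
#
#     >>> normalize_base_string_uri(u'http://EXAMPLE.COM:80/r%20v/X?id=123')
#     'http://example.com/r%20v/X'
#     >>> normalize_base_string_uri(u'https://www.example.net:8080/?q=1')
#     'https://www.example.net:8080/'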
# ** Request Parameters **
#
# Per `section 3.4.1.3`_ of the spec.
#
# In order to guarantee a consistent and reproducible representation of
# the request parameters, the parameters are collected and decoded to
# their original decoded form. They are then sorted and encoded in a
# particular manner that is often different from their original
# encoding scheme, and concatenated into a single string.
#
# .. _`section 3.4.1.3`: http://tools.ietf.org/html/rfc5849#section-3.4.1.3
def collect_parameters(uri_query='', body=[], headers=None,
exclude_oauth_signature=True, with_realm=False):
"""**Parameter Sources**
Parameters starting with `oauth_` will be unescaped.
Body parameters must be supplied as a dict, a list of 2-tuples, or a
formencoded query string.
Headers must be supplied as a dict.
Per `section 3.4.1.3.1`_ of the spec.
For example, the HTTP request::
POST /request?b5=%3D%253D&a3=a&c%40=&a2=r%20b HTTP/1.1
Host: example.com
Content-Type: application/x-www-form-urlencoded
Authorization: OAuth realm="Example",
oauth_consumer_key="9djdj82h48djs9d2",
oauth_token="kkk9d7dh3k39sjv7",
oauth_signature_method="HMAC-SHA1",
oauth_timestamp="137131201",
oauth_nonce="7d8f3e4a",
oauth_signature="djosJKDKJSD8743243%2Fjdk33klY%3D"

c2&a3=2+q
contains the following (fully decoded) parameters used in the
signature base string::
+------------------------+------------------+
| Name | Value |
+------------------------+------------------+
| b5 | =%3D |
| a3 | a |
| c@ | |
| a2 | r b |
| oauth_consumer_key | 9djdj82h48djs9d2 |
| oauth_token | kkk9d7dh3k39sjv7 |
| oauth_signature_method | HMAC-SHA1 |
| oauth_timestamp | 137131201 |
| oauth_nonce | 7d8f3e4a |
| c2 | |
| a3 | 2 q |
+------------------------+------------------+
Note that the value of "b5" is "=%3D" and not "==". Both "c@" and
"c2" have empty values. While the encoding rules specified in this
specification for the purpose of constructing the signature base
string exclude the use of a "+" character (ASCII code 43) to
represent an encoded space character (ASCII code 32), this practice
is widely used in "application/x-www-form-urlencoded" encoded values,
and MUST be properly decoded, as demonstrated by one of the "a3"
parameter instances (the "a3" parameter is used twice in this
request).
.. _`section 3.4.1.3.1`: http://tools.ietf.org/html/rfc5849#section-3.4.1.3.1
"""
headers = headers or {}
params = []
# The parameters from the following sources are collected into a single
# list of name/value pairs:
# * The query component of the HTTP request URI as defined by
# `RFC3986, Section 3.4`_. The query component is parsed into a list
# of name/value pairs by treating it as an
# "application/x-www-form-urlencoded" string, separating the names
# and values and decoding them as defined by
# `W3C.REC-html40-19980424`_, Section 17.13.4.
#
# .. _`RFC3986, Section 3.4`: http://tools.ietf.org/html/rfc3986#section-3.4
# .. _`W3C.REC-html40-19980424`: http://tools.ietf.org/html/rfc5849#ref-W3C.REC-html40-19980424
if uri_query:
params.extend(urldecode(uri_query))
# * The OAuth HTTP "Authorization" header field (`Section 3.5.1`_) if
# present. The header's content is parsed into a list of name/value
# pairs excluding the "realm" parameter if present. The parameter
# values are decoded as defined by `Section 3.5.1`_.
#
# .. _`Section 3.5.1`: http://tools.ietf.org/html/rfc5849#section-3.5.1
if headers:
headers_lower = dict((k.lower(), v) for k, v in headers.items())
authorization_header = headers_lower.get('authorization')
if authorization_header is not None:
params.extend([i for i in utils.parse_authorization_header(
authorization_header) if with_realm or i[0] != 'realm'])
# * The HTTP request entity-body, but only if all of the following
# conditions are met:
# * The entity-body is single-part.
#
# * The entity-body follows the encoding requirements of the
# "application/x-www-form-urlencoded" content-type as defined by
# `W3C.REC-html40-19980424`_.
# * The HTTP request entity-header includes the "Content-Type"
# header field set to "application/x-www-form-urlencoded".
#
# .._`W3C.REC-html40-19980424`: http://tools.ietf.org/html/rfc5849#ref-W3C.REC-html40-19980424
# TODO: enforce header param inclusion conditions
bodyparams = extract_params(body) or []
params.extend(bodyparams)
# ensure all oauth params are unescaped
unescaped_params = []
for k, v in params:
if k.startswith('oauth_'):
v = utils.unescape(v)
unescaped_params.append((k, v))
# The "oauth_signature" parameter MUST be excluded from the signature
# base string if present.
if exclude_oauth_signature:
unescaped_params = list(filter(lambda i: i[0] != 'oauth_signature',
unescaped_params))
return unescaped_params
def normalize_parameters(params):
"""**Parameters Normalization**
Per `section 3.4.1.3.2`_ of the spec.
For example, the list of parameters from the previous section would
be normalized as follows:
Encoded::
+------------------------+------------------+
| Name | Value |
+------------------------+------------------+
| b5 | %3D%253D |
| a3 | a |
| c%40 | |
| a2 | r%20b |
| oauth_consumer_key | 9djdj82h48djs9d2 |
| oauth_token | kkk9d7dh3k39sjv7 |
| oauth_signature_method | HMAC-SHA1 |
| oauth_timestamp | 137131201 |
| oauth_nonce | 7d8f3e4a |
| c2 | |
| a3 | 2%20q |
+------------------------+------------------+
Sorted::
+------------------------+------------------+
| Name | Value |
+------------------------+------------------+
| a2 | r%20b |
| a3 | 2%20q |
| a3 | a |
| b5 | %3D%253D |
| c%40 | |
| c2 | |
| oauth_consumer_key | 9djdj82h48djs9d2 |
| oauth_nonce | 7d8f3e4a |
| oauth_signature_method | HMAC-SHA1 |
| oauth_timestamp | 137131201 |
| oauth_token | kkk9d7dh3k39sjv7 |
+------------------------+------------------+
Concatenated Pairs::
+-------------------------------------+
| Name=Value |
+-------------------------------------+
| a2=r%20b |
| a3=2%20q |
| a3=a |
| b5=%3D%253D |
| c%40= |
| c2= |
| oauth_consumer_key=9djdj82h48djs9d2 |
| oauth_nonce=7d8f3e4a |
| oauth_signature_method=HMAC-SHA1 |
| oauth_timestamp=137131201 |
| oauth_token=kkk9d7dh3k39sjv7 |
+-------------------------------------+
and concatenated together into a single string (line breaks are for
display purposes only)::
a2=r%20b&a3=2%20q&a3=a&b5=%3D%253D&c%40=&c2=&oauth_consumer_key=9dj
dj82h48djs9d2&oauth_nonce=7d8f3e4a&oauth_signature_method=HMAC-SHA1
&oauth_timestamp=137131201&oauth_token=kkk9d7dh3k39sjv7
.. _`section 3.4.1.3.2`: http://tools.ietf.org/html/rfc5849#section-3.4.1.3.2
"""
# The parameters collected in `Section 3.4.1.3`_ are normalized into a
# single string as follows:
#
# .. _`Section 3.4.1.3`: http://tools.ietf.org/html/rfc5849#section-3.4.1.3
# 1. First, the name and value of each parameter are encoded
# (`Section 3.6`_).
#
# .. _`Section 3.6`: http://tools.ietf.org/html/rfc5849#section-3.6
key_values = [(utils.escape(k), utils.escape(v)) for k, v in params]
# 2. The parameters are sorted by name, using ascending byte value
# ordering. If two or more parameters share the same name, they
# are sorted by their value.
key_values.sort()
# 3. The name of each parameter is concatenated to its corresponding
# value using an "=" character (ASCII code 61) as a separator, even
# if the value is empty.
parameter_parts = ['{0}={1}'.format(k, v) for k, v in key_values]
# 4. The sorted name/value pairs are concatenated together into a
# single string by using an "&" character (ASCII code 38) as
# separator.
return '&'.join(parameter_parts)
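# A compact illustrative example (a subset of the spec's table above):
#
#     >>> normalize_parameters([('a2', 'r b'), ('a3', 'a'), ('a3', '2 q')])
#     'a2=r%20b&a3=2%20q&a3=a'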
def sign_hmac_sha1_with_client(base_string, client):
return sign_hmac_sha1(base_string,
client.client_secret,
client.resource_owner_secret
)
def sign_hmac_sha1(base_string, client_secret, resource_owner_secret):
"""**HMAC-SHA1**
The "HMAC-SHA1" signature method uses the HMAC-SHA1 signature
algorithm as defined in `RFC2104`_::
digest = HMAC-SHA1 (key, text)
Per `section 3.4.2`_ of the spec.
.. _`RFC2104`: http://tools.ietf.org/html/rfc2104
.. _`section 3.4.2`: http://tools.ietf.org/html/rfc5849#section-3.4.2
"""
# The HMAC-SHA1 function variables are used in following way:
# text is set to the value of the signature base string from
# `Section 3.4.1.1`_.
#
# .. _`Section 3.4.1.1`: http://tools.ietf.org/html/rfc5849#section-3.4.1.1
text = base_string
# key is set to the concatenated values of:
# 1. The client shared-secret, after being encoded (`Section 3.6`_).
#
# .. _`Section 3.6`: http://tools.ietf.org/html/rfc5849#section-3.6
key = utils.escape(client_secret or '')
# 2. An "&" character (ASCII code 38), which MUST be included
# even when either secret is empty.
key += '&'
# 3. The token shared-secret, after being encoded (`Section 3.6`_).
#
# .. _`Section 3.6`: http://tools.ietf.org/html/rfc5849#section-3.6
key += utils.escape(resource_owner_secret or '')
# FIXME: HMAC does not support unicode!
key_utf8 = key.encode('utf-8')
text_utf8 = text.encode('utf-8')
signature = hmac.new(key_utf8, text_utf8, hashlib.sha1)
# digest is used to set the value of the "oauth_signature" protocol
# parameter, after the result octet string is base64-encoded
# per `RFC2045, Section 6.8`.
#
# .. _`RFC2045, Section 6.8`: http://tools.ietf.org/html/rfc2045#section-6.8
return binascii.b2a_base64(signature.digest())[:-1].decode('utf-8')
def sign_rsa_sha1(base_string, rsa_private_key):
"""**RSA-SHA1**
Per `section 3.4.3`_ of the spec.
The "RSA-SHA1" signature method uses the RSASSA-PKCS1-v1_5 signature
algorithm as defined in `RFC3447, Section 8.2`_ (also known as
PKCS#1), using SHA-1 as the hash function for EMSA-PKCS1-v1_5. To
use this method, the client MUST have established client credentials
with the server that included its RSA public key (in a manner that is
beyond the scope of this specification).
NOTE: this method requires the python-rsa library.
.. _`section 3.4.3`: http://tools.ietf.org/html/rfc5849#section-3.4.3
.. _`RFC3447, Section 8.2`: http://tools.ietf.org/html/rfc3447#section-8.2
"""
# TODO: finish RSA documentation
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA
key = RSA.importKey(rsa_private_key)
if isinstance(base_string, unicode_type):
base_string = base_string.encode('utf-8')
h = SHA.new(base_string)
p = PKCS1_v1_5.new(key)
return binascii.b2a_base64(p.sign(h))[:-1].decode('utf-8')
def sign_rsa_sha1_with_client(base_string, client):
return sign_rsa_sha1(base_string, client.rsa_key)
def sign_plaintext(client_secret, resource_owner_secret):
"""Sign a request using plaintext.
Per `section 3.4.4`_ of the spec.
The "PLAINTEXT" method does not employ a signature algorithm. It
MUST be used with a transport-layer mechanism such as TLS or SSL (or
sent over a secure channel with equivalent protections). It does not
utilize the signature base string or the "oauth_timestamp" and
"oauth_nonce" parameters.
.. _`section 3.4.4`: http://tools.ietf.org/html/rfc5849#section-3.4.4
"""
# The "oauth_signature" protocol parameter is set to the concatenated
# value of:
# 1. The client shared-secret, after being encoded (`Section 3.6`_).
#
# .. _`Section 3.6`: http://tools.ietf.org/html/rfc5849#section-3.6
signature = utils.escape(client_secret or '')
# 2. An "&" character (ASCII code 38), which MUST be included even
# when either secret is empty.
signature += '&'
# 3. The token shared-secret, after being encoded (`Section 3.6`_).
#
# .. _`Section 3.6`: http://tools.ietf.org/html/rfc5849#section-3.6
signature += utils.escape(resource_owner_secret or '')
return signature
def sign_plaintext_with_client(base_string, client):
return sign_plaintext(client.client_secret, client.resource_owner_secret)
def verify_hmac_sha1(request, client_secret=None,
resource_owner_secret=None):
"""Verify a HMAC-SHA1 signature.
Per `section 3.4`_ of the spec.
.. _`section 3.4`: http://tools.ietf.org/html/rfc5849#section-3.4
To satisfy `RFC2616 section 5.2`_ item 1, the request argument's uri
attribute MUST be an absolute URI whose netloc part identifies the
origin server or gateway on which the resource resides. Any Host
item of the request argument's headers dict attribute will be
ignored.
.. _`RFC2616 section 5.2`: http://tools.ietf.org/html/rfc2616#section-5.2
"""
norm_params = normalize_parameters(request.params)
uri = normalize_base_string_uri(request.uri)
base_string = construct_base_string(request.http_method, uri, norm_params)
signature = sign_hmac_sha1(base_string, client_secret,
resource_owner_secret)
return safe_string_equals(signature, request.signature)
def verify_rsa_sha1(request, rsa_public_key):
"""Verify a RSASSA-PKCS #1 v1.5 base64 encoded signature.
Per `section 3.4.3`_ of the spec.
Note this method requires the PyCrypto library.
.. _`section 3.4.3`: http://tools.ietf.org/html/rfc5849#section-3.4.3
To satisfy `RFC2616 section 5.2`_ item 1, the request argument's uri
attribute MUST be an absolute URI whose netloc part identifies the
origin server or gateway on which the resource resides. Any Host
item of the request argument's headers dict attribute will be
ignored.
.. _`RFC2616 section 5.2`: http://tools.ietf.org/html/rfc2616#section-5.2
"""
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA
key = RSA.importKey(rsa_public_key)
norm_params = normalize_parameters(request.params)
uri = normalize_base_string_uri(request.uri)
message = construct_base_string(request.http_method, uri, norm_params)
h = SHA.new(message.encode('utf-8'))
p = PKCS1_v1_5.new(key)
sig = binascii.a2b_base64(request.signature.encode('utf-8'))
return p.verify(h, sig)
def verify_plaintext(request, client_secret=None, resource_owner_secret=None):
"""Verify a PLAINTEXT signature.
Per `section 3.4`_ of the spec.
.. _`section 3.4`: http://tools.ietf.org/html/rfc5849#section-3.4
"""
signature = sign_plaintext(client_secret, resource_owner_secret)
return safe_string_equals(signature, request.signature)
(license: agpl-3.0)

Omegaphora/external_chromium_org | build/android/findbugs_diff.py
#!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs findbugs, and returns an error code if there are new warnings.
This runs findbugs with an additional flag to exclude known bugs.
To update the list of known bugs, do this:
findbugs_diff.py --rebaseline
Note that this is separate from findbugs_exclude.xml. The "exclude" file has
false positives that we do not plan to fix. The "known bugs" file has real
bugs that we *do* plan to fix (but haven't done so yet).
Other options:
--only-analyze: analyze only the classes you are interested in.
--release-build: analyze the classes in the out/Release directory.
--findbugs-args: pass additional options through to findbugs.
Run
$CHROM_SRC/third_party/findbugs/bin/findbugs -textui for details.
"""
import os
import sys
from pylib import constants
from pylib.utils import findbugs
def main():
parser = findbugs.GetCommonParser()
options, _ = parser.parse_args()
if not options.base_dir:
options.base_dir = os.path.join(constants.DIR_SOURCE_ROOT, 'build',
'android', 'findbugs_filter')
if not options.only_analyze:
options.only_analyze = 'org.chromium.-'
return findbugs.Run(options)
if __name__ == '__main__':
sys.exit(main())
(license: bsd-3-clause)

premanandchandrasekar/boto | boto/emr/__init__.py
# Copyright (c) 2010 Spotify AB
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
This module provides an interface to the Elastic MapReduce (EMR)
service from AWS.
"""
from connection import EmrConnection
from step import Step, StreamingStep, JarStep
from bootstrap_action import BootstrapAction
from boto.regioninfo import RegionInfo
def regions():
"""
Get all available regions for the Amazon Elastic MapReduce service.
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo`
"""
return [RegionInfo(name='us-east-1',
endpoint='elasticmapreduce.us-east-1.amazonaws.com',
connection_cls=EmrConnection),
RegionInfo(name='us-west-1',
endpoint='elasticmapreduce.us-west-1.amazonaws.com',
connection_cls=EmrConnection),
RegionInfo(name='us-west-2',
endpoint='elasticmapreduce.us-west-2.amazonaws.com',
connection_cls=EmrConnection),
RegionInfo(name='ap-northeast-1',
endpoint='elasticmapreduce.ap-northeast-1.amazonaws.com',
connection_cls=EmrConnection),
RegionInfo(name='ap-southeast-1',
endpoint='elasticmapreduce.ap-southeast-1.amazonaws.com',
connection_cls=EmrConnection),
RegionInfo(name='ap-southeast-2',
endpoint='elasticmapreduce.ap-southeast-2.amazonaws.com',
connection_cls=EmrConnection),
RegionInfo(name='eu-west-1',
endpoint='elasticmapreduce.eu-west-1.amazonaws.com',
connection_cls=EmrConnection),
RegionInfo(name='sa-east-1',
endpoint='elasticmapreduce.sa-east-1.amazonaws.com',
connection_cls=EmrConnection),
]
def connect_to_region(region_name, **kw_params):
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
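# A hedged usage sketch (the region name is illustrative):
#
#     import boto.emr
#     conn = boto.emr.connect_to_region('us-west-2')
#     if conn is not None:
#         # conn is an EmrConnection bound to the chosen regional endpoint
#         jobflows = conn.describe_jobflows()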
(license: mit)

pjmaker/python-tsi-tools | tags.py
# tags.py -- map tags from one namespace to another
# Copyright 2016 Ben Elliston
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
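# A hedged usage sketch (the record dict is hypothetical): rename the keys of
# a sample record with this mapping, dropping tags that have no target name
# (value None).
#
#     def remap(record):
#         return {tags[k]: v for k, v in record.items()
#                 if tags.get(k) is not None}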
tags = {
'G1_OIL_TOP_UP_PIML': None,
'G1_OIL_USED_IN_SERVICE_PIML': None,
'G2_OIL_TOP_UP_PIML': None,
'G2_OIL_USED_IN_SERVICE_PIML': None,
'G3_OIL_TOP_UP_PIML': None,
'G3_OIL_USED_IN_SERVICE_PIML': None,
'G4_OIL_TOP_UP_PIML': None,
'G4_OIL_USED_IN_SERVICE_PIML': None,
'STATION_MAX_DEMAND_PIML': None,
'F1_OUT_CNT_PIML': None,
'F1_OUT_DATE_PIML': None,
'F1_OUT_HOUR_CNT_PIML': None,
'F1_OUT_TIME_PIML': None,
'F2_OUT_CNT_PIML': None,
'F2_OUT_DATE_PIML': None,
'F2_OUT_HOUR_CNT_PIML': None,
'F2_OUT_TIME_PIML': None,
'F3_OUT_CNT_PIML': None,
'F3_OUT_DATE_PIML': None,
'F3_OUT_HOUR_CNT_PIML': None,
'F3_OUT_TIME_PIML': None,
'F4_OUT_CNT_PIML': None,
'F4_OUT_DATE_PIML': None,
'F4_OUT_HOUR_CNT_PIML': None,
'F4_OUT_TIME_PIML': None,
'Fed1AlarmSt': None,
'Fed1AutoMd': None,
'Fed1BlackBusSt': None,
'Fed1ClosedSt': None,
'Fed1ClsingSt': None,
'Fed1DigIOAl': None,
'Fed1EarthFaultAl': None,
'FED1FACT': None,
'Fed1FtCloseAl': None,
'Fed1FtOpenAl': None,
'Fed1HealthySt': None,
'Fed1I1Act': None,
'Fed1I2Act': None,
'Fed1I3Act': None,
'Fed1KwhTot': None,
'Fed1ManualMd': None,
'Fed1OpenedCnt': None,
'Fed1OpenedSt': None,
'Fed1OpeningSt': None,
'Fed1OvercurrentAl': None,
'Fed1Pact': None,
'Fed1PCloseSet': None,
'Fed1PDemAvr': None,
'Fed1PwrFctAct': None,
'Fed1PwrMonAl': None,
'Fed1QAct': None,
'Fed1ScadaMd': None,
'Fed1SefAl': None,
'Fed1ShedCnt': None,
'Fed1ToutTot': None,
'Fed1TripCnt': None,
'Fed1TrippedAl': None,
'Fed1U12Act': None,
'Fed1U1NAct': None,
'Fed1U23Act': None,
'Fed1U2NAct': None,
'Fed1U31Act': None,
'Fed1U3NAct': None,
'Fed2AlarmSt': None,
'Fed2AutoMd': None,
'Fed2BlackBusSt': None,
'Fed2ClosedSt': None,
'Fed2ClsingSt': None,
'Fed2DigIOAl': None,
'Fed2EarthFaultAl': None,
'Fed2Fact': None,
'Fed2FtCloseAl': None,
'Fed2FtOpenAl': None,
'Fed2HealthySt': None,
'Fed2I1Act': None,
'Fed2I2Act': None,
'Fed2I3Act': None,
'Fed2KwhTot': None,
'Fed2ManualMd': None,
'Fed2OpenedCnt': None,
'Fed2OpenedSt': None,
'Fed2OpeningSt': None,
'Fed2OvercurrentAl': None,
'Fed2Pact': None,
'Fed2PCloseSet': None,
'Fed2PDemAvr': None,
'Fed2PwrFctAct': None,
'Fed2PwrMonAl': None,
'Fed2QAct': None,
'Fed2ScadaMd': None,
'Fed2SefAl': None,
'Fed2ShedCnt': None,
'Fed2ToutTot': None,
'Fed2TripCnt': None,
'Fed2TrippedAl': None,
'Fed2U12Act': None,
'Fed2U1NAct': None,
'Fed2U23Act': None,
'Fed2U2NAct': None,
'Fed2U31Act': None,
'Fed2U3NAct': None,
'Fed3AlarmSt': None,
'Fed3AutoMd': None,
'Fed3BlackBusSt': None,
'Fed3ClosedSt': None,
'Fed3ClsingSt': None,
'Fed3DigIOAl': None,
'Fed3EarthFaultAl': None,
'Fed3Fact': None,
'Fed3FtCloseAl': None,
'Fed3FtOpenAl': None,
'Fed3HealthySt': None,
'Fed3I1Act': None,
'Fed3I2Act': None,
'Fed3I3Act': None,
'Fed3KwhTot': None,
'Fed3ManualMd': None,
'Fed3OpenedCnt': None,
'Fed3OpenedSt': None,
'Fed3OpeningSt': None,
'Fed3OvercurrentAl': None,
'Fed3Pact': None,
'Fed3PCloseSet': None,
'Fed3PDemAvr': None,
'Fed3PwrFctAct': None,
'Fed3PwrMonAl': None,
'Fed3QAct': None,
'Fed3ScadaMd': None,
'Fed3SefAl': None,
'Fed3ShedCnt': None,
'Fed3ToutTot': None,
'Fed3TripCnt': None,
'Fed3TrippedAl': None,
'Fed3U12Act': None,
'Fed3U1NAct': None,
'Fed3U23Act': None,
'Fed3U2NAct': None,
'Fed3U31Act': None,
'Fed3U3NAct': None,
'Fed4AlarmSt': None,
'Fed4AutoMd': None,
'Fed4BlackBusSt': None,
'Fed4ClosedSt': None,
'Fed4ClsingSt': None,
'Fed4DigIOAl': None,
'Fed4EarthFaultAl': None,
'Fed4Fact': None,
'Fed4FtCloseAl': None,
'Fed4FtOpenAl': None,
'Fed4HealthySt': None,
'Fed4I1Act': None,
'Fed4I2Act': None,
'Fed4I3Act': None,
'Fed4KwhTot': None,
'Fed4ManualMd': None,
'Fed4OpenedCnt': None,
'Fed4OpenedSt': None,
'Fed4OpeningSt': None,
'Fed4OvercurrentAl': None,
'Fed4Pact': None,
'Fed4PCloseSet': None,
'Fed4PDemAvr': None,
'Fed4PwrFctAct': None,
'Fed4PwrMonAl': None,
'Fed4QAct': None,
'Fed4ScadaMd': None,
'Fed4SefAl': None,
'Fed4ShedCnt': None,
'Fed4ToutTot': None,
'Fed4TripCnt': None,
'Fed4TrippedAl': None,
'Fed4U12Act': None,
'Fed4U1NAct': None,
'Fed4U23Act': None,
'Fed4U2NAct': None,
'Fed4U31Act': None,
'Fed4U3NAct': None,
'FedAllClosedSt': None,
'FedAllOpenSt': None,
'FedAutoClsdSt': None,
'FedBlackSt': None,
'FedClosingSt': None,
'FedDemandingSt': None,
'FedNoAvailSt': None,
'FedNonManualOpenSt': None,
'FedOpenSt': None,
'FedPact': None,
'FedRotationModeCloseSt': None,
'FedRotationModeOpenSt': None,
'FedRotationSt': None,
'FedRotationWaitSt': None,
'FedSheddingSt': None,
'FedShutdownSt': None,
'FUEL_DELIVERED_PIML': None,
'FUEL_TANK1_PIML': None,
'FUEL_TANK2_PIML': None,
'FUEL_TANK3_PIML': None,
'G1_FUEL_TOTAL_PIML': 'Fuelgen1_l',
'G1_KWH_PIML': None,
'G1_TOTAL_HOURS_RUN_PIML': 'Tgen1_h',
'G2_FUEL_TOTAL_PIML': 'Fuelgen2_l',
'G2_KWH_PIML': None,
'G2_TOTAL_HOURS_RUN_PIML': 'Tgen2_h',
'G3_FUEL_TOTAL_PIML': 'Fuelgen3_l',
'G3_KWH_PIML': None,
'G3_TOTAL_HOURS_RUN_PIML': 'Tgen3_h',
'G4_FUEL_TOTAL_PIML': 'Fuelgen4_l',
'G4_KWH_PIML': None,
'G4_TOTAL_HOURS_RUN_PIML': 'Tgen4_h',
'Gen1AIOutOfRange': None, # generator 1 analogue input out of range
'Gen1AlarmSt': None, # generator 1 alarm state
'Gen1Asymmetry': None, # generator 1 unbalanced load alarm
'Gen1AutoMd': None, # generator 1 auto mode
'Gen1BatUnderVolt': None, # generator 1 battery under voltage
'Gen1BlackSt': None, # generator 1 black state
'Gen1ClosedSt': None, # generator 1 closed state
'Gen1CloseFailGCB': None, # generator 1 generator circuit breaker failed to close
'Gen1CommY1Y6': None,
'Gen1CoolDownSt': None, # generator 1 cool down state
'Gen1CritAl': None, # generator 1 critical alarm
'Gen1DigIOAl': None, # generator 1 digital I/O alarm
'Gen1ExhTempLeft': None, # generator 1 exhaust temperature left
'Gen1ExhTempRight': None, # generator 1 exhaust temperature right
'Gen1ExtOpenGCB': None, # generator 1 external generator circuit breaker open
'Gen1Fact': 'fgen1_Hz', # generator 1 actual frequency
'Gen1FirstStartMd': None, # generator 1 first start mode
'Gen1FtOpenAl': None, # generator 1 fail to open alarm
'Gen1FtStartAl': None, # generator 1 fail to start alarm
'Gen1FtStopAl': None, # generator 1 fail to stop alarm
'Gen1FtSyncAl': None, # generator 1 fail to sync alarm
'Gen1FuelConAct': None, # generator 1 fuel consumption
'Gen1FuelLitreCnt': None, # generator 1 fuel consumption counter
'Gen1GenFreq': None,
'Gen1GenLoadUnb': None,
'Gen1GenOverCur': None, # generator 1 generator overcurrent
'Gen1GenOverFreq': None, # generator 1 generator overfrequency
'Gen1GenOverload': None, # generator 1 generator overload
'Gen1GenOverSpd': None, # generator 1 generator overspeed
'Gen1GenRevPwr': None, # generator 1 generator reverse power
'Gen1GenUnderFreq': None, # generator 1 generator underfrequency
'Gen1GenUnderVolt': None, # generator 1 generator undervoltage
'Gen1HealthySt': None, # generator 1 healthy state
'Gen1I1Act': None, # generator 1 phase 1 actual current
'Gen1I2Act': None, # generator 1 phase 2 actual current
'Gen1I3Act': None, # generator 1 phase 3 actual current
'Gen1IAct': 'Igen1_A', # FIXME: this tag doesn't exist yet.
'Gen1InternalError7': None, # generator 1 internal error 7 (magic?)
'Gen1KwhTot': 'Egen1_kWh', # generator 1 kwh total
'Gen1LastStartMd': None, # generator 1 last start mode
'Gen1LdFctAct': None, # generator 1 actual load factor
'Gen1MainsOverFrq': None, # generator 1 mains overfrequency
'Gen1MainsOverVolt': None, # generator 1 mains overvoltage
'Gen1MainsUnderFrq': None, # generator 1 mains underfrequency
'Gen1MainsUnderVolt': None, # generator 1 mains undervoltage
'Gen1MainsVectJump': None, # generator 1 mains vector jump (wha?)
'Gen1MaintenanceCall': None,
'Gen1ManualMd': None, # generator 1 manual mode
'Gen1MCBFail': None, # generator 1 MCB failure
'Gen1NonCritAl': None, # generator 1 non-critical alarm
'Gen1OilPrAftAct': None, # generator 1 actual oil pressure after filter
'Gen1OilPrBefAct': None, # generator 1 actual oil pressure before filter
'Gen1OilTact': None, # generator 1 actual oil temperature
'Gen1OpenFailGCB': None, # generator 1 open fail generator circuit breaker
'Gen1OpenSt': None, # generator 1 open state
'Gen1Pact': 'Pgen1_kVA', # generator 1 actual power
'Gen1PderAct': None, # generator 1 actual derated power
'Gen1PreGlowSt': None, # generator 1 pre-glow state
'Gen1PsetSt': None, # generator 1 power setpoint state
'Gen1PwrFctAct': 'PFgen1', # generator 1 actual power factor
'Gen1PwrMonAl': None, # generator 1 power monitor alarm
'Gen1QAct': 'Qgen1_kVAr', # generator 1 actual reactive power
'Gen1RpmAct': None, # generator 1 actual RPM
'Gen1RunningTimeOut': None, # generator 1 generator running timeout
'Gen1RunSt': None, # generator 1 running state
'Gen1ScadaMd': None, # generator 1 SCADA mode
'Gen1StartCnt': None, # generator 1 total number of starts
'Gen1StartingSt': None, # generator 1 starting state
'Gen1StoppingFail': None, # generator 1 failed to stop alarm
'Gen1StoppingSt': None, # generator 1 stopping state
'Gen1StopSt': None, # generator 1 stop state
'Gen1SyncSt': None, # generator 1 synchronisation state
'Gen1SyncTimeExcd': None, # generator 1 sync time exceeded
'Gen1TempDeratedSt': None,
'Gen1ThermOverCur': None, # generator 1 thermal overcurrent
'Gen1TimeoutSwchOnBlackBus': None,
'Gen1TimeTillNextService': None, # generator 1 time until next service
'Gen1TrunTot': None, # generator 1 total run time
'Gen1U12Act': None, # generator 1 phase 1 to phase 2 voltage
'Gen1U1NAct': None, # generator 1 phase 1 to neutral voltage
'Gen1U23Act': None, # generator 1 phase 2 to phase 3 voltage
'Gen1U2NAct': None, # generator 1 phase 2 to neutral voltage
'Gen1U31Act': None, # generator 1 phase 3 to phase 1 voltage
'Gen1U3NAct': None, # generator 1 phase 3 to neutral voltage
'Gen1UnloadSt': None, # generator 1 unload state
'Gen1WarmUp': None, # generator 1 warm-up state
'Gen1WarnAl': None, # generator 1 warning alarm
'Gen1WatchdogPwr': None,
'Gen1WaterTinAct': None, # generator 1 actual water temp in
'Gen1WaterToutAct': None, # generator 1 actual water temp out
'Gen1WrongStart': None,
'Gen2AIOutOfRange': None, # generator 2 analogue input out of range
'Gen2AlarmSt': None, # generator 2 alarm state
'Gen2Asymmetry': None, # generator 2 unbalanced load alarm
'Gen2AutoMd': None, # generator 2 auto mode
'Gen2BatUnderVolt': None, # generator 2 battery under voltage
'Gen2BlackSt': None, # generator 2 black state
'Gen2ClosedSt': None, # generator 2 closed state
'Gen2CloseFailGCB': None, # generator 2 generator circuit breaker failed to close
'Gen2CommY1Y6': None,
'Gen2CoolDownSt': None, # generator 2 cool down state
'Gen2CritAl': None, # generator 2 critical alarm
'Gen2DigIOAl': None, # generator 2 digital I/O alarm
'Gen2ExhTempLeft': None, # generator 2 exhaust temperature left
'Gen2ExhTempRight': None, # generator 2 exhaust temperature right
'Gen2ExtOpenGCB': None, # generator 2 external generator circuit breaker open
'Gen2Fact': 'fgen2_Hz', # generator 2 actual frequency
'Gen2FirstStartMd': None, # generator 2 first start mode
'Gen2FtOpenAl': None, # generator 2 fail to open alarm
'Gen2FtStartAl': None, # generator 2 fail to start alarm
'Gen2FtStopAl': None, # generator 2 fail to stop alarm
'Gen2FtSyncAl': None, # generator 2 fail to sync alarm
'Gen2FuelConAct': None, # generator 2 fuel consumption
'Gen2FuelLitreCnt': None, # generator 2 fuel consumption counter
'Gen2GenFreq': None,
'Gen2GenLoadUnb': None,
'Gen2GenOverCur': None, # generator 2 generator overcurrent
'Gen2GenOverFreq': None, # generator 2 generator overfrequency
'Gen2GenOverload': None, # generator 2 generator overload
'Gen2GenOverSpd': None, # generator 2 generator overspeed
'Gen2GenRevPwr': None, # generator 2 generator reverse power
'Gen2GenUnderFreq': None, # generator 2 generator underfrequency
'Gen2GenUnderVolt': None, # generator 2 generator undervoltage
'Gen2HealthySt': None, # generator 2 healthy state
'Gen2I1Act': None, # generator 2 phase 1 actual current
'Gen2I2Act': None, # generator 2 phase 2 actual current
'Gen2I3Act': None, # generator 2 phase 3 actual current
'Gen2IAct': 'Igen2_A', # FIXME: this tag doesn't exist yet.
'Gen2InternalError7': None, # generator 2 internal error 7 (magic?)
'Gen2KwhTot': 'Egen2_kWh', # generator 2 kwh total
'Gen2LastStartMd': None, # generator 2 last start mode
'Gen2LdFctAct': None, # generator 2 actual load factor
'Gen2MainsOverFrq': None, # generator 2 mains overfrequency
'Gen2MainsOverVolt': None, # generator 2 mains overvoltage
'Gen2MainsUnderFrq': None, # generator 2 mains underfrequency
'Gen2MainsUnderVolt': None, # generator 2 mains undervoltage
'Gen2MainsVectJump': None, # generator 2 mains vector jump (wha?)
'Gen2MaintenanceCall': None,
'Gen2ManualMd': None, # generator 2 manual mode
'Gen2MCBFail': None, # generator 2 MCB failure
'Gen2NonCritAl': None, # generator 2 non-critical alarm
'Gen2OilPrAftAct': None, # generator 2 actual oil pressure after filter
'Gen2OilPrBefAct': None, # generator 2 actual oil pressure before filter
'Gen2OilTact': None, # generator 2 actual oil temperature
'Gen2OpenFailGCB': None, # generator 2 open fail generator circuit breaker
'Gen2OpenSt': None, # generator 2 open state
'Gen2Pact': 'Pgen2_kVA', # generator 2 actual power
'Gen2PderAct': None, # generator 2 actual derated power
'Gen2PreGlowSt': None, # generator 2 pre-glow state
'Gen2PsetSt': None, # generator 2 power setpoint state
'Gen2PwrFctAct': 'PFgen2', # generator 2 actual power factor
'Gen2PwrMonAl': None, # generator 2 power monitor alarm
'Gen2QAct': 'Qgen2_kVAr', # generator 2 actual reactive power
'Gen2RpmAct': None, # generator 2 actual RPM
'Gen2RunningTimeOut': None, # generator 2 generator running timeout
'Gen2RunSt': None, # generator 2 running state
'Gen2ScadaMd': None, # generator 2 SCADA mode
'Gen2StartCnt': None, # generator 2 total number of starts
'Gen2StartingSt': None, # generator 2 starting state
'Gen2StoppingFail': None, # generator 2 failed to stop alarm
'Gen2StoppingSt': None, # generator 2 stopping state
'Gen2StopSt': None, # generator 2 stop state
'Gen2SyncSt': None, # generator 2 synchronisation state
'Gen2SyncTimeExcd': None, # generator 2 sync time exceeded
'Gen2TempDeratedSt': None, #
'Gen2ThermOverCur': None, # generator 2 thermal overcurrent
'Gen2TimeoutSwchOnBlackBus': None, #
'Gen2TimeTillNextService': None, # generator 2 time until next service
'Gen2TrunTot': None, # generator 2 total run time
'Gen2U12Act': None, # generator 2 phase 1 to phase 2 voltage
'Gen2U1NAct': None, # generator 2 phase 1 to neutral voltage
'Gen2U23Act': None, # generator 2 phase 2 to phase 3 voltage
'Gen2U2NAct': None, # generator 2 phase 2 to neutral voltage
'Gen2U31Act': None, # generator 2 phase 3 to phase 1 voltage
'Gen2U3NAct': None, # generator 2 phase 3 to neutral voltage
'Gen2UnloadSt': None, # generator 2 unload state
'Gen2WarmUp': None, # generator 2 warm-up state
'Gen2WarnAl': None, # generator 2 warning alarm
'Gen2WatchdogPwr': None,
'Gen2WaterTinAct': None, # generator 2 actual water temp in
'Gen2WaterToutAct': None, # generator 2 actual water temp out
'Gen2WrongStart': None,
'Gen3AIOutOfRange': None, # generator 3 analogue input out of range
'Gen3AlarmSt': None, # generator 3 alarm state
'Gen3Asymmetry': None, # generator 3 unbalanced load alarm
'Gen3AutoMd': None, # generator 3 auto mode
'Gen3BatUnderVolt': None, # generator 3 battery under voltage
'Gen3BlackSt': None, # generator 3 black state
'Gen3ClosedSt': None, # generator 3 closed state
'Gen3CloseFailGCB': None, # generator 3 generator circuit breaker failed to close
'Gen3CommY1Y6': None,
'Gen3CoolDownSt': None, # generator 3 cool down state
'Gen3CritAl': None, # generator 3 critical alarm
'Gen3DigIOAl': None, # generator 3 digital I/O alarm
'Gen3ExhTempLeft': None, # generator 3 exhaust temperature left
'Gen3ExhTempRight': None, # generator 3 exhaust temperature right
'Gen3ExtOpenGCB': None, # generator 3 external generator circuit breaker open
'Gen3Fact': 'fgen3_Hz', # generator 3 actual frequency
'Gen3FirstStartMd': None, # generator 3 first start mode
'Gen3FtOpenAl': None, # generator 3 fail to open alarm
'Gen3FtStartAl': None, # generator 3 fail to start alarm
'Gen3FtStopAl': None, # generator 3 fail to stop alarm
'Gen3FtSyncAl': None, # generator 3 fail to sync alarm
'Gen3FuelConAct': None, # generator 3 fuel consumption
'Gen3FuelLitreCnt': None, # generator 3 fuel consumption counter
'Gen3GenFreq': None,
'Gen3GenLoadUnb': None,
'Gen3GenOverCur': None, # generator 3 generator overcurrent
'Gen3GenOverFreq': None, # generator 3 generator overfrequency
'Gen3GenOverload': None, # generator 3 generator overload
'Gen3GenOverSpd': None, # generator 3 generator overspeed
'Gen3GenRevPwr': None, # generator 3 generator reverse power
'Gen3GenUnderFreq': None, # generator 3 generator underfrequency
'Gen3GenUnderVolt': None, # generator 3 generator undervoltage
'Gen3HealthySt': None, # generator 3 healthy state
'Gen3I1Act': None, # generator 3 phase 1 actual current
'Gen3I2Act': None, # generator 3 phase 2 actual current
'Gen3I3Act': None, # generator 3 phase 3 actual current
'Gen3IAct': 'Igen3_A', # FIXME: this tag doesn't exist yet.
    'Gen3InternalError7': None,       # generator 3 internal error 7 (meaning undocumented)
'Gen3KwhTot': 'Egen3_kWh', # generator 3 kwh total
'Gen3LastStartMd': None, # generator 3 last start mode
'Gen3LdFctAct': None, # generator 3 actual load factor
'Gen3MainsOverFrq': None, # generator 3 mains overfrequency
'Gen3MainsOverVolt': None, # generator 3 mains overvoltage
'Gen3MainsUnderFrq': None, # generator 3 mains underfrequency
'Gen3MainsUnderVolt': None, # generator 3 mains undervoltage
    'Gen3MainsVectJump': None,        # generator 3 mains vector jump (vector-shift loss-of-mains detection)
'Gen3MaintenanceCall': None,
'Gen3ManualMd': None, # generator 3 manual mode
'Gen3MCBFail': None, # generator 3 MCB failure
'Gen3NonCritAl': None, # generator 3 non-critical alarm
'Gen3OilPrAftAct': None, # generator 3 actual oil pressure after filter
'Gen3OilPrBefAct': None, # generator 3 actual oil pressure before filter
'Gen3OilTact': None, # generator 3 actual oil temperature
'Gen3OpenFailGCB': None, # generator 3 open fail generator circuit breaker
'Gen3OpenSt': None, # generator 3 open state
'Gen3Pact': 'Pgen3_kVA', # generator 3 actual power
'Gen3PderAct': None, # generator 3 actual derated power
'Gen3PreGlowSt': None, # generator 3 pre-glow state
'Gen3PsetSt': None, # generator 3 power setpoint state
'Gen3PwrFctAct': 'PFgen3', # generator 3 actual power factor
'Gen3PwrMonAl': None, # generator 3 power monitor alarm
'Gen3QAct': 'Qgen3_kVAr', # generator 3 actual reactive power
'Gen3RpmAct': None, # generator 3 actual RPM
'Gen3RunningTimeOut': None, # generator 3 generator running timeout
'Gen3RunSt': None, # generator 3 running state
'Gen3ScadaMd': None, # generator 3 SCADA mode
'Gen3StartCnt': None, # generator 3 total number of starts
'Gen3StartingSt': None, # generator 3 starting state
'Gen3StoppingFail': None, # generator 3 failed to stop alarm
'Gen3StoppingSt': None, # generator 3 stopping state
'Gen3StopSt': None, # generator 3 stop state
'Gen3SyncSt': None, # generator 3 synchronisation state
'Gen3SyncTimeExcd': None, # generator 3 sync time exceeded
'Gen3TempDeratedSt': None,
'Gen3ThermOverCur': None, # generator 3 thermal overcurrent
'Gen3TimeoutSwchOnBlackBus': None, #
'Gen3TimeTillNextService': None, # generator 3 time until next service
'Gen3TrunTot': None, # generator 3 total run time
'Gen3U12Act': None, # generator 3 phase 1 to phase 2 voltage
'Gen3U1NAct': None, # generator 3 phase 1 to neutral voltage
'Gen3U23Act': None, # generator 3 phase 2 to phase 3 voltage
'Gen3U2NAct': None, # generator 3 phase 2 to neutral voltage
'Gen3U31Act': None, # generator 3 phase 3 to phase 1 voltage
'Gen3U3NAct': None, # generator 3 phase 3 to neutral voltage
'Gen3UnloadSt': None, # generator 3 unload state
'Gen3WarmUp': None, # generator 3 warm-up state
'Gen3WarnAl': None, # generator 3 warning alarm
'Gen3WatchdogPwr': None,
'Gen3WaterTinAct': None, # generator 3 actual water temp in
'Gen3WaterToutAct': None, # generator 3 actual water temp out
'Gen3WrongStart': None,
'Gen4AIOutOfRange': None, # generator 4 analogue input out of range
'Gen4AlarmSt': None, # generator 4 alarm state
'Gen4Asymmetry': None, # generator 4 unbalanced load alarm
'Gen4AutoMd': None, # generator 4 auto mode
'Gen4BatUnderVolt': None, # generator 4 battery under voltage
'Gen4BlackSt': None, # generator 4 black state
'Gen4ClosedSt': None, # generator 4 closed state
'Gen4CloseFailGCB': None, # generator 4 generator circuit breaker failed to close
'Gen4CommY1Y6': None, #
'Gen4CoolDownSt': None, # generator 4 cool down state
'Gen4CritAl': None, # generator 4 critical alarm
'Gen4DigIOAl': None, # generator 4 digital I/O alarm
'Gen4ExhTempLeft': None, # generator 4 exhaust temperature left
'Gen4ExhTempRight': None, # generator 4 exhaust temperature right
'Gen4ExtOpenGCB': None, # generator 4 external generator circuit breaker open
'Gen4Fact': 'fgen4_Hz', # generator 4 actual frequency
'Gen4FirstStartMd': None, # generator 4 first start mode
'Gen4FtOpenAl': None, # generator 4 fail to open alarm
'Gen4FtStartAl': None, # generator 4 fail to start alarm
'Gen4FtStopAl': None, # generator 4 fail to stop alarm
'Gen4FtSyncAl': None, # generator 4 fail to sync alarm
'Gen4FuelConAct': None, # generator 4 fuel consumption
'Gen4FuelLitreCnt': None, # generator 4 fuel consumption counter
'Gen4GenFreq': None,
'Gen4GenLoadUnb': None,
'Gen4GenOverCur': None, # generator 4 generator overcurrent
'Gen4GenOverFreq': None, # generator 4 generator overfrequency
'Gen4GenOverload': None, # generator 4 generator overload
'Gen4GenOverSpd': None, # generator 4 generator overspeed
'Gen4GenRevPwr': None, # generator 4 generator reverse power
'Gen4GenUnderFreq': None, # generator 4 generator underfrequency
'Gen4GenUnderVolt': None, # generator 4 generator undervoltage
'Gen4HealthySt': None, # generator 4 healthy state
'Gen4I1Act': None, # generator 4 phase 1 actual current
'Gen4I2Act': None, # generator 4 phase 2 actual current
'Gen4I3Act': None, # generator 4 phase 3 actual current
'Gen4IAct': 'Igen4_A', # FIXME: this tag doesn't exist yet.
    'Gen4InternalError7': None,       # generator 4 internal error 7 (meaning undocumented)
'Gen4KwhTot': 'Egen4_kWh', # generator 4 kwh total
'Gen4LastStartMd': None, # generator 4 last start mode
'Gen4LdFctAct': None, # generator 4 actual load factor
'Gen4MainsOverFrq': None, # generator 4 mains overfrequency
'Gen4MainsOverVolt': None, # generator 4 mains overvoltage
'Gen4MainsUnderFrq': None, # generator 4 mains underfrequency
'Gen4MainsUnderVolt': None, # generator 4 mains undervoltage
    'Gen4MainsVectJump': None,        # generator 4 mains vector jump (vector-shift loss-of-mains detection)
'Gen4MaintenanceCall': None,
'Gen4ManualMd': None, # generator 4 manual mode
'Gen4MCBFail': None, # generator 4 MCB failure
'Gen4NonCritAl': None, # generator 4 non-critical alarm
'Gen4OilPrAftAct': None, # generator 4 actual oil pressure after filter
'Gen4OilPrBefAct': None, # generator 4 actual oil pressure before filter
'Gen4OilTact': None, # generator 4 actual oil temperature
'Gen4OpenFailGCB': None, # generator 4 open fail generator circuit breaker
'Gen4OpenSt': None, # generator 4 open state
'Gen4Pact': 'Pgen4_kVA', # generator 4 actual power
'Gen4PderAct': None, # generator 4 actual derated power
'Gen4PreGlowSt': None, # generator 4 pre-glow state
'Gen4PsetSt': None, # generator 4 power setpoint state
'Gen4PwrFctAct': 'PFgen4', # generator 4 actual power factor
'Gen4PwrMonAl': None, # generator 4 power monitor alarm
'Gen4QAct': 'Qgen4_kVAr', # generator 4 actual reactive power
'Gen4RpmAct': None, # generator 4 actual RPM
'Gen4RunningTimeOut': None, # generator 4 generator running timeout
'Gen4RunSt': None, # generator 4 running state
'Gen4ScadaMd': None, # generator 4 SCADA mode
'Gen4StartCnt': None, # generator 4 total number of starts
'Gen4StartingSt': None, # generator 4 starting state
'Gen4StoppingFail': None, # generator 4 failed to stop alarm
'Gen4StoppingSt': None, # generator 4 stopping state
'Gen4StopSt': None, # generator 4 stop state
'Gen4SyncSt': None, # generator 4 synchronisation state
'Gen4SyncTimeExcd': None, # generator 4 sync time exceeded
'Gen4TempDeratedSt': None,
'Gen4ThermOverCur': None, # generator 4 thermal overcurrent
'Gen4TimeoutSwchOnBlackBus': None, #
'Gen4TimeTillNextService': None, # generator 4 time until next service
'Gen4TrunTot': None, # generator 4 total run time
'Gen4U12Act': None, # generator 4 phase 1 to phase 2 voltage
'Gen4U1NAct': None, # generator 4 phase 1 to neutral voltage
'Gen4U23Act': None, # generator 4 phase 2 to phase 3 voltage
'Gen4U2NAct': None, # generator 4 phase 2 to neutral voltage
'Gen4U31Act': None, # generator 4 phase 3 to phase 1 voltage
'Gen4U3NAct': None, # generator 4 phase 3 to neutral voltage
'Gen4UnloadSt': None, # generator 4 unload state
'Gen4WarmUp': None, # generator 4 warm-up state
'Gen4WarnAl': None, # generator 4 warning alarm
'Gen4WatchdogPwr': None,
'Gen4WaterTinAct': None, # generator 4 actual water temp in
'Gen4WaterToutAct': None, # generator 4 actual water temp out
'Gen4WrongStart': None,
'GenActCfgPwr': None,
'GenActCfgSetsGen1': None,
'GenActCfgSetsGen2': None,
'GenActCfgSetsGen3': None,
'GenActCfgSetsGen4': None,
'GenActCfgSetsGen5': None,
'GenActCfgSetsGen6': None,
'GenActCfgSetsGen7': None,
'GenActCfgSetsGen8': None,
'GenBlackSt': None,
'GenCfgOnlSt': None,
'GenNoAvailSt': None,
'GenNonManStopSt': None,
'GenPact': None,
'GenPcfgSet': None,
'GenPwrUpSt': None,
'GenRunSt': None,
'GenShutdownSt': None,
'GenStopSt': None,
'GenSWDownSt': None,
'GenSWUpSt': None,
'GenTransSt': None,
'OIL_STOCK_PIML': None,
'PSetP': None,
'PSetQ': None,
'PvAvailP': None,
'PvMgrPvStRegCriticalAl': None,
'PvMgrPvStRegNoAvailSt': None,
'PvMgrPvStRegNonCriticalAl': None,
'PvMgrPvStRegPwrUpSt': None,
'PvMgrPvStRegRunSt': None,
'PvMgrPvStRegShutdownSt': None,
'PvMgrPvStRegStopSt': None,
'PvMgrPvStRegTransSt': None,
'PVP': 'Ppv_kW',
'PVQ': 'Qpv_kVAr',
'PvStReg': None,
'SkyCam1_10mEstPct': None,
'SkyCam1_10mOk': None,
'SkyCam1_2m10Ok': None,
'SkyCam1_2m1Ok': None,
'SkyCam1_2m2Ok': None,
'SkyCam1_2m3Ok': None,
'SkyCam1_2m4Ok': None,
'SkyCam1_2m5Ok': None,
'SkyCam1_2m6Ok': None,
'SkyCam1_2m7Ok': None,
'SkyCam1_2m8Ok': None,
'SkyCam1_2m9Ok': None,
'SkyCam1_2mEstPct': None,
'SkyCam1_2mOk': None,
'SkyCam1_30mEstPct': None,
'SkyCam1_30mOk': None,
'SkyCam1_Alarm': None,
'SkyCam1_CloudPct': None,
'SkyCam1_SupplierSpec': None,
'SkyCam1_Watchdog': None,
'SkyCam2_10mEstPct': None,
'SkyCam2_10mOk': None,
'SkyCam2_2m10Ok': None,
'SkyCam2_2m1Ok': None,
'SkyCam2_2m2Ok': None,
'SkyCam2_2m3Ok': None,
'SkyCam2_2m4Ok': None,
'SkyCam2_2m5Ok': None,
'SkyCam2_2m6Ok': None,
'SkyCam2_2m7Ok': None,
'SkyCam2_2m8Ok': None,
'SkyCam2_2m9Ok': None,
'SkyCam2_2mEstPct': None,
'SkyCam2_2mOk': None,
'SkyCam2_30mEstPct': None,
'SkyCam2_30mOk': None,
'SkyCam2_Alarm': None,
'SkyCam2_CloudPct': None,
'SkyCam2_SupplierSpec': None,
'SkyCam2_Watchdog': None,
'SkyCam3_10mEstPct': None,
'SkyCam3_10mOk': None,
'SkyCam3_2m10Ok': None,
'SkyCam3_2m1Ok': None,
'SkyCam3_2m2Ok': None,
'SkyCam3_2m3Ok': None,
'SkyCam3_2m4Ok': None,
'SkyCam3_2m5Ok': None,
'SkyCam3_2m6Ok': None,
'SkyCam3_2m7Ok': None,
'SkyCam3_2m8Ok': None,
'SkyCam3_2m9Ok': None,
'SkyCam3_2mEstPct': None,
'SkyCam3_2mOk': None,
'SkyCam3_30mEstPct': None,
'SkyCam3_30mOk': None,
'SkyCam3_Alarm': None,
'SkyCam3_CloudPct': None,
'SkyCam3_SupplierSpec': None,
'SkyCam3_Watchdog': None,
'StatBattChrgFail': None,
'StatBlackAl': None,
'StatEmrgStopAl': None,
'StatFact': None,
'StatFireAl': None,
'StatIntruderAl': None,
'StatLackOfCapAl': None,
'StatNoFedAl': None,
'StatNoGenAl': None,
'StatOverLoadAl': None,
'StatPact': None,
'StatPcloseAct': None,
'StatPconsAct': None,
'StatPllpAct': None,
'StatPowerMonAl': None,
'StatPspinAct': None,
'StatPTotalAct': None,
'StatPwrFctAct': None,
'StatPwrSupplyFailAl': None,
'StatPwrUpSt': None,
'StatQact': None,
'StatRunSt': None,
'StatShutdownSt': None,
'StatStatBlackCnt': None,
'StatStatKwhTot': None,
'StatStatMaxDemTot': None,
'StatStatTblackCnt': None,
'StatStopSt': None,
'StatTempSensAl': None,
'StatUact': None,
'StatUnderFAl': None,
'SYSTIMACT': None,
'SysTimeAct': None,
#
# Weather station data
#
'StatWindSpd': 'vwind_m/s',
'StatWindDir': 'dwind_deg',
'StatAmbTemp': 'Tamb_degC',
'PvCellTemp': 'Tcell_degC',
'StatRainfall': 'Rain_mm',
'StatRelHum': 'Hum_%',
'StatGHI': 'Gghi_W/m2'
}
def transform(pitag):
"""
>>> transform('foobar')
Traceback (most recent call last):
...
ValueError: unknown tag foobar
>>> transform('StatQact')
Traceback (most recent call last):
...
ValueError: StatQact has no mapping
>>> transform('Gen1Pact')
'Pgen1_kVA'
"""
try:
t = tags[pitag]
if t is None:
raise ValueError('%s has no mapping' % pitag)
else:
return t
except KeyError:
raise ValueError('unknown tag %s' % pitag)
| bsd-3-clause | -1,354,862,000,757,330,000 | 43.132708 | 88 | 0.657565 | false |
happy56/kivy | examples/widgets/lists/list_cascade_images.py | 3 | 4936 | from kivy.adapters.dictadapter import DictAdapter
from kivy.uix.selectableview import SelectableView
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.listview import ListView, ListItemButton
from kivy.lang import Builder
from kivy.factory import Factory
from fixtures import fruit_categories, fruit_data
from fruit_detail_view import FruitImageDetailView
# This is a copy of list_cascade.py with image thumbnails added to the list
# item views and a larger image shown in the detail view for the selected
# fruit. It uses the kv template method for providing the list item view to
# the listview showing the list of fruits for a selected category.
Factory.register('SelectableView', cls=SelectableView)
Factory.register('ListItemButton', cls=ListItemButton)
# [TODO] Problem: index had to be added here so that it can be read from
#        ctx. Might need a "selection_template" to do this for the
#        developer? Or is it the developer's task to know that the index
#        must be wired through like this?
Builder.load_string('''
[ThumbnailedListItem@SelectableView+BoxLayout]:
index: ctx.index
fruit_name: ctx.text
size_hint_y: ctx.size_hint_y
height: ctx.height
Image
source: "fruit_images/{0}.32.jpg".format(ctx.text)
ListItemButton:
index: ctx.index
text: ctx.text
''')
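# The template defined above is referenced by name further below, via
# template='ThumbnailedListItem', when the fruits list adapter is built.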
# A custom adapter is needed here, because we must transform the selected
# fruit category into the list of fruit keys for that category.
#
class FruitsDictAdapter(DictAdapter):
def fruit_category_changed(self, fruit_categories_adapter, *args):
if len(fruit_categories_adapter.selection) == 0:
self.data = {}
return
category = \
fruit_categories[str(fruit_categories_adapter.selection[0])]
self.sorted_keys = category['fruits']
class CascadingView(GridLayout):
'''Implementation of a cascading style display, with a scrollable list
of fruit categories on the left, a list of thumbnailed fruit items for the
selected category in the middle, and a detail view on the right that shows
a larger fruit image with data.
See list_cascade_dict.py for the same example without images.
'''
def __init__(self, **kwargs):
kwargs['cols'] = 3
kwargs['size_hint'] = (1.0, 1.0)
super(CascadingView, self).__init__(**kwargs)
list_item_args_converter = \
lambda row_index, rec: {'text': rec['name'],
'size_hint_y': None,
'height': 25}
# Fruit categories list on the left:
#
categories = sorted(fruit_categories.keys())
fruit_categories_list_adapter = \
DictAdapter(
sorted_keys=categories,
data=fruit_categories,
args_converter=list_item_args_converter,
selection_mode='single',
allow_empty_selection=False,
cls=ListItemButton)
fruit_categories_list_view = \
ListView(adapter=fruit_categories_list_adapter,
size_hint=(.2, 1.0))
self.add_widget(fruit_categories_list_view)
# Fruits, for a given category, in the middle:
#
image_list_item_args_converter = \
lambda row_index, rec: {'text': rec['name'],
'size_hint_y': None,
'height': 32}
fruits_list_adapter = \
FruitsDictAdapter(
sorted_keys=fruit_categories[categories[0]]['fruits'],
data=fruit_data,
args_converter=image_list_item_args_converter,
selection_mode='single',
allow_empty_selection=False,
template='ThumbnailedListItem')
fruits_list_view = \
ListView(adapter=fruits_list_adapter,
size_hint=(.2, 1.0))
fruit_categories_list_adapter.bind(
on_selection_change=fruits_list_adapter.fruit_category_changed)
self.add_widget(fruits_list_view)
# Detail view, for a given fruit, on the right:
#
detail_view = FruitImageDetailView(
fruit_name=fruits_list_adapter.selection[0].fruit_name,
size_hint=(.6, 1.0))
fruits_list_adapter.bind(
on_selection_change=detail_view.fruit_changed)
self.add_widget(detail_view)
if __name__ == '__main__':
from kivy.base import runTouchApp
    # All fruit categories will be shown in the left column (first
    # argument), and the first category will be auto-selected -- Melons.
    # So, set the second list to show the melon fruits (second argument).
runTouchApp(CascadingView(width=800))
| lgpl-3.0 | 9,168,165,170,965,134,000 | 36.679389 | 79 | 0.612034 | false |
aquavitae/aafigure | aafigure/aafigure.py | 1 | 46035 | #!/usr/bin/env python
"""\
ASCII art to image converter.
This is the main module that contains the parser.
See svg.py and aa.py for output modules, that can render the parsed structure.
(C) 2006-2009 Chris Liechti <[email protected]>
This is open source software under the BSD license. See LICENSE.txt for more
details.
"""
import codecs
from .error import UnsupportedFormatError
from .shapes import *
from unicodedata import east_asian_width
import sys
NOMINAL_SIZE = 2
CLASS_LINE = 'line'
CLASS_STRING = 'str'
CLASS_RECTANGLE = 'rect'
CLASS_JOIN = 'join'
CLASS_FIXED = 'fixed'
DEFAULT_OPTIONS = dict(
background = '#ffffff',
foreground = '#000000',
line_width = 2.0,
scale = 1.0,
aspect = 1.0,
format = 'svg',
debug = False,
textual = False,
proportional = False,
encoding = 'utf-8',
widechars = 'F,W',
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class AsciiArtImage:
"""This class hold a ASCII art figure and has methods to parse it.
The resulting list of shapes is also stored here.
The image is parsed in 2 steps:
1. horizontal string detection.
2. generic shape detection.
    Each character that is used in a shape or string is tagged, so that
    further searches don't include it again (e.g. text in a string touching
    a fill) and so that it can be used correctly (e.g. join characters
    where two or more lines meet).
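
    Example (a minimal sketch)::

        aaimg = AsciiArtImage('+--+')
        aaimg.recognize()
        # aaimg.shapes now contains the detected shape objects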
"""
QUOTATION_CHARACTERS = list('"\'`')
def __init__(self, text, aspect_ratio=1, textual=False, widechars='F,W'):
"""Take a ASCII art figure and store it, prepare for ``recognize``"""
self.aspect_ratio = float(aspect_ratio)
self.textual = textual
# XXX TODO tab expansion
# detect size of input image, store as list of lines
self.image = []
max_x = 0
y = 0
# define character widths map
charwidths = {}
for key in ['F', 'H', 'W', 'Na', 'A', 'N']:
if key in widechars.split(','):
charwidths[key] = 2
else:
charwidths[key] = 1
for line in text.splitlines():
# extend length by 1 for each wide glyph
line_len = sum(charwidths[east_asian_width(c)] for c in line)
max_x = max(max_x, line_len)
# pad a space for each wide glyph
padded_line = ''.join(c+' '*(charwidths[east_asian_width(c)]-1) for c in line)
self.image.append(padded_line)
y += 1
self.width = max_x
self.height = y
# make sure it's rectangular (extend short lines to max width)
for y, line in enumerate(self.image):
if len(line) < max_x:
self.image[y] = line + ' '*(max_x-len(line))
# initialize other data structures
self.classification = [[None]*self.width for y in range(self.height)]
self.shapes = []
self.nominal_size = NOMINAL_SIZE
def __str__(self):
"""Return the original image"""
return '\n'.join([self.image[y] for y in range(self.height)])
def get(self, x, y):
"""Get character from image. Gives no error for access out of
bounds, just returns a space. This simplifies the scanner
functions.
"""
if 0 <= x < self.width and 0 <= y < self.height:
return self.image[y][x]
else:
return ' '
def tag(self, coordinates, classification):
"""Tag coordinates as used, store classification"""
for x, y in coordinates:
self.classification[y][x] = classification
def cls(self, x, y):
"""get tag at coordinate"""
try:
return self.classification[y][x]
except IndexError:
return 'outside'
# Coordinate conversion and shifting
def left(self, x):
return x*NOMINAL_SIZE*self.aspect_ratio
def hcenter(self, x):
return (x + 0.5)*NOMINAL_SIZE*self.aspect_ratio
def right(self, x):
return (x + 1)*NOMINAL_SIZE*self.aspect_ratio
def top(self, y):
return y*NOMINAL_SIZE
def vcenter(self, y):
return (y + 0.5)*NOMINAL_SIZE
def bottom(self, y):
return (y + 1)*NOMINAL_SIZE
def recognize(self):
"""
Try to convert ASCII art to vector graphics. The result is stored in
``self.shapes``.
"""
# XXX search for symbols
#~ #search for long strings
#~ for y in range(self.height):
#~ for x in range(self.width):
#~ character = self.image[y][x]
#~ if self.classification[y][x] is None:
#~ if character.isalnum():
#~ self.shapes.extend(
#~ self._follow_horizontal_string(x, y)
#~ )
# search for quoted texts
for y in range(self.height):
for x in range(self.width):
#if not yet classified, check for a line
character = self.image[y][x]
if character in self.QUOTATION_CHARACTERS and self.classification[y][x] is None:
self.shapes.extend(
self._follow_horizontal_string(x, y, quoted=True))
# search for standard shapes
for y in range(self.height):
for x in range(self.width):
#if not yet classified, check for a line
character = self.image[y][x]
if self.classification[y][x] is None:
if character == '-':
self.shapes.extend(self._follow_horizontal_line(x, y))
elif character == '|':
self.shapes.extend(self._follow_vertical_line(x, y))
elif character == '_':
self.shapes.extend(self._follow_lower_horizontal_line(x, y))
elif character == '~':
self.shapes.extend(self._follow_upper_horizontal_line(x, y))
elif character == '=':
self.shapes.extend(self._follow_horizontal_line(x, y, thick=True))
elif character in '\\/':
self.shapes.extend(self._follow_rounded_edge(x, y))
elif character == '+':
self.shapes.extend(self._plus_joiner(x, y))
elif character in self.FIXED_CHARACTERS:
self.shapes.extend(self.get_fixed_character(character)(x, y))
self.tag([(x, y)], CLASS_FIXED)
elif character in self.FILL_CHARACTERS:
if self.textual:
if self.get(x, y+1) == character:
self.shapes.extend(self._follow_fill(character, x, y))
else:
if (self.get(x+1, y) == character or self.get(x, y+1) == character):
self.shapes.extend(self._follow_fill(character, x, y))
# search for short strings too
for y in range(self.height):
for x in range(self.width):
character = self.image[y][x]
if self.classification[y][x] is None:
if character != ' ':
self.shapes.extend(self._follow_horizontal_string(x, y, accept_anything=True))
# - - - - - - - - - helper function for some shapes - - - - - - - - -
# Arrow drawing functions return the (new) starting point of the line and a
# list of shapes that draw the arrow. The line itself is not included in
# the list of shapes. The stating point is p1, possibly modified to match
# the shape of the arrow head.
#
# Use complex numbers as 2D vectors as that means easy transformations like
# scaling, rotation and translation
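    #
    # For example, with a unit direction vector d (a complex number),
    # d*1j is d rotated by 90 degrees, d*0.5 is d scaled to half length,
    # and p1 - d*1.5 translates p1 backwards along the line direction.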
# - - - - - - - - - arrows - - - - - - - - -
def _standard_arrow(self, p1, p2):
"""-->
return a possibly modified starting point and a list of shapes
"""
direction_vector = p1 - p2
direction_vector /= abs(direction_vector)
return p1, [
Line(p1, p1-direction_vector*1.5+direction_vector*0.5j),
Line(p1, p1-direction_vector*1.5+direction_vector*-0.5j)
]
def _reversed_arrow(self, p1, p2):
"""--<"""
direction_vector = p1 - p2
direction_vector /= abs(direction_vector)
return p1-direction_vector*2, [
Line(p1-direction_vector*2.0, p1+direction_vector*(-0.5+0.5j)),
Line(p1-direction_vector*2.0, p1+direction_vector*(-0.5-0.5j))
]
def _circle_head(self, p1, p2, radius=0.5):
"""--o"""
direction_vector = p1 - p2
direction_vector /= abs(direction_vector)
return p1-direction_vector, [Circle(p1-direction_vector, radius)]
def _large_circle_head(self, p1, p2):
"""--O"""
return self._circle_head(p1, p2, radius=0.9)
def _rectangular_head(self, p1, p2):
"""--#"""
direction_vector = p1 - p2
direction_vector /= abs(direction_vector)
#~ return p1-direction_vector*1.414, [
#~ Rectangle(p1-direction_vector-direction_vector*(0.707+0.707j),
#~ p1-direction_vector+direction_vector*(0.707+0.707j))
#~ ]
return p1-direction_vector*1.707, [
Line(p1-direction_vector-direction_vector*(0.707+0.707j),
p1-direction_vector-direction_vector*(0.707-0.707j)),
Line(p1-direction_vector+direction_vector*(0.707+0.707j),
p1-direction_vector+direction_vector*(0.707-0.707j)),
Line(p1-direction_vector-direction_vector*(0.707+0.707j),
p1-direction_vector+direction_vector*(0.707-0.707j)),
Line(p1-direction_vector-direction_vector*(0.707-0.707j),
p1-direction_vector+direction_vector*(0.707+0.707j)),
]
    # the same character can mean a different thing, depending on the
    # direction the line is coming from. this table maps line direction
    # (dx, dy) and the arrow character to an arrow drawing function
ARROW_TYPES = [
#chr dx dy arrow type
('>', 1, 0, '_standard_arrow'),
('<', -1, 0, '_standard_arrow'),
('^', 0, -1, '_standard_arrow'),
('A', 0, -1, '_standard_arrow'),
('V', 0, 1, '_standard_arrow'),
('v', 0, 1, '_standard_arrow'),
('>', -1, 0, '_reversed_arrow'),
('<', 1, 0, '_reversed_arrow'),
('^', 0, 1, '_reversed_arrow'),
('V', 0, -1, '_reversed_arrow'),
('v', 0, -1, '_reversed_arrow'),
('o', 1, 0, '_circle_head'),
('o', -1, 0, '_circle_head'),
('o', 0, -1, '_circle_head'),
('o', 0, 1, '_circle_head'),
('O', 1, 0, '_large_circle_head'),
('O', -1, 0, '_large_circle_head'),
('O', 0, -1, '_large_circle_head'),
('O', 0, 1, '_large_circle_head'),
('#', 1, 0, '_rectangular_head'),
('#', -1, 0, '_rectangular_head'),
('#', 0, -1, '_rectangular_head'),
('#', 0, 1, '_rectangular_head'),
]
ARROW_HEADS = list('<>AVv^oO#')
def get_arrow(self, character, dx, dy):
"""return arrow drawing function or None"""
for head, ddx, ddy, function_name in self.ARROW_TYPES:
if character == head and dx == ddx and dy == ddy:
return getattr(self, function_name)
# - - - - - - - - - fills - - - - - - - - -
    # Fill functions return a list of shapes. Each one covers one cell.
def _hatch_left(self, x, y):
return self._n_hatch_diagonal(x, y, 1, True)
def _hatch_right(self, x, y):
return self._n_hatch_diagonal(x, y, 1, False)
def _cross_hatch(self, x, y):
return self._n_hatch_diagonal(x, y, 1, True) + \
self._n_hatch_diagonal(x, y, 1, False)
def _double_hatch_left(self, x, y):
return self._n_hatch_diagonal(x, y, 2, True)
def _double_hatch_right(self, x, y):
return self._n_hatch_diagonal(x, y, 2, False)
def _double_cross_hatch(self, x, y):
return self._n_hatch_diagonal(x, y, 2, True) + \
self._n_hatch_diagonal(x, y, 2, False)
def _triple_hatch_left(self, x, y):
return self._n_hatch_diagonal(x, y, 3, True)
def _triple_hatch_right(self, x, y):
return self._n_hatch_diagonal(x, y, 3, False)
def _triple_cross_hatch(self, x, y):
return self._n_hatch_diagonal(x, y, 3, True) + \
self._n_hatch_diagonal(x, y, 3, False)
def _n_hatch_diagonal(self, x, y, n, left=False):
"""hatch generator function"""
d = 1/float(n)
result = []
if left:
for i in range(n):
result.append(Line(
Point(self.left(x), self.top(y+d*i)),
Point(self.right(x-d*i), self.bottom(y))
))
if n:
result.append(Line(
Point(self.right(x-d*i), self.top(y)),
Point(self.right(x), self.top(y+d*i))
))
else:
for i in range(n):
result.append(Line(Point(self.left(x), self.top(y+d*i)), Point(self.left(x+d*i), self.top(y))))
if n:
result.append(Line(Point(self.left(x+d*i), self.bottom(y)), Point(self.right(x), self.top(y+d*i))))
return result
def _hatch_v(self, x, y):
return self._n_hatch_straight(x, y, 1, True)
def _hatch_h(self, x, y):
return self._n_hatch_straight(x, y, 1, False)
def _hv_hatch(self, x, y):
return self._n_hatch_straight(x, y, 1, True) + \
self._n_hatch_straight(x, y, 1, False)
def _double_hatch_v(self, x, y):
return self._n_hatch_straight(x, y, 2, True)
def _double_hatch_h(self, x, y):
return self._n_hatch_straight(x, y, 2, False)
def _double_hv_hatch(self, x, y):
return self._n_hatch_straight(x, y, 2, True) + \
self._n_hatch_straight(x, y, 2, False)
def _triple_hatch_v(self, x, y):
return self._n_hatch_straight(x, y, 3, True)
def _triple_hatch_h(self, x, y):
return self._n_hatch_straight(x, y, 3, False)
def _triple_hv_hatch(self, x, y):
return self._n_hatch_straight(x, y, 3, True) + \
self._n_hatch_straight(x, y, 3, False)
def _n_hatch_straight(self, x, y, n, vertical=False):
"""hatch generator function"""
d = 1/float(n)
offset = 1.0/(n+1)
result = []
if vertical:
for i in range(n):
i = i + offset
result.append(Line(
Point(self.left(x+d*i), self.top(y)),
Point(self.left(x+d*i), self.bottom(y))
))
#~ if n:
#~ result.append(Line(Point(self.right(x-d*i), self.top(y)), Point(self.right(x), self.top(y+d*i))))
else:
for i in range(n):
i = i + offset
result.append(Line(
Point(self.left(x), self.top(y+d*i)),
Point(self.right(x), self.top(y+d*i))
))
#~ if n:
#~ result.append(Line(Point(self.left(x+d*i), self.bottom(y)), Point(self.right(x), self.top(y+d*i))))
return result
def _fill_trail(self, x, y):
return [
Line(
Point(self.left(x+0.707), self.top(y)),
Point(self.right(x), self.bottom(y-0.707))
),
Line(
Point(self.left(x), self.top(y+0.707)),
Point(self.right(x-0.707), self.bottom(y))
)
]
def _fill_foreground(self, x, y):
return [
Rectangle(
Point(self.left(x), self.top(y)),
Point(self.right(x), self.bottom(y))
)
]
def _fill_background(self, x, y):
return []
def _fill_small_circle(self, x, y):
return [
Circle(Point(self.left(x+0.5), self.top(y+0.5)), 0.2)
]
def _fill_medium_circle(self, x, y):
return [
Circle(Point(self.left(x+0.5), self.top(y+0.5)), 0.4)
]
def _fill_large_circle(self, x, y):
return [
Circle(Point(self.left(x+0.5), self.top(y+0.5)), 0.9)
]
def _fill_qmark(self, x, y):
return [
Label(Point(self.left(x), self.bottom(y)), '?')
]
def _fill_triangles(self, x, y):
return [
Line(Point(self.left(x+0.5), self.top(y+0.2)), Point(self.left(x+0.75), self.top(y+0.807))),
Line(Point(self.left(x+0.7), self.top(y+0.807)), Point(self.left(x+0.25), self.top(y+0.807))),
Line(Point(self.left(x+0.25), self.top(y+0.807)), Point(self.left(x+0.5), self.top(y+0.2))),
]
FILL_TYPES = [
('A', '_hatch_left'),
('B', '_hatch_right'),
('C', '_cross_hatch'),
('D', '_double_hatch_left'),
('E', '_double_hatch_right'),
('F', '_double_cross_hatch'),
('G', '_triple_hatch_left'),
('H', '_triple_hatch_right'),
('I', '_triple_cross_hatch'),
('J', '_hatch_v'),
('K', '_hatch_h'),
('L', '_hv_hatch'),
('M', '_double_hatch_v'),
('N', '_double_hatch_h'),
('O', '_double_hv_hatch'),
('P', '_triple_hatch_v'),
('Q', '_triple_hatch_h'),
('R', '_triple_hv_hatch'),
('S', '_fill_qmark'),
('T', '_fill_trail'),
('U', '_fill_small_circle'),
('V', '_fill_medium_circle'),
('W', '_fill_large_circle'),
('X', '_fill_foreground'),
('Y', '_fill_triangles'),
('Z', '_fill_background'),
]
FILL_CHARACTERS = ''.join([t+t.lower() for (t, f) in FILL_TYPES])
def get_fill(self, character):
"""return fill function"""
for head, function_name in self.FILL_TYPES:
if character == head:
return getattr(self, function_name)
raise ValueError('no such fill type')
# - - - - - - - - - fixed characters and their shapes - - - - - - - - -
def _open_triangle_left(self, x, y):
return [
Line(
Point(self.left(x), self.vcenter(y)),
Point(self.right(x), self.top(y))
),
Line(
Point(self.left(x), self.vcenter(y)),
Point(self.right(x), self.bottom(y))
)
]
def _open_triangle_right(self, x, y):
return [
Line(
Point(self.right(x), self.vcenter(y)),
Point(self.left(x), self.top(y))
),
Line(
Point(self.right(x), self.vcenter(y)),
Point(self.left(x), self.bottom(y))
)
]
def _circle(self, x, y):
return [
Circle(Point(self.hcenter(x), self.vcenter(y)), NOMINAL_SIZE/2.0)
]
FIXED_TYPES = [
('{', '_open_triangle_left'),
('}', '_open_triangle_right'),
('*', '_circle'),
]
FIXED_CHARACTERS = ''.join([t for (t, f) in FIXED_TYPES])
def get_fixed_character(self, character):
"""return fill function"""
for head, function_name in self.FIXED_TYPES:
if character == head:
return getattr(self, function_name)
raise ValueError('no such character')
# - - - - - - - - - helper function for shape recognition - - - - - - - - -
def _follow_vertical_line(self, x, y):
"""find a vertical line with optional arrow heads"""
# follow line to the bottom
_, end_y, line_end_style = self._follow_line(x, y, dy=1, line_character='|')
# follow line to the top
_, start_y, line_start_style = self._follow_line(x, y, dy=-1, line_character='|')
# if a '+' follows a line, then the line is stretched to hit the '+' center
start_y_fix = end_y_fix = 0
if self.get(x, start_y - 1) == '+':
start_y_fix = -0.5
if self.get(x, end_y + 1) == '+':
end_y_fix = 0.5
# tag characters as used (not the arrow heads)
self.tag([(x, y) for y in range(start_y, end_y + 1)], CLASS_LINE)
# return the new shape object with arrows etc.
p1 = complex(self.hcenter(x), self.top(start_y + start_y_fix))
p2 = complex(self.hcenter(x), self.bottom(end_y + end_y_fix))
shapes = []
if line_start_style:
p1, arrow_shapes = line_start_style(p1, p2)
shapes.extend(arrow_shapes)
if line_end_style:
p2, arrow_shapes = line_end_style(p2, p1)
shapes.extend(arrow_shapes)
shapes.append(Line(p1, p2))
return group(shapes)
def _follow_horizontal_line(self, x, y, thick=False):
"""find a horizontal line with optional arrow heads"""
if thick:
line_character = '='
else:
line_character = '-'
# follow line to the right
end_x, _, line_end_style = self._follow_line(x, y, dx=1, line_character=line_character)
# follow line to the left
start_x, _, line_start_style = self._follow_line(x, y, dx=-1, line_character=line_character)
start_x_fix = end_x_fix = 0
if self.get(start_x - 1, y) == '+':
start_x_fix = -0.5
if self.get(end_x + 1, y) == '+':
end_x_fix = 0.5
self.tag([(x, y) for x in range(start_x, end_x+1)], CLASS_LINE)
# return the new shape object with arrows etc.
p1 = complex(self.left(start_x + start_x_fix), self.vcenter(y))
p2 = complex(self.right(end_x + end_x_fix), self.vcenter(y))
shapes = []
if line_start_style:
p1, arrow_shapes = line_start_style(p1, p2)
shapes.extend(arrow_shapes)
if line_end_style:
p2, arrow_shapes = line_end_style(p2, p1)
shapes.extend(arrow_shapes)
shapes.append(Line(p1, p2, thick=thick))
return group(shapes)
def _follow_lower_horizontal_line(self, x, y):
"""find a horizontal line, the line is aligned to the bottom and a bit
wider, so that it can be used for shapes like this:
___
__| |___
"""
# follow line to the right
end_x, _, line_end_style = self._follow_line(x, y, dx=1, line_character='_', arrows=False)
# follow line to the left
start_x, _, line_start_style = self._follow_line(x, y, dx=-1, line_character='_', arrows=False)
self.tag([(x, y) for x in range(start_x, end_x+1)], CLASS_LINE)
# return the new shape object with arrows etc.
p1 = complex(self.hcenter(start_x-1), self.bottom(y))
p2 = complex(self.hcenter(end_x+1), self.bottom(y))
return [Line(p1, p2)]
def _follow_upper_horizontal_line(self, x, y):
"""find a horizontal line, the line is aligned to the bottom and a bit
wider, so that it can be used for shapes like this:
|~~~|
~~ ~~~
"""
# follow line to the right
end_x, _, line_end_style = self._follow_line(x, y, dx=1, line_character='~', arrows=False)
# follow line to the left
start_x, _, line_start_style = self._follow_line(x, y, dx=-1, line_character='~', arrows=False)
self.tag([(x, y) for x in range(start_x, end_x+1)], CLASS_LINE)
# return the new shape object with arrows etc.
p1 = complex(self.hcenter(start_x-1), self.top(y))
p2 = complex(self.hcenter(end_x+1), self.top(y))
return [Line(p1, p2)]
def _follow_line(self, x, y, dx=0, dy=0, line_character=None, arrows=True):
"""helper function for all the line functions"""
# follow line in the given direction
        while 0 <= x < self.width and 0 <= y < self.height and self.get(x+dx, y+dy) == line_character:
x += dx
y += dy
if arrows:
# check for arrow head
following_character = self.get(x + dx, y + dy)
if following_character in self.ARROW_HEADS:
line_end_style = self.get_arrow(following_character, dx, dy)
if line_end_style:
x += dx
y += dy
else:
line_end_style = None
else:
line_end_style = None
return x, y, line_end_style
def _plus_joiner(self, x, y):
"""adjacent '+' signs are connected with a line from center to center
required for images like these:
+---+ The box should be closed on all sides
| +---> and the arrow start should touch the box
+---+
"""
result = []
#~ for dx, dy in ((1,0), (-1,0), (0,1), (0,-1)):
# looking right and down is sufficient as the scan is done from left to
# right, top to bottom
for dx, dy in ((1, 0), (0, 1)):
if self.get(x + dx, y + dy) == '+':
result.append(Line(
Point(self.hcenter(x), self.vcenter(y)),
Point(self.hcenter(x + dx), self.vcenter(y + dy))
))
self.tag([(x, y)], CLASS_JOIN)
return result
def _follow_fill(self, character, start_x, start_y):
"""fill shapes like the ones below with a pattern. when the character is
upper case, draw a border too.
XXX aaa BB
XXX a
"""
fill = self.get_fill(character.upper())
border = character.isupper()
result = []
# flood fill algorithm, searching for similar characters
coordinates = []
to_scan = [(start_x, start_y)]
while to_scan:
x, y = to_scan.pop()
if self.cls(x, y) is None:
if self.get(x, y) == character:
result.extend(fill(x, y))
self.tag([(x, y)], CLASS_RECTANGLE)
if self.get(x + 1, y) == character:
if self.cls(x + 1, y) is None:
to_scan.append((x + 1, y))
elif border:
result.append(Line(
Point(self.right(x), self.top(y)),
Point(self.right(x), self.bottom(y))))
if self.get(x - 1, y) == character:
if self.cls(x - 1, y) is None:
to_scan.append((x - 1, y))
elif border:
result.append(Line(
Point(self.left(x), self.top(y)),
Point(self.left(x), self.bottom(y))))
if self.get(x, y + 1) == character:
if self.cls(x, y + 1) is None:
to_scan.append((x, y + 1))
elif border:
result.append(Line(
Point(self.left(x), self.bottom(y)),
Point(self.right(x), self.bottom(y))))
if self.get(x, y - 1) == character:
if self.cls(x, y - 1) is None:
to_scan.append((x, y - 1))
elif border:
result.append(Line(
Point(self.left(x), self.top(y)),
Point(self.right(x), self.top(y))))
return group(result)
def _follow_horizontal_string(self, start_x, y, accept_anything=False, quoted=False):
"""find a string. may contain single spaces, but the detection is
aborted after more than one space.
Text one "Text two"
accept_anything means that all non space characters are interpreted
as text.
"""
# follow line from left to right
if quoted:
quotation_character = self.get(start_x, y)
x = start_x + 1
else:
quotation_character = None
x = start_x
text = []
if self.get(x, y) != ' ':
text.append(self.get(x, y))
self.tag([(x, y)], CLASS_STRING)
is_first_space = True
while 0 <= x + 1 < self.width and self.cls(x + 1, y) is None:
if not quoted:
if self.get(x + 1, y) == ' ' and not is_first_space:
break
if not accept_anything and not self.get(x + 1, y).isalnum():
break
x += 1
character = self.get(x, y)
if character == quotation_character:
self.tag([(x, y)], CLASS_STRING)
break
text.append(character)
if character == ' ':
is_first_space = False
else:
is_first_space = True
if text[-1] == ' ':
del text[-1]
x -= 1
self.tag([(x, y) for x in range(start_x, x + 1)], CLASS_STRING)
return [Label(
Point(self.left(start_x), self.bottom(y)),
''.join(text)
)]
else:
return []
def _follow_rounded_edge(self, x, y):
"""check for rounded edges:
/- | -\- | and also \ / etc.
| -/ | \- - |
"""
result = []
if self.get(x, y) == '/':
# rounded rectangles
if (self.get(x + 1, y) == '-' and self.get(x, y + 1) == '|'):
# upper left corner
result.append(Arc(
Point(self.hcenter(x), self.bottom(y)), 90,
Point(self.right(x), self.vcenter(y)), 180
))
if self.get(x - 1, y) == '-' and self.get(x, y - 1) == '|':
# lower right corner
result.append(Arc(
Point(self.hcenter(x), self.top(y)), -90,
Point(self.left(x), self.vcenter(y)), 0
))
if not result:
# if used as diagonal line
p1 = p2 = None
a1 = a2 = 0
arc = c1 = c2 = False
if self.get(x + 1, y - 1) == '|':
p1 = Point(self.hcenter(x + 1), self.top(y))
a1 = -90
arc = c1 = True
elif self.get(x + 1, y - 1) == '+':
p1 = Point(self.hcenter(x + 1), self.vcenter(y - 1))
a1 = -135
elif self.get(x + 1, y - 1) == '-':
p1 = Point(self.right(x), self.vcenter(y - 1))
a1 = 180
arc = c1 = True
elif self.get(x + 1, y - 1) == '/':
p1 = Point(self.right(x), self.top(y))
a1 = -135
c1 = True
elif self.get(x + 1, y) == '|':
p1 = Point(self.hcenter(x + 1), self.top(y))
elif self.get(x, y - 1) == '-':
p1 = Point(self.right(x), self.vcenter(y - 1))
if self.get(x - 1, y + 1) == '|':
p2 = Point(self.hcenter(x - 1), self.top(y + 1))
a2 = 90
arc = c2 = True
elif self.get(x - 1, y + 1) == '+':
p2 = Point(self.hcenter(x - 1), self.vcenter(y + 1))
a2 = 45
elif self.get(x - 1, y + 1) == '-':
p2 = Point(self.left(x), self.vcenter(y + 1))
a2 = 0
arc = c2 = True
elif self.get(x - 1, y + 1) == '/':
p2 = Point(self.left(x), self.bottom(y))
a2 = 45
c2 = True
elif self.get(x - 1, y) == '|':
p2 = Point(self.hcenter(x - 1), self.bottom(y))
elif self.get(x, y + 1) == '-':
p2 = Point(self.left(x), self.vcenter(y + 1))
if p1 or p2:
if not p1:
p1 = Point(self.right(x), self.top(y))
if not p2:
p2 = Point(self.left(x), self.bottom(y))
if arc:
result.append(Arc(p1, a1, p2, a2, c1, c2))
else:
result.append(Line(p1, p2))
else: # '\'
# rounded rectangles
if self.get(x-1, y) == '-' and self.get(x, y + 1) == '|':
# upper right corner
result.append(Arc(
Point(self.hcenter(x), self.bottom(y)), 90,
Point(self.left(x), self.vcenter(y)), 0
))
if self.get(x+1, y) == '-' and self.get(x, y - 1) == '|':
# lower left corner
result.append(Arc(
Point(self.hcenter(x), self.top(y)), -90,
Point(self.right(x), self.vcenter(y)), 180
))
if not result:
# if used as diagonal line
p1 = p2 = None
a1 = a2 = 0
arc = c1 = c2 = False
if self.get(x - 1, y - 1) == '|':
p1 = Point(self.hcenter(x-1), self.top(y))
a1 = -90
arc = c1 = True
elif self.get(x - 1, y - 1) == '+':
p1 = Point(self.hcenter(x-1), self.vcenter(y - 1))
a1 = -45
elif self.get(x - 1, y - 1) == '-':
p1 = Point(self.left(x), self.vcenter(y-1))
a1 = 0
arc = c1 = True
elif self.get(x - 1, y - 1) == '\\':
p1 = Point(self.left(x), self.top(y))
a1 = -45
c1 = True
elif self.get(x - 1, y) == '|':
p1 = Point(self.hcenter(x-1), self.top(y))
elif self.get(x, y - 1) == '-':
                    p1 = Point(self.left(x), self.vcenter(y - 1))
if self.get(x + 1, y + 1) == '|':
p2 = Point(self.hcenter(x+1), self.top(y + 1))
a2 = 90
arc = c2 = True
elif self.get(x + 1, y + 1) == '+':
p2 = Point(self.hcenter(x+1), self.vcenter(y + 1))
a2 = 135
elif self.get(x + 1, y + 1) == '-':
p2 = Point(self.right(x), self.vcenter(y + 1))
a2 = 180
arc = c2 = True
elif self.get(x + 1, y + 1) == '\\':
p2 = Point(self.right(x), self.bottom(y))
a2 = 135
c2 = True
elif self.get(x + 1, y) == '|':
p2 = Point(self.hcenter(x+1), self.bottom(y))
elif self.get(x, y + 1) == '-':
p2 = Point(self.right(x), self.vcenter(y + 1))
if p1 or p2:
if not p1:
p1 = Point(self.left(x), self.top(y))
if not p2:
p2 = Point(self.right(x), self.bottom(y))
if arc:
result.append(Arc(p1, a1, p2, a2, c1, c2))
else:
result.append(Line(p1, p2))
if result:
self.tag([(x, y)], CLASS_JOIN)
return group(result)
def process(input, visitor_class, options=None):
"""\
Parse input and render using the given visitor class.
:param input: String or file like object with the image as text.
:param visitor_class: A class object, it will be used to render the
resulting image.
:param options: A dictionary containing the settings. When ``None`` is
given, defaults are used.
    :returns: the instantiated ``visitor_class``; the image has already
        been processed with the visitor.
:exception: This function can raise an ``UnsupportedFormatError`` exception
if the specified format is not supported.
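
    Example (a minimal sketch; visitor classes live in the backend
    modules, e.g. ``aafigure.svg`` or ``aafigure.aa``)::

        import io
        from aafigure import svg
        visitor = process('--->', svg.SVGOutputVisitor,
                          options={'file_like': io.StringIO()})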
"""
# remember user options (don't want to rename function parameter above)
user_options = options
# start with a copy of the defaults
options = DEFAULT_OPTIONS.copy()
if user_options is not None:
# override with settings passed by caller
options.update(user_options)
if 'fill' not in options or options['fill'] is None:
options['fill'] = options['foreground']
# if input is a file like object, read from it (otherwise it is assumed to
# be a string)
if hasattr(input, 'read'):
input = input.read()
if options['debug']:
sys.stderr.write('%r\n' % (input,))
aaimg = AsciiArtImage(input, options['aspect'], options['textual'], options['widechars'])
if options['debug']:
sys.stderr.write('%s\n' % (aaimg,))
aaimg.recognize()
visitor = visitor_class(options)
visitor.visit_image(aaimg)
return visitor
def render(input, output=None, options=None):
"""
Render an ASCII art figure to a file or file-like.
:param input: If ``input`` is a basestring subclass (str or unicode), the
text contained in ``input`` is rendered. If ``input is a file-like
object, the text to render is taken using ``input.read()``.
:param output: If no ``output`` is specified, the resulting rendered image
is returned as a string. If output is a basestring subclass, a file
with the name of ``output`` contents is created and the rendered image
is saved there. If ``output`` is a file-like object, ``output.write()``
is used to save the rendered image.
:param options: A dictionary containing the settings. When ``None`` is
given, defaults are used.
:returns: This function returns a tuple ``(visitor, output)``, where
``visitor`` is visitor instance that rendered the image and ``output``
is the image as requested by the ``output`` parameter (a ``str`` if it
was ``None``, or a file-like object otherwise, which you should
``close()`` if needed).
:exception: This function can raise an ``UnsupportedFormatError`` exception
if the specified format is not supported.
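
    Example (a minimal sketch, assuming the SVG backend is available)::

        text = '\\n'.join(['+-----+', '| box |', '+-----+'])
        visitor, output = render(text, options={'format': 'svg'})
        svg_markup = output.getvalue()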
"""
if options is None:
options = {}
close_output = False
if output is None:
import io
options['file_like'] = io.StringIO()
elif isinstance(output, str):
        options['file_like'] = open(output, 'wb')
close_output = True
else:
options['file_like'] = output
try:
# late import of visitor classes to not cause any import errors for
# unsupported backends (this would happen when a library a backend
# depends on is not installed)
if options['format'].lower() == 'svg':
from . import svg
visitor_class = svg.SVGOutputVisitor
elif options['format'].lower() == 'pdf':
from . import pdf
visitor_class = pdf.PDFOutputVisitor
elif options['format'].lower() == 'ascii':
from . import aa
visitor_class = aa.AsciiOutputVisitor
else:
# for all other formats, it may be a bitmap type. let
# PIL decide if it can write a file of that type.
from . import pil
visitor_class = pil.PILOutputVisitor
# now render and output the image
visitor = process(input, visitor_class, options)
finally:
if close_output:
options['file_like'].close()
return (visitor, options['file_like'])
def main():
"""implement an useful main for use as command line program"""
import sys
import optparse
import os.path
parser = optparse.OptionParser(
usage = "%prog [options] [file]",
version = """\
%prog 0.5
Copyright (C) 2006-2010 aafigure-team
Redistribution and use in source and binary forms, with or without
modification, are permitted under the terms of the BSD License.
THIS SOFTWARE IS PROVIDED BY THE AAFIGURE-TEAM ''AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AAFIGURE-TEAM BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""",
description = "ASCII art to image (SVG, PNG, JPEG, PDF and more) converter."
)
parser.add_option("-e", "--encoding",
dest = "encoding",
action = "store",
help = "character encoding of input text",
default = DEFAULT_OPTIONS['encoding'],
)
parser.add_option("-w", "--wide-chars",
dest = "widechars",
action = "store",
help = "unicode properties to be treated as wide glyph (e.g. 'F,W,A')",
default = DEFAULT_OPTIONS['widechars'],
)
parser.add_option("-o", "--output",
dest = "output",
metavar = "FILE",
help = "write output to FILE"
)
parser.add_option("-t", "--type",
dest = "format",
help = "filetype: png, jpg, svg (by default autodetect from filename)",
default = None,
)
parser.add_option("-D", "--debug",
dest = "debug",
action = "store_true",
help = "enable debug outputs",
default = DEFAULT_OPTIONS['debug'],
)
parser.add_option("-T", "--textual",
dest = "textual",
action = "store_true",
help = "disable horizontal fill detection",
default = DEFAULT_OPTIONS['textual'],
)
parser.add_option("-s", "--scale",
dest = "scale",
action = "store",
type = 'float',
help = "set scale",
default = DEFAULT_OPTIONS['scale'],
)
parser.add_option("-a", "--aspect",
dest = "aspect",
action = "store",
type = 'float',
help = "set aspect ratio",
default = DEFAULT_OPTIONS['aspect'],
)
parser.add_option("-l", "--linewidth",
dest = "line_width",
action = "store",
type = 'float',
help = "set width, svg only",
default = DEFAULT_OPTIONS['line_width'],
)
parser.add_option("--proportional",
dest = "proportional",
action = "store_true",
help = "use proportional font instead of fixed width",
default = DEFAULT_OPTIONS['proportional'],
)
parser.add_option("-f", "--foreground",
dest = "foreground",
action = "store",
help = "foreground color default=%default",
default = DEFAULT_OPTIONS['foreground'],
)
parser.add_option("-x", "--fill",
dest = "fill",
action = "store",
help = "foreground color default=foreground",
default = None,
)
parser.add_option("-b", "--background",
dest = "background",
action = "store",
help = "foreground color default=%default",
default = DEFAULT_OPTIONS['background'],
)
parser.add_option("-O", "--option",
dest = "_extra_options",
action = "append",
help = "pass special options to backends (expert user)",
)
(options, args) = parser.parse_args()
if len(args) > 1:
parser.error("too many arguments")
if options.format is None:
if options.output is None:
parser.error("Please specify output format with --type")
else:
options.format = os.path.splitext(options.output)[1][1:]
if args:
        _input = open(args[0], 'rb')
else:
_input = sys.stdin
input = codecs.getreader(options.encoding)(_input)
if options.output is None:
output = sys.stdout
else:
        output = open(options.output, 'wb')
# explicit copying of parameters to the options dictionary
options_dict = {}
for key in ('widechars', 'textual', 'proportional',
'line_width', 'aspect', 'scale',
'format', 'debug'):
options_dict[key] = getattr(options, key)
# ensure all color parameters start with a '#'
# this is for the convenience of the user as typing the shell comment
# character isn't for everyone ;-)
for color in ('foreground', 'background', 'fill'):
value = getattr(options, color)
if value is not None:
if value[0] != '#':
options_dict[color] = '#%s' % value
else:
options_dict[color] = value
# copy extra options
if options._extra_options is not None:
for keyvalue in options._extra_options:
try:
key, value = keyvalue.split('=')
except ValueError:
parser.error('--option must be in the format <key>=<value> (not %r)' % (keyvalue,))
options_dict[key] = value
if options.debug:
sys.stderr.write('options=%r\n' % (options_dict,))
try:
(visitor, output) = render(input, output, options_dict)
output.close()
except UnsupportedFormatError as e:
print("ERROR: Can't output format '%s': %s" % (options.format, e))
# when module is run, run the command line tool
if __name__ == '__main__':
main()
| bsd-3-clause | 378,366,602,091,080,260 | 36.610294 | 122 | 0.503769 | false |
jcoady9/python-for-android | python3-alpha/python3-src/Lib/encodings/iso2022_jp_1.py | 816 | 1061 | #
# iso2022_jp_1.py: Python Unicode Codec for ISO2022_JP_1
#
# Written by Hye-Shik Chang <[email protected]>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp_1')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='iso2022_jp_1',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
| apache-2.0 | -3,339,413,472,680,567,300 | 26.205128 | 74 | 0.702168 | false |
StephenChusang/py-faster-rcnn-tracker | lib/datasetfactory/ILSVRC.py | 1 | 5021 | import os
import os.path as osp
import xml.etree.ElementTree as ET
import numpy as np
import scipy.sparse
import scipy.io as sio
# from utils.cython_bbox import bbox_overlaps
years = {'2013': '2013',
'2014': ['0000', '0001', '0002', '0003', '0004', '0005', '0006']}
name = 'ILSVRC'
_MAX_TRAIN_NUM = 20000
def prepare_train_text(dataset):
""" Prepare training text with xml annotation files.
"""
# First step: get images' parent directories
ann_root = osp.join(dataset, 'Annotations')
_root_2013 = name + years['2013'] + '_train'
_2013 = osp.join(ann_root, 'DET', 'train', _root_2013)
dirs_2013 = [osp.join('DET', 'train', _root_2013, dir) for dir in os.listdir(_2013)]
dirs_2014 = [osp.join('DET', 'train', name + '2014' + '_train_' + sub) for sub in years['2014']]
dirs = dirs_2013 + dirs_2014
# Second step: get all the xml file paths
xmls = []
for _dir in dirs:
this_xmls = [osp.join(_dir, xml) for xml in os.listdir(osp.join(ann_root, _dir))]
xmls += this_xmls
print 'There are {} xml files.'.format(len(xmls))
# Third step: parse xml files and assign class labels
# if 'sysnets.txt' exists, we skip this part since it is time-consuming
if not os.path.exists(osp.join(dataset, 'sysnets.txt')):
sysnets = open(osp.join(dataset, 'sysnets.txt'), 'wb')
classes = []
for xml in xmls:
filename = osp.join(ann_root, xml)
tree = ET.parse(filename)
objs = tree.findall('object')
for obj in objs:
objname = obj.find('name').text.strip()
if objname not in classes:
classes.append(objname)
classes.sort()
# insert __background__
classes.insert(0, '__background__')
for ind, _class in enumerate(classes):
sysnets.write(_class + ' ' + str(ind) + '\n')
sysnets.close()
else:
print 'sysnets.txt exists and skip building sysnets.txt'
# Fourth step: write train
train_txt = open(osp.join(dataset, 'train.txt'), 'wb')
xmls = np.random.permutation(xmls)[: _MAX_TRAIN_NUM]
for ix, xml in enumerate(xmls):
img_path = osp.splitext(xml)[0]
train_txt.write(img_path + '\n')
if (ix + 1) % 1000 == 0:
print 'Processed {} files'.format(ix + 1)
train_txt.close()
def load_annotation(num_classes, xml, class_indexes):
tree = ET.parse(xml)
objs = tree.findall('object')
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, num_classes), dtype=np.float32)
# "Seg" area for pascal is just the box area
seg_areas = np.zeros((num_objs), dtype=np.float32)
# Load object bounding boxes into a data frame.
for ix, obj in enumerate(objs):
bbox = obj.find('bndbox')
# Make pixel indexes 0-based
x1 = float(bbox.find('xmin').text) - 1
y1 = float(bbox.find('ymin').text) - 1
x2 = float(bbox.find('xmax').text) - 1
y2 = float(bbox.find('ymax').text) - 1
cls = class_indexes[obj.find('name').text.lower().strip()]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes': boxes,
'gt_classes': gt_classes,
'gt_overlaps': overlaps,
'flipped': False,
'seg_areas': seg_areas}
def _load_data(dataset, class_indexes):
train_txt = osp.join(dataset, 'train.txt')
with open(train_txt, 'rb') as f:
train_datas = [train_data.strip('\n') for train_data in f.readlines()]
print 'Totally {} training files'.format(len(train_datas))
image = [osp.join(dataset, 'Data', train_data) + '.JPEG' for train_data in train_datas]
annotations = [osp.join(dataset, 'Annotations', train_data) + '.xml' for train_data in train_datas]
roidb = [load_annotation(len(class_indexes), xml, class_indexes) for xml in annotations]
# add image path to each entry
for ind, entry in enumerate(roidb):
entry['image'] = image[ind]
return roidb
def _load_class_labels(dataset):
sysnets = osp.join(dataset, 'sysnets.txt')
sysnets = open(sysnets, 'rb')
clabels = [clabel.strip('\n') for clabel in sysnets.readlines()]
class_labels = {}
class_indexes = {}
for clabel in clabels:
clabel = clabel.split()
class_labels[int(clabel[1])] = clabel[0]
class_indexes[clabel[0]] = int(clabel[1])
return class_labels, class_indexes
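# Illustrative sysnets.txt content, as written by prepare_train_text and
# parsed by _load_class_labels above (the WordNet synset IDs are examples):
#     __background__ 0
#     n01440764 1
#     n01443537 2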
def ILSVRC_handler(dataset):
dataset = dataset['dataset']
class_labels, class_indexes = _load_class_labels(dataset)
roidb = _load_data(dataset, class_indexes)
return class_labels, roidb
if __name__ == '__main__':
dataset = osp.join('data', 'ILSVRC2015')
prepare_train_text(dataset)
| mit | 2,989,938,069,268,150,000 | 33.627586 | 103 | 0.599681 | false |
scott-maddox/fdint | scripts/gen_test_scfd.py | 1 | 5280 | # Copyright (c) 2015, Scott J Maddox. All rights reserved.
# Use of this source code is governed by the BSD-3-Clause
# license that can be found in the LICENSE file.
'''
Uses numerical integration to calculate accurate values to test against.
This should only be run after `python setup.py build_ext --inplace`.
'''
import os
import sys
import fdint
tests_dir = os.path.join(os.path.dirname(__file__), '../fdint/tests/')
import warnings
import numpy
from numpy import exp, sqrt
from scipy.integrate import quad
INV_SQRT_PI_2 = 1.1283791670955126 # 2/sqrt(pi)
def quad_nonparabolic(phi, alpha):
def func(x):
return sqrt(x*(1+alpha*x))*(1+2*alpha*x)/(1.+exp(x-phi))*INV_SQRT_PI_2
r = quad(func, 0, numpy.inf,epsabs=1e-300,epsrel=1e-13,limit=100)
return r[0], r[1]
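# Sanity check (illustrative): with alpha = 0 the integrand reduces to the
# standard order-1/2 Fermi-Dirac integral, so quad_nonparabolic(0.0, 0.0)
# should return roughly 0.7651 together with a small absolute error estimate.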
# phis = numpy.array([-50,-3,-2,-1,0,1,4,5,7,10,15,20,30,40,50], dtype=float)
phis = numpy.linspace(-50, 50, 101, dtype=float)
def write_header(f, modname, dependencies=''):
f.write("""# Copyright (c) 2015, Scott J Maddox. All rights reserved.
# Use of this source code is governed by the BSD-3-Clause
# license that can be found in the LICENSE file.
# This file was generated by `scripts/gen_test_scfd.py`.
# Do not edit this file directly, or your changes will be lost.
'''
Tests the `{modname}` module.
'''
# Make sure we import the local package
import os
import sys
sys.path.insert(0,
os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
from fdint import {modname}
import unittest
import numpy
import warnings
""".format(modname=modname))
f.write(dependencies)
f.write('\n')
f.write('class Test_{modname}(unittest.TestCase):\n'
''.format(modname=modname.upper()))
f.write('''
def assert_rtol(self, a, b, rtol):
assert rtol >= 0
rerr = abs(a-b)/a
if rerr > rtol:
self.fail('Outside of relative tolerance of {}: {}'
''.format(rtol, rerr))
''')
f.write('''
def assert_all_rtol(self, a, b, rtol):
assert (rtol >= 0).all()
a = numpy.array(a)
b = numpy.array(b)
rtol = numpy.array(rtol)
rerr = abs(a-b)/a
if (rerr > rtol).all():
self.fail('Outside of relative tolerance of {}: {}'
''.format(rtol, rerr))
''')
##################
# Test scfd module
modname='scfd'
alphas = numpy.linspace(0., 0.15, 5)
fpath = os.path.join(tests_dir, 'test_{modname}.py'.format(modname=modname))
with open(fpath, 'w') as f:
mod = getattr(fdint, modname)
write_header(f, modname)
fname = 'nonparabolic'
# scalar
i = 0
for alpha in alphas:
for phi in phis:
i += 1
with warnings.catch_warnings():
warnings.simplefilter("ignore")
true_nu, aerr_est = quad_nonparabolic(phi, alpha)
nu = getattr(mod,'{fname}'.format(fname=fname))(phi, alpha)
aerr = abs(nu-true_nu)
rtol = max(abs(2*aerr/true_nu), abs(2*aerr_est/true_nu))
            suppress_warnings = (rtol > 2e-7 or
                                 (alpha >= 0.075 and phi >= 10) or
                                 (alpha >= 0.15 and phi >= 5) or
                                 phi >= 40)
# scalar
f.write('\n')
f.write(' def test_{fname}_{i}(self):\n'.format(fname=fname,i=i))
f.write(' phi = {}\n'.format(phi))
f.write(' alpha = {}\n'.format(alpha))
if suppress_warnings:
f.write(' with warnings.catch_warnings():\n')
f.write(' warnings.simplefilter("ignore")\n ')
f.write(' nu = {modname}.{fname}(phi, alpha)\n'
.format(modname=modname, fname=fname))
f.write(' true_nu = {}\n'
.format(true_nu))
f.write(' rtol = {:.0e}\n'.format(rtol))
f.write(' self.assert_rtol(nu, true_nu, rtol)\n')
# vector
f.write(' vphi = numpy.zeros(2); vphi.fill(phi)\n')
f.write(' valpha = numpy.zeros(2); valpha.fill(alpha)\n')
if suppress_warnings:
f.write(' with warnings.catch_warnings():\n')
f.write(' warnings.simplefilter("ignore")\n ')
f.write(' vnu = {modname}.{fname}(vphi, valpha)\n'
.format(modname=modname, fname=fname))
f.write(' vtrue_nu = numpy.zeros(2); vtrue_nu.fill(true_nu)\n')
f.write(' vrtol = numpy.zeros(2); vrtol.fill(rtol)\n')
f.write(' self.assert_all_rtol(vnu, vtrue_nu, vrtol)\n')
# buffered vector
if suppress_warnings:
f.write(' with warnings.catch_warnings():\n')
f.write(' warnings.simplefilter("ignore")\n ')
f.write(' vnu = numpy.zeros(2); {modname}.{fname}(vphi, valpha, vnu)\n'
.format(modname=modname, fname=fname))
f.write(' self.assert_all_rtol(vnu, vtrue_nu, vrtol)\n')
f.write('\n')
f.write('if __name__ == "__main__":\n')
f.write(' unittest.main()')
| bsd-3-clause | -5,182,951,187,604,252,000 | 37.540146 | 90 | 0.536742 | false |
Johnetordoff/osf.io | admin/osf_groups/views.py | 6 | 2534 | from django.contrib.auth.mixins import PermissionRequiredMixin
from django.urls import reverse
from django.views.generic import FormView, ListView
from osf.models import OSFGroup
from admin.osf_groups.forms import OSFGroupSearchForm
from admin.base.views import GuidView
from admin.osf_groups.serializers import serialize_group
class OSFGroupsView(PermissionRequiredMixin, GuidView):
""" Allow authorized admin user to view an osf group
"""
template_name = 'osf_groups/osf_groups.html'
context_object_name = 'group'
permission_required = 'osf.view_group'
raise_exception = True
def get_object(self, queryset=None):
id = self.kwargs.get('id')
osf_group = OSFGroup.objects.get(_id=id)
return serialize_group(osf_group)
class OSFGroupsFormView(PermissionRequiredMixin, FormView):
template_name = 'osf_groups/search.html'
object_type = 'osf_group'
permission_required = 'osf.view_group'
raise_exception = True
form_class = OSFGroupSearchForm
def __init__(self):
self.redirect_url = None
super(OSFGroupsFormView, self).__init__()
def form_valid(self, form):
id = form.data.get('id').strip()
name = form.data.get('name').strip()
self.redirect_url = reverse('osf_groups:search')
if id:
self.redirect_url = reverse('osf_groups:osf_group', kwargs={'id': id})
elif name:
self.redirect_url = reverse('osf_groups:osf_groups_list',) + '?name={}'.format(name)
return super(OSFGroupsFormView, self).form_valid(form)
@property
def success_url(self):
return self.redirect_url
class OSFGroupsListView(PermissionRequiredMixin, ListView):
""" Allow authorized admin user to view list of osf groups
"""
template_name = 'osf_groups/osf_groups_list.html'
paginate_by = 10
paginate_orphans = 1
permission_required = 'osf.view_group'
raise_exception = True
def get_queryset(self):
name = self.request.GET.get('name')
if name:
return OSFGroup.objects.filter(name__icontains=name)
return OSFGroup.objects.all()
def get_context_data(self, **kwargs):
query_set = kwargs.pop('object_list', self.object_list)
page_size = self.get_paginate_by(query_set)
paginator, page, query_set, is_paginated = self.paginate_queryset(
query_set, page_size)
return {
'groups': list(map(serialize_group, query_set)),
'page': page,
}
| apache-2.0 | -2,715,642,400,069,713,400 | 31.487179 | 96 | 0.660221 | false |
ahmadio/edx-platform | lms/djangoapps/certificates/api.py | 23 | 14808 | """Certificates API
This is a Python API for generating certificates asynchronously.
Other Django apps should use the API functions defined in this module
rather than importing Django models directly.
"""
import logging
from django.conf import settings
from django.core.urlresolvers import reverse
from eventtracking import tracker
from opaque_keys.edx.keys import CourseKey
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from xmodule.modulestore.django import modulestore
from util.organizations_helpers import get_course_organizations
from certificates.models import (
CertificateStatuses,
certificate_status_for_student,
CertificateGenerationCourseSetting,
CertificateGenerationConfiguration,
ExampleCertificateSet,
GeneratedCertificate,
CertificateTemplate,
)
from certificates.queue import XQueueCertInterface
log = logging.getLogger("edx.certificate")
def get_certificates_for_user(username):
"""
Retrieve certificate information for a particular user.
Arguments:
username (unicode): The identifier of the user.
Returns: list
Example Usage:
>>> get_certificates_for_user("bob")
[
{
"username": "bob",
"course_key": "edX/DemoX/Demo_Course",
"type": "verified",
"status": "downloadable",
"download_url": "http://www.example.com/cert.pdf",
"grade": "0.98",
"created": 2015-07-31T00:00:00Z,
"modified": 2015-07-31T00:00:00Z
}
]
"""
return [
{
"username": username,
"course_key": cert.course_id,
"type": cert.mode,
"status": cert.status,
"grade": cert.grade,
"created": cert.created_date,
"modified": cert.modified_date,
# NOTE: the download URL is not currently being set for webview certificates.
# In the future, we can update this to construct a URL to the webview certificate
# for courses that have this feature enabled.
"download_url": (
cert.download_url
if cert.status == CertificateStatuses.downloadable
else None
),
}
for cert in GeneratedCertificate.objects.filter(user__username=username).order_by("course_id")
]
def generate_user_certificates(student, course_key, course=None, insecure=False, generation_mode='batch',
forced_grade=None):
"""
It will add the add-cert request into the xqueue.
A new record will be created to track the certificate
generation task. If an error occurs while adding the certificate
to the queue, the task will have status 'error'. It also emits
`edx.certificate.created` event for analytics.
Args:
student (User)
course_key (CourseKey)
Keyword Arguments:
course (Course): Optionally provide the course object; if not provided
it will be loaded.
insecure - (Boolean)
generation_mode - who has requested certificate generation. Its value should `batch`
in case of django command and `self` if student initiated the request.
forced_grade - a string indicating to replace grade parameter. if present grading
will be skipped.
"""
xqueue = XQueueCertInterface()
if insecure:
xqueue.use_https = False
generate_pdf = not has_html_certificates_enabled(course_key, course)
status, cert = xqueue.add_cert(student, course_key,
course=course,
generate_pdf=generate_pdf,
forced_grade=forced_grade)
if status in [CertificateStatuses.generating, CertificateStatuses.downloadable]:
emit_certificate_event('created', student, course_key, course, {
'user_id': student.id,
'course_id': unicode(course_key),
'certificate_id': cert.verify_uuid,
'enrollment_mode': cert.mode,
'generation_mode': generation_mode
})
return status
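# Illustrative call (arguments hypothetical): request a self-generated
# certificate and inspect the resulting queue status.
#     status = generate_user_certificates(student, course_key, generation_mode='self')
#     if status == CertificateStatuses.generating:
#         ...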
def regenerate_user_certificates(student, course_key, course=None,
forced_grade=None, template_file=None, insecure=False):
"""
It will add the regen-cert request into the xqueue.
A new record will be created to track the certificate
generation task. If an error occurs while adding the certificate
to the queue, the task will have status 'error'.
Args:
student (User)
course_key (CourseKey)
Keyword Arguments:
course (Course): Optionally provide the course object; if not provided
it will be loaded.
grade_value - The grade string, such as "Distinction"
template_file - The template file used to render this certificate
insecure - (Boolean)
"""
xqueue = XQueueCertInterface()
if insecure:
xqueue.use_https = False
generate_pdf = not has_html_certificates_enabled(course_key, course)
return xqueue.regen_cert(
student,
course_key,
course=course,
forced_grade=forced_grade,
template_file=template_file,
generate_pdf=generate_pdf
)
def certificate_downloadable_status(student, course_key):
"""
    Check the student's existing certificates against a given course.
    If the status is none of generating, downloadable, or error, the user can see the generate button.
Args:
student (user object): logged-in user
course_key (CourseKey): ID associated with the course
Returns:
Dict containing student passed status also download url for cert if available
"""
current_status = certificate_status_for_student(student, course_key)
# If the certificate status is an error user should view that status is "generating".
# On the back-end, need to monitor those errors and re-submit the task.
response_data = {
'is_downloadable': False,
'is_generating': True if current_status['status'] in [CertificateStatuses.generating,
CertificateStatuses.error] else False,
'download_url': None
}
if current_status['status'] == CertificateStatuses.downloadable:
response_data['is_downloadable'] = True
response_data['download_url'] = current_status['download_url']
return response_data
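# Illustrative call (names hypothetical): decide whether to render a
# download link or a "generate" button for the current user.
#     info = certificate_downloadable_status(request.user, course_key)
#     show_download_link = info['is_downloadable']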
def set_cert_generation_enabled(course_key, is_enabled):
"""Enable or disable self-generated certificates for a course.
There are two "switches" that control whether self-generated certificates
are enabled for a course:
1) Whether the self-generated certificates feature is enabled.
2) Whether self-generated certificates have been enabled for this particular course.
The second flag should be enabled *only* when someone has successfully
generated example certificates for the course. This helps avoid
configuration errors (for example, not having a template configured
for the course installed on the workers). The UI for the instructor
dashboard enforces this constraint.
Arguments:
course_key (CourseKey): The course identifier.
Keyword Arguments:
is_enabled (boolean): If provided, enable/disable self-generated
certificates for this course.
"""
CertificateGenerationCourseSetting.set_enabled_for_course(course_key, is_enabled)
cert_event_type = 'enabled' if is_enabled else 'disabled'
event_name = '.'.join(['edx', 'certificate', 'generation', cert_event_type])
tracker.emit(event_name, {
'course_id': unicode(course_key),
})
if is_enabled:
log.info(u"Enabled self-generated certificates for course '%s'.", unicode(course_key))
else:
log.info(u"Disabled self-generated certificates for course '%s'.", unicode(course_key))
def cert_generation_enabled(course_key):
"""Check whether certificate generation is enabled for a course.
There are two "switches" that control whether self-generated certificates
are enabled for a course:
1) Whether the self-generated certificates feature is enabled.
2) Whether self-generated certificates have been enabled for this particular course.
Certificates are enabled for a course only when both switches
are set to True.
Arguments:
course_key (CourseKey): The course identifier.
Returns:
boolean: Whether self-generated certificates are enabled
for the course.
"""
return (
CertificateGenerationConfiguration.current().enabled and
CertificateGenerationCourseSetting.is_enabled_for_course(course_key)
)
def generate_example_certificates(course_key):
"""Generate example certificates for a course.
Example certificates are used to validate that certificates
are configured correctly for the course. Staff members can
view the example certificates before enabling
the self-generated certificates button for students.
Several example certificates may be generated for a course.
For example, if a course offers both verified and honor certificates,
examples of both types of certificate will be generated.
If an error occurs while starting the certificate generation
job, the errors will be recorded in the database and
can be retrieved using `example_certificate_status()`.
Arguments:
course_key (CourseKey): The course identifier.
Returns:
None
"""
xqueue = XQueueCertInterface()
for cert in ExampleCertificateSet.create_example_set(course_key):
xqueue.add_example_cert(cert)
def has_html_certificates_enabled(course_key, course=None):
"""
Determine if a course has html certificates enabled.
Arguments:
course_key (CourseKey|str): A course key or a string representation
of one.
course (CourseDescriptor|CourseOverview): A course.
"""
html_certificates_enabled = False
try:
if not isinstance(course_key, CourseKey):
course_key = CourseKey.from_string(course_key)
course = course if course else CourseOverview.get_from_id(course_key)
if settings.FEATURES.get('CERTIFICATES_HTML_VIEW', False) and course.cert_html_view_enabled:
html_certificates_enabled = True
except: # pylint: disable=bare-except
pass
return html_certificates_enabled
def example_certificates_status(course_key):
"""Check the status of example certificates for a course.
This will check the *latest* example certificate task.
This is generally what we care about in terms of enabling/disabling
self-generated certificates for a course.
Arguments:
course_key (CourseKey): The course identifier.
Returns:
list
Example Usage:
>>> from certificates import api as certs_api
>>> certs_api.example_certificate_status(course_key)
[
{
'description': 'honor',
'status': 'success',
'download_url': 'http://www.example.com/abcd/honor_cert.pdf'
},
{
'description': 'verified',
'status': 'error',
'error_reason': 'No template found!'
}
]
"""
return ExampleCertificateSet.latest_status(course_key)
def get_certificate_url(user_id, course_id):
"""
:return certificate url
"""
url = ""
if settings.FEATURES.get('CERTIFICATES_HTML_VIEW', False):
url = reverse(
'certificates:html_view',
kwargs={
"user_id": str(user_id),
"course_id": unicode(course_id),
}
)
else:
try:
if isinstance(course_id, basestring):
course_id = CourseKey.from_string(course_id)
user_certificate = GeneratedCertificate.objects.get(
user=user_id,
course_id=course_id
)
url = user_certificate.download_url
except GeneratedCertificate.DoesNotExist:
log.critical(
'Unable to lookup certificate\n'
'user id: %d\n'
'course: %s', user_id, unicode(course_id)
)
return url
def get_active_web_certificate(course, is_preview_mode=None):
"""
Retrieves the active web certificate configuration for the specified course
"""
certificates = getattr(course, 'certificates', '{}')
configurations = certificates.get('certificates', [])
for config in configurations:
if config.get('is_active') or is_preview_mode:
return config
return None
def get_certificate_template(course_key, mode):
"""
Retrieves the custom certificate template based on course_key and mode.
"""
org_id, template = None, None
# fetch organization of the course
course_organization = get_course_organizations(course_key)
if course_organization:
org_id = course_organization[0]['id']
if org_id and mode:
template = CertificateTemplate.objects.filter(
organization_id=org_id,
course_key=course_key,
mode=mode,
is_active=True
)
    # if no template was found, search by organization and mode only
if not template and org_id and mode:
template = CertificateTemplate.objects.filter(
organization_id=org_id,
mode=mode,
is_active=True
)
    # if still no template, search by organization only
if not template and org_id:
template = CertificateTemplate.objects.filter(
organization_id=org_id,
is_active=True
)
    # if we still have no template, search by course mode only
if not template and mode:
template = CertificateTemplate.objects.filter(
mode=mode,
is_active=True
)
return template[0].template if template else None
def emit_certificate_event(event_name, user, course_id, course=None, event_data=None):
"""
Emits certificate event.
"""
event_name = '.'.join(['edx', 'certificate', event_name])
if course is None:
course = modulestore().get_course(course_id, depth=0)
context = {
'org_id': course.org,
'course_id': unicode(course_id)
}
data = {
'user_id': user.id,
'course_id': unicode(course_id),
'certificate_url': get_certificate_url(user.id, course_id)
}
event_data = event_data or {}
event_data.update(data)
with tracker.get_tracker().context(event_name, context):
tracker.emit(event_name, event_data)
| agpl-3.0 | 3,544,022,867,712,887,000 | 32.808219 | 105 | 0.643976 | false |
thezawad/flexx | make/copyright.py | 21 | 2104 | """ Update all copyright notices to the current year.
Does a search for a specific copyright notice of last year and replaces
it with a version for this year. Other copyright mentions are listed,
but left unmodified.
If an argument is given, use that as the name of the copyright holder,
otherwise use the name specified in `make/__init__.py`.
"""
import os
import time
from make import ROOT_DIR, NAME
def copyright(name=''):
# Initialize
if not name:
name = '%s Development Team' % NAME
TEMPLATE = "# Copyright (c) %i, %s."
CURYEAR = int(time.strftime('%Y'))
OLDTEXT = TEMPLATE % (CURYEAR - 1, name)
NEWTEXT = TEMPLATE % (CURYEAR, name)
# Initialize counts
count_ok, count_replaced = 0, 0
print('looking for: ' + OLDTEXT)
# Processing the whole root directory
for dirpath, dirnames, filenames in os.walk(ROOT_DIR):
# Check if we should skip this directory
reldirpath = os.path.relpath(dirpath, ROOT_DIR)
if reldirpath[0] in '._' or reldirpath.endswith('__pycache__'):
continue
if os.path.split(reldirpath)[0] in ('build', 'dist'):
continue
# Process files
for fname in filenames:
if not fname.endswith('.py'):
continue
# Open and check
filename = os.path.join(dirpath, fname)
text = open(filename, 'rt').read()
if NEWTEXT in text:
count_ok += 1
elif OLDTEXT in text:
text = text.replace(OLDTEXT, NEWTEXT)
open(filename, 'wt').write(text)
print(
' Update copyright year in %s/%s' %
(reldirpath, fname))
count_replaced += 1
elif 'copyright' in text[:200].lower():
print(
' Unknown copyright mentioned in %s/%s' %
(reldirpath, fname))
# Report
print('Replaced %i copyright statements' % count_replaced)
print('Found %i copyright statements up to date' % count_ok)
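# Typical invocation (illustrative): copyright() uses the default holder name
# derived from make/__init__.py, while copyright('ACME Development Team')
# overrides it.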
| bsd-2-clause | 1,865,558,450,328,426,500 | 33.491803 | 72 | 0.57462 | false |
ashutoshvt/psi4 | psi4/driver/qmmm.py | 7 | 5473 | #
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with classes to integrate MM charges into
a QM calculation.
"""
from psi4.driver import *
class Diffuse(object):
def __init__(self, molecule, basisname, ribasisname):
self.molecule = molecule
self.basisname = basisname
self.ribasisname = ribasisname
self.basis = None
self.ribasis = None
self.da = None
self.Da = None
self.wfn = None
def __str__(self):
s = ' => Diffuse <=\n\n'
s = s + ' ' + str(self.molecule) + '\n'
s = s + ' ' + self.basisname + '\n'
s = s + ' ' + self.ribasisname + '\n'
s = s + '\n'
return s
def fitScf(self):
"""Function to run scf and fit a system of diffuse charges to
resulting density.
"""
basisChanged = core.has_option_changed("BASIS")
ribasisChanged = core.has_option_changed("DF_BASIS_SCF")
scftypeChanged = core.has_option_changed("SCF_TYPE")
basis = core.get_option("BASIS")
ribasis = core.get_option("DF_BASIS_SCF")
scftype = core.get_global_option("SCF_TYPE")
core.print_out(" => Diffuse SCF (Determines Da) <=\n\n")
core.set_global_option("BASIS", self.basisname)
core.set_global_option("DF_BASIS_SCF", self.ribasisname)
core.set_global_option("SCF_TYPE", "DF")
E, ref = energy('scf', return_wfn=True, molecule=self.molecule)
self.wfn = ref
core.print_out("\n")
self.fitGeneral()
core.clean()
core.set_global_option("BASIS", basis)
core.set_global_option("DF_BASIS_SCF", ribasis)
core.set_global_option("SCF_TYPE", scftype)
if not basisChanged:
core.revoke_option_changed("BASIS")
if not ribasisChanged:
core.revoke_option_changed("DF_BASIS_SCF")
if not scftypeChanged:
core.revoke_option_changed("SCF_TYPE")
def fitGeneral(self):
"""Function to perform a general fit of diffuse charges
to wavefunction density.
"""
core.print_out(" => Diffuse Charge Fitting (Determines da) <=\n\n")
self.Da = self.wfn.Da()
self.basis = self.wfn.basisset()
parser = core.Gaussian94BasisSetParser()
self.ribasis = core.BasisSet.construct(parser, self.molecule, "DF_BASIS_SCF")
fitter = core.DFChargeFitter()
fitter.setPrimary(self.basis)
fitter.setAuxiliary(self.ribasis)
fitter.setD(self.Da)
self.da = fitter.fit()
self.da.scale(2.0)
def populateExtern(self, extern):
# Electronic Part
extern.addBasis(self.ribasis, self.da)
# Nuclear Part
for A in range(0, self.molecule.natom()):
extern.addCharge(self.molecule.Z(A), self.molecule.x(A), self.molecule.y(A), self.molecule.z(A))
class QMMM(object):
def __init__(self):
self.charges = []
self.diffuses = []
self.extern = core.ExternalPotential()
def addDiffuse(self, diffuse):
"""Function to add a diffuse charge field *diffuse*."""
self.diffuses.append(diffuse)
def addChargeBohr(self, Q, x, y, z):
"""Function to add a point charge of magnitude *Q* at
position (*x*, *y*, *z*) Bohr.
"""
self.charges.append([Q, x, y, z])
def addChargeAngstrom(self, Q, x, y, z):
"""Function to add a point charge of magnitude *Q* at
position (*x*, *y*, *z*) Angstroms.
"""
self.charges.append([Q, x / constants.bohr2angstroms, y / constants.bohr2angstroms, z / constants.bohr2angstroms])
def __str__(self):
s = ' ==> QMMM <==\n\n'
s = s + ' => Charges (a.u.) <=\n\n'
s = s + ' %11s %11s %11s %11s\n' % ('Z', 'x', 'y', 'z')
for k in range(0, len(self.charges)):
s = s + ' %11.7f %11.3E %11.3E %11.3E\n' % (self.charges[k][0], self.charges[k][1], self.charges[k][2], self.charges[k][3])
s = s + '\n'
s = s + ' => Diffuses <=\n\n'
for k in range(0, len(self.diffuses)):
s = s + str(self.diffuses[k])
return s
def populateExtern(self):
"""Function to define a charge field external to the
molecule through point and diffuse charges.
"""
# Charges
for charge in self.charges:
self.extern.addCharge(charge[0], charge[1], charge[2], charge[3])
# Diffuses
for diffuse in self.diffuses:
diffuse.populateExtern(self.extern)
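# Minimal usage sketch (charge values and the 'EXTERN' option name are
# illustrative assumptions, not a definitive Psi4 workflow):
#     qmmm = QMMM()
#     qmmm.addChargeAngstrom(-0.834, 0.0, 0.0, 1.0)
#     qmmm.addChargeAngstrom(0.417, 0.8, 0.0, 1.6)
#     qmmm.populateExtern()
#     core.set_global_option_python('EXTERN', qmmm.extern)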
| lgpl-3.0 | 747,906,247,754,376,000 | 30.274286 | 138 | 0.595286 | false |
wgcv/SWW-Crashphone | lib/python2.7/site-packages/django/conf/locale/mk/formats.py | 43 | 1744 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y'
SHORT_DATETIME_FORMAT = 'j.m.Y H:i'
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%d. %m. %Y', '%d. %m. %y', # '25. 10. 2006', '25. 10. 06'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
'%d. %m. %Y %H:%M:%S', # '25. 10. 2006 14:30:59'
'%d. %m. %Y %H:%M:%S.%f', # '25. 10. 2006 14:30:59.000200'
'%d. %m. %Y %H:%M', # '25. 10. 2006 14:30'
'%d. %m. %Y', # '25. 10. 2006'
'%d. %m. %y %H:%M:%S', # '25. 10. 06 14:30:59'
'%d. %m. %y %H:%M:%S.%f', # '25. 10. 06 14:30:59.000200'
'%d. %m. %y %H:%M', # '25. 10. 06 14:30'
'%d. %m. %y', # '25. 10. 06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| apache-2.0 | -2,941,194,111,753,959,000 | 37.755556 | 77 | 0.493693 | false |
vnpy/vnpy | vnpy/api/rohon/rohon_constant.py | 5 | 39659 | THOST_FTDC_EXP_Normal = '0'
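# The assignments in this module are the single-character enum codes of the
# CTP-style THOST FTDC API exposed by the Rohon counter. Each group shares a
# common prefix, e.g. THOST_FTDC_D_* (direction) or THOST_FTDC_OF_* (offset
# flag); section comments below mark the most frequently used groups.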
THOST_FTDC_EXP_GenOrderByTrade = '1'
THOST_FTDC_ICT_EID = '0'
THOST_FTDC_ICT_IDCard = '1'
THOST_FTDC_ICT_OfficerIDCard = '2'
THOST_FTDC_ICT_PoliceIDCard = '3'
THOST_FTDC_ICT_SoldierIDCard = '4'
THOST_FTDC_ICT_HouseholdRegister = '5'
THOST_FTDC_ICT_Passport = '6'
THOST_FTDC_ICT_TaiwanCompatriotIDCard = '7'
THOST_FTDC_ICT_HomeComingCard = '8'
THOST_FTDC_ICT_LicenseNo = '9'
THOST_FTDC_ICT_TaxNo = 'A'
THOST_FTDC_ICT_HMMainlandTravelPermit = 'B'
THOST_FTDC_ICT_TwMainlandTravelPermit = 'C'
THOST_FTDC_ICT_DrivingLicense = 'D'
THOST_FTDC_ICT_SocialID = 'F'
THOST_FTDC_ICT_LocalID = 'G'
THOST_FTDC_ICT_BusinessRegistration = 'H'
THOST_FTDC_ICT_HKMCIDCard = 'I'
THOST_FTDC_ICT_AccountsPermits = 'J'
THOST_FTDC_ICT_FrgPrmtRdCard = 'K'
THOST_FTDC_ICT_CptMngPrdLetter = 'L'
THOST_FTDC_ICT_UniformSocialCreditCode = 'N'
THOST_FTDC_ICT_CorporationCertNo = 'O'
THOST_FTDC_ICT_OtherCard = 'x'
THOST_FTDC_IR_All = '1'
THOST_FTDC_IR_Group = '2'
THOST_FTDC_IR_Single = '3'
THOST_FTDC_DR_All = '1'
THOST_FTDC_DR_Group = '2'
THOST_FTDC_DR_Single = '3'
THOST_FTDC_DS_Asynchronous = '1'
THOST_FTDC_DS_Synchronizing = '2'
THOST_FTDC_DS_Synchronized = '3'
THOST_FTDC_BDS_Synchronized = '1'
THOST_FTDC_BDS_Synchronizing = '2'
THOST_FTDC_ECS_NoConnection = '1'
THOST_FTDC_ECS_QryInstrumentSent = '2'
THOST_FTDC_ECS_GotInformation = '9'
THOST_FTDC_TCS_NotConnected = '1'
THOST_FTDC_TCS_Connected = '2'
THOST_FTDC_TCS_QryInstrumentSent = '3'
THOST_FTDC_TCS_SubPrivateFlow = '4'
THOST_FTDC_FC_DataAsync = '1'
THOST_FTDC_FC_ForceUserLogout = '2'
THOST_FTDC_FC_UserPasswordUpdate = '3'
THOST_FTDC_FC_BrokerPasswordUpdate = '4'
THOST_FTDC_FC_InvestorPasswordUpdate = '5'
THOST_FTDC_FC_OrderInsert = '6'
THOST_FTDC_FC_OrderAction = '7'
THOST_FTDC_FC_SyncSystemData = '8'
THOST_FTDC_FC_SyncBrokerData = '9'
THOST_FTDC_FC_BachSyncBrokerData = 'A'
THOST_FTDC_FC_SuperQuery = 'B'
THOST_FTDC_FC_ParkedOrderInsert = 'C'
THOST_FTDC_FC_ParkedOrderAction = 'D'
THOST_FTDC_FC_SyncOTP = 'E'
THOST_FTDC_FC_DeleteOrder = 'F'
THOST_FTDC_BFC_ForceUserLogout = '1'
THOST_FTDC_BFC_UserPasswordUpdate = '2'
THOST_FTDC_BFC_SyncBrokerData = '3'
THOST_FTDC_BFC_BachSyncBrokerData = '4'
THOST_FTDC_BFC_OrderInsert = '5'
THOST_FTDC_BFC_OrderAction = '6'
THOST_FTDC_BFC_AllQuery = '7'
THOST_FTDC_BFC_log = 'a'
THOST_FTDC_BFC_BaseQry = 'b'
THOST_FTDC_BFC_TradeQry = 'c'
THOST_FTDC_BFC_Trade = 'd'
THOST_FTDC_BFC_Virement = 'e'
THOST_FTDC_BFC_Risk = 'f'
THOST_FTDC_BFC_Session = 'g'
THOST_FTDC_BFC_RiskNoticeCtl = 'h'
THOST_FTDC_BFC_RiskNotice = 'i'
THOST_FTDC_BFC_BrokerDeposit = 'j'
THOST_FTDC_BFC_QueryFund = 'k'
THOST_FTDC_BFC_QueryOrder = 'l'
THOST_FTDC_BFC_QueryTrade = 'm'
THOST_FTDC_BFC_QueryPosition = 'n'
THOST_FTDC_BFC_QueryMarketData = 'o'
THOST_FTDC_BFC_QueryUserEvent = 'p'
THOST_FTDC_BFC_QueryRiskNotify = 'q'
THOST_FTDC_BFC_QueryFundChange = 'r'
THOST_FTDC_BFC_QueryInvestor = 's'
THOST_FTDC_BFC_QueryTradingCode = 't'
THOST_FTDC_BFC_ForceClose = 'u'
THOST_FTDC_BFC_PressTest = 'v'
THOST_FTDC_BFC_RemainCalc = 'w'
THOST_FTDC_BFC_NetPositionInd = 'x'
THOST_FTDC_BFC_RiskPredict = 'y'
THOST_FTDC_BFC_DataExport = 'z'
THOST_FTDC_BFC_RiskTargetSetup = 'A'
THOST_FTDC_BFC_MarketDataWarn = 'B'
THOST_FTDC_BFC_QryBizNotice = 'C'
THOST_FTDC_BFC_CfgBizNotice = 'D'
THOST_FTDC_BFC_SyncOTP = 'E'
THOST_FTDC_BFC_SendBizNotice = 'F'
THOST_FTDC_BFC_CfgRiskLevelStd = 'G'
THOST_FTDC_BFC_TbCommand = 'H'
THOST_FTDC_BFC_DeleteOrder = 'J'
THOST_FTDC_BFC_ParkedOrderInsert = 'K'
THOST_FTDC_BFC_ParkedOrderAction = 'L'
THOST_FTDC_BFC_ExecOrderNoCheck = 'M'
THOST_FTDC_BFC_Designate = 'N'
THOST_FTDC_BFC_StockDisposal = 'O'
THOST_FTDC_BFC_BrokerDepositWarn = 'Q'
THOST_FTDC_BFC_CoverWarn = 'S'
THOST_FTDC_BFC_PreExecOrder = 'T'
THOST_FTDC_BFC_ExecOrderRisk = 'P'
THOST_FTDC_BFC_PosiLimitWarn = 'U'
THOST_FTDC_BFC_QryPosiLimit = 'V'
THOST_FTDC_BFC_FBSign = 'W'
THOST_FTDC_BFC_FBAccount = 'X'
THOST_FTDC_OAS_Submitted = 'a'
THOST_FTDC_OAS_Accepted = 'b'
THOST_FTDC_OAS_Rejected = 'c'
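# Order status (THOST_FTDC_OST_*): order lifecycle states, from fully traded
# and partially traded through canceled; NotTouched/Touched apply to
# conditional orders.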
THOST_FTDC_OST_AllTraded = '0'
THOST_FTDC_OST_PartTradedQueueing = '1'
THOST_FTDC_OST_PartTradedNotQueueing = '2'
THOST_FTDC_OST_NoTradeQueueing = '3'
THOST_FTDC_OST_NoTradeNotQueueing = '4'
THOST_FTDC_OST_Canceled = '5'
THOST_FTDC_OST_Unknown = 'a'
THOST_FTDC_OST_NotTouched = 'b'
THOST_FTDC_OST_Touched = 'c'
THOST_FTDC_OSS_InsertSubmitted = '0'
THOST_FTDC_OSS_CancelSubmitted = '1'
THOST_FTDC_OSS_ModifySubmitted = '2'
THOST_FTDC_OSS_Accepted = '3'
THOST_FTDC_OSS_InsertRejected = '4'
THOST_FTDC_OSS_CancelRejected = '5'
THOST_FTDC_OSS_ModifyRejected = '6'
THOST_FTDC_PSD_Today = '1'
THOST_FTDC_PSD_History = '2'
THOST_FTDC_PDT_UseHistory = '1'
THOST_FTDC_PDT_NoUseHistory = '2'
THOST_FTDC_ER_Broker = '1'
THOST_FTDC_ER_Host = '2'
THOST_FTDC_ER_Maker = '3'
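# Product class (THOST_FTDC_PC_*): futures, options, combinations, spot,
# EFP, spot options, TAS, etc.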
THOST_FTDC_PC_Futures = '1'
THOST_FTDC_PC_Options = '2'
THOST_FTDC_PC_Combination = '3'
THOST_FTDC_PC_Spot = '4'
THOST_FTDC_PC_EFP = '5'
THOST_FTDC_PC_SpotOption = '6'
THOST_FTDC_PC_TAS = '7'
THOST_FTDC_PC_MI = 'I'
THOST_FTDC_APC_FutureSingle = '1'
THOST_FTDC_APC_OptionSingle = '2'
THOST_FTDC_APC_Futures = '3'
THOST_FTDC_APC_Options = '4'
THOST_FTDC_APC_TradingComb = '5'
THOST_FTDC_APC_UnTradingComb = '6'
THOST_FTDC_APC_AllTrading = '7'
THOST_FTDC_APC_All = '8'
THOST_FTDC_IP_NotStart = '0'
THOST_FTDC_IP_Started = '1'
THOST_FTDC_IP_Pause = '2'
THOST_FTDC_IP_Expired = '3'
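# Direction (THOST_FTDC_D_*): buy or sell side of an order or trade.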
THOST_FTDC_D_Buy = '0'
THOST_FTDC_D_Sell = '1'
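# Hypothetical usage sketch: a gateway typically maps these codes to its own
# enums (names below are illustrative, not part of this module):
#     DIRECTION_MAP = {THOST_FTDC_D_Buy: "LONG", THOST_FTDC_D_Sell: "SHORT"}
#     direction = DIRECTION_MAP[order_field.Direction]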
THOST_FTDC_PT_Net = '1'
THOST_FTDC_PT_Gross = '2'
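# Position direction (THOST_FTDC_PD_*): net, long, or short position.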
THOST_FTDC_PD_Net = '1'
THOST_FTDC_PD_Long = '2'
THOST_FTDC_PD_Short = '3'
THOST_FTDC_SS_NonActive = '1'
THOST_FTDC_SS_Startup = '2'
THOST_FTDC_SS_Operating = '3'
THOST_FTDC_SS_Settlement = '4'
THOST_FTDC_SS_SettlementFinished = '5'
THOST_FTDC_RA_Trade = '0'
THOST_FTDC_RA_Settlement = '1'
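# Hedge flag (THOST_FTDC_HF_*): speculation, arbitrage, hedge, or market maker.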
THOST_FTDC_HF_Speculation = '1'
THOST_FTDC_HF_Arbitrage = '2'
THOST_FTDC_HF_Hedge = '3'
THOST_FTDC_HF_MarketMaker = '5'
THOST_FTDC_HF_SpecHedge = '6'
THOST_FTDC_HF_HedgeSpec = '7'
THOST_FTDC_BHF_Speculation = '1'
THOST_FTDC_BHF_Arbitrage = '2'
THOST_FTDC_BHF_Hedge = '3'
THOST_FTDC_CIDT_Speculation = '1'
THOST_FTDC_CIDT_Arbitrage = '2'
THOST_FTDC_CIDT_Hedge = '3'
THOST_FTDC_CIDT_MarketMaker = '5'
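# Order price type (THOST_FTDC_OPT_*): market (AnyPrice), limit, best price,
# and prices derived from the last/bid/ask levels.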
THOST_FTDC_OPT_AnyPrice = '1'
THOST_FTDC_OPT_LimitPrice = '2'
THOST_FTDC_OPT_BestPrice = '3'
THOST_FTDC_OPT_LastPrice = '4'
THOST_FTDC_OPT_LastPricePlusOneTicks = '5'
THOST_FTDC_OPT_LastPricePlusTwoTicks = '6'
THOST_FTDC_OPT_LastPricePlusThreeTicks = '7'
THOST_FTDC_OPT_AskPrice1 = '8'
THOST_FTDC_OPT_AskPrice1PlusOneTicks = '9'
THOST_FTDC_OPT_AskPrice1PlusTwoTicks = 'A'
THOST_FTDC_OPT_AskPrice1PlusThreeTicks = 'B'
THOST_FTDC_OPT_BidPrice1 = 'C'
THOST_FTDC_OPT_BidPrice1PlusOneTicks = 'D'
THOST_FTDC_OPT_BidPrice1PlusTwoTicks = 'E'
THOST_FTDC_OPT_BidPrice1PlusThreeTicks = 'F'
THOST_FTDC_OPT_FiveLevelPrice = 'G'
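# Offset flag (THOST_FTDC_OF_*): open, close, close today, close yesterday,
# or force close.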
THOST_FTDC_OF_Open = '0'
THOST_FTDC_OF_Close = '1'
THOST_FTDC_OF_ForceClose = '2'
THOST_FTDC_OF_CloseToday = '3'
THOST_FTDC_OF_CloseYesterday = '4'
THOST_FTDC_OF_ForceOff = '5'
THOST_FTDC_OF_LocalForceClose = '6'
THOST_FTDC_FCC_NotForceClose = '0'
THOST_FTDC_FCC_LackDeposit = '1'
THOST_FTDC_FCC_ClientOverPositionLimit = '2'
THOST_FTDC_FCC_MemberOverPositionLimit = '3'
THOST_FTDC_FCC_NotMultiple = '4'
THOST_FTDC_FCC_Violation = '5'
THOST_FTDC_FCC_Other = '6'
THOST_FTDC_FCC_PersonDeliv = '7'
THOST_FTDC_ORDT_Normal = '0'
THOST_FTDC_ORDT_DeriveFromQuote = '1'
THOST_FTDC_ORDT_DeriveFromCombination = '2'
THOST_FTDC_ORDT_Combination = '3'
THOST_FTDC_ORDT_ConditionalOrder = '4'
THOST_FTDC_ORDT_Swap = '5'
THOST_FTDC_ORDT_DeriveFromBlockTrade = '6'
THOST_FTDC_ORDT_DeriveFromEFPTrade = '7'
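# Time condition (THOST_FTDC_TC_*): IOC (immediate or cancel), GFD (good for
# day), GTC (good till canceled), and related validity rules.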
THOST_FTDC_TC_IOC = '1'
THOST_FTDC_TC_GFS = '2'
THOST_FTDC_TC_GFD = '3'
THOST_FTDC_TC_GTD = '4'
THOST_FTDC_TC_GTC = '5'
THOST_FTDC_TC_GFA = '6'
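# Volume condition (THOST_FTDC_VC_*): any volume, minimum volume, or
# complete (all-or-none) volume.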
THOST_FTDC_VC_AV = '1'
THOST_FTDC_VC_MV = '2'
THOST_FTDC_VC_CV = '3'
THOST_FTDC_CC_Immediately = '1'
THOST_FTDC_CC_Touch = '2'
THOST_FTDC_CC_TouchProfit = '3'
THOST_FTDC_CC_ParkedOrder = '4'
THOST_FTDC_CC_LastPriceGreaterThanStopPrice = '5'
THOST_FTDC_CC_LastPriceGreaterEqualStopPrice = '6'
THOST_FTDC_CC_LastPriceLesserThanStopPrice = '7'
THOST_FTDC_CC_LastPriceLesserEqualStopPrice = '8'
THOST_FTDC_CC_AskPriceGreaterThanStopPrice = '9'
THOST_FTDC_CC_AskPriceGreaterEqualStopPrice = 'A'
THOST_FTDC_CC_AskPriceLesserThanStopPrice = 'B'
THOST_FTDC_CC_AskPriceLesserEqualStopPrice = 'C'
THOST_FTDC_CC_BidPriceGreaterThanStopPrice = 'D'
THOST_FTDC_CC_BidPriceGreaterEqualStopPrice = 'E'
THOST_FTDC_CC_BidPriceLesserThanStopPrice = 'F'
THOST_FTDC_CC_BidPriceLesserEqualStopPrice = 'H'
THOST_FTDC_AF_Delete = '0'
THOST_FTDC_AF_Modify = '3'
THOST_FTDC_TR_Allow = '0'
THOST_FTDC_TR_CloseOnly = '1'
THOST_FTDC_TR_Forbidden = '2'
THOST_FTDC_OSRC_Participant = '0'
THOST_FTDC_OSRC_Administrator = '1'
THOST_FTDC_TRDT_SplitCombination = '#'
THOST_FTDC_TRDT_Common = '0'
THOST_FTDC_TRDT_OptionsExecution = '1'
THOST_FTDC_TRDT_OTC = '2'
THOST_FTDC_TRDT_EFPDerived = '3'
THOST_FTDC_TRDT_CombinationDerived = '4'
THOST_FTDC_TRDT_BlockTrade = '5'
THOST_FTDC_SPOST_Common = '#'
THOST_FTDC_SPOST_Tas = '0'
THOST_FTDC_PSRC_LastPrice = '0'
THOST_FTDC_PSRC_Buy = '1'
THOST_FTDC_PSRC_Sell = '2'
THOST_FTDC_PSRC_OTC = '3'
THOST_FTDC_IS_BeforeTrading = '0'
THOST_FTDC_IS_NoTrading = '1'
THOST_FTDC_IS_Continous = '2'
THOST_FTDC_IS_AuctionOrdering = '3'
THOST_FTDC_IS_AuctionBalance = '4'
THOST_FTDC_IS_AuctionMatch = '5'
THOST_FTDC_IS_Closed = '6'
THOST_FTDC_IER_Automatic = '1'
THOST_FTDC_IER_Manual = '2'
THOST_FTDC_IER_Fuse = '3'
THOST_FTDC_BS_NoUpload = '1'
THOST_FTDC_BS_Uploaded = '2'
THOST_FTDC_BS_Failed = '3'
THOST_FTDC_RS_All = '1'
THOST_FTDC_RS_ByProduct = '2'
THOST_FTDC_RP_ByVolume = '1'
THOST_FTDC_RP_ByFeeOnHand = '2'
THOST_FTDC_RL_Level1 = '1'
THOST_FTDC_RL_Level2 = '2'
THOST_FTDC_RL_Level3 = '3'
THOST_FTDC_RL_Level4 = '4'
THOST_FTDC_RL_Level5 = '5'
THOST_FTDC_RL_Level6 = '6'
THOST_FTDC_RL_Level7 = '7'
THOST_FTDC_RL_Level8 = '8'
THOST_FTDC_RL_Level9 = '9'
THOST_FTDC_RSD_ByPeriod = '1'
THOST_FTDC_RSD_ByStandard = '2'
THOST_FTDC_MT_Out = '0'
THOST_FTDC_MT_In = '1'
THOST_FTDC_ISPI_MortgageRatio = '4'
THOST_FTDC_ISPI_MarginWay = '5'
THOST_FTDC_ISPI_BillDeposit = '9'
THOST_FTDC_ESPI_MortgageRatio = '1'
THOST_FTDC_ESPI_OtherFundItem = '2'
THOST_FTDC_ESPI_OtherFundImport = '3'
THOST_FTDC_ESPI_CFFEXMinPrepa = '6'
THOST_FTDC_ESPI_CZCESettlementType = '7'
THOST_FTDC_ESPI_ExchDelivFeeMode = '9'
THOST_FTDC_ESPI_DelivFeeMode = '0'
THOST_FTDC_ESPI_CZCEComMarginType = 'A'
THOST_FTDC_ESPI_DceComMarginType = 'B'
THOST_FTDC_ESPI_OptOutDisCountRate = 'a'
THOST_FTDC_ESPI_OptMiniGuarantee = 'b'
THOST_FTDC_SPI_InvestorIDMinLength = '1'
THOST_FTDC_SPI_AccountIDMinLength = '2'
THOST_FTDC_SPI_UserRightLogon = '3'
THOST_FTDC_SPI_SettlementBillTrade = '4'
THOST_FTDC_SPI_TradingCode = '5'
THOST_FTDC_SPI_CheckFund = '6'
THOST_FTDC_SPI_CommModelRight = '7'
THOST_FTDC_SPI_MarginModelRight = '9'
THOST_FTDC_SPI_IsStandardActive = '8'
THOST_FTDC_SPI_UploadSettlementFile = 'U'
THOST_FTDC_SPI_DownloadCSRCFile = 'D'
THOST_FTDC_SPI_SettlementBillFile = 'S'
THOST_FTDC_SPI_CSRCOthersFile = 'C'
THOST_FTDC_SPI_InvestorPhoto = 'P'
THOST_FTDC_SPI_CSRCData = 'R'
THOST_FTDC_SPI_InvestorPwdModel = 'I'
THOST_FTDC_SPI_CFFEXInvestorSettleFile = 'F'
THOST_FTDC_SPI_InvestorIDType = 'a'
THOST_FTDC_SPI_FreezeMaxReMain = 'r'
THOST_FTDC_SPI_IsSync = 'A'
THOST_FTDC_SPI_RelieveOpenLimit = 'O'
THOST_FTDC_SPI_IsStandardFreeze = 'X'
THOST_FTDC_SPI_CZCENormalProductHedge = 'B'
THOST_FTDC_TPID_EncryptionStandard = 'E'
THOST_FTDC_TPID_RiskMode = 'R'
THOST_FTDC_TPID_RiskModeGlobal = 'G'
THOST_FTDC_TPID_modeEncode = 'P'
THOST_FTDC_TPID_tickMode = 'T'
THOST_FTDC_TPID_SingleUserSessionMaxNum = 'S'
THOST_FTDC_TPID_LoginFailMaxNum = 'L'
THOST_FTDC_TPID_IsAuthForce = 'A'
THOST_FTDC_TPID_IsPosiFreeze = 'F'
THOST_FTDC_TPID_IsPosiLimit = 'M'
THOST_FTDC_TPID_ForQuoteTimeInterval = 'Q'
THOST_FTDC_TPID_IsFuturePosiLimit = 'B'
THOST_FTDC_TPID_IsFutureOrderFreq = 'C'
THOST_FTDC_TPID_IsExecOrderProfit = 'H'
THOST_FTDC_TPID_IsCheckBankAcc = 'I'
THOST_FTDC_TPID_PasswordDeadLine = 'J'
THOST_FTDC_TPID_IsStrongPassword = 'K'
THOST_FTDC_TPID_BalanceMorgage = 'a'
THOST_FTDC_TPID_MinPwdLen = 'O'
THOST_FTDC_TPID_LoginFailMaxNumForIP = 'U'
THOST_FTDC_TPID_PasswordPeriod = 'V'
THOST_FTDC_FI_SettlementFund = 'F'
THOST_FTDC_FI_Trade = 'T'
THOST_FTDC_FI_InvestorPosition = 'P'
THOST_FTDC_FI_SubEntryFund = 'O'
THOST_FTDC_FI_CZCECombinationPos = 'C'
THOST_FTDC_FI_CSRCData = 'R'
THOST_FTDC_FI_CZCEClose = 'L'
THOST_FTDC_FI_CZCENoClose = 'N'
THOST_FTDC_FI_PositionDtl = 'D'
THOST_FTDC_FI_OptionStrike = 'S'
THOST_FTDC_FI_SettlementPriceComparison = 'M'
THOST_FTDC_FI_NonTradePosChange = 'B'
THOST_FTDC_FUT_Settlement = '0'
THOST_FTDC_FUT_Check = '1'
THOST_FTDC_FFT_Txt = '0'
THOST_FTDC_FFT_Zip = '1'
THOST_FTDC_FFT_DBF = '2'
THOST_FTDC_FUS_SucceedUpload = '1'
THOST_FTDC_FUS_FailedUpload = '2'
THOST_FTDC_FUS_SucceedLoad = '3'
THOST_FTDC_FUS_PartSucceedLoad = '4'
THOST_FTDC_FUS_FailedLoad = '5'
THOST_FTDC_TD_Out = '0'
THOST_FTDC_TD_In = '1'
THOST_FTDC_SC_NoSpecialRule = '0'
THOST_FTDC_SC_NoSpringFestival = '1'
THOST_FTDC_IPT_LastSettlement = '1'
THOST_FTDC_IPT_LaseClose = '2'
THOST_FTDC_PLP_Active = '1'
THOST_FTDC_PLP_NonActive = '2'
THOST_FTDC_PLP_Canceled = '3'
THOST_FTDC_DM_CashDeliv = '1'
THOST_FTDC_DM_CommodityDeliv = '2'
THOST_FTDC_FIOT_FundIO = '1'
THOST_FTDC_FIOT_Transfer = '2'
THOST_FTDC_FIOT_SwapCurrency = '3'
THOST_FTDC_FT_Deposite = '1'
THOST_FTDC_FT_ItemFund = '2'
THOST_FTDC_FT_Company = '3'
THOST_FTDC_FT_InnerTransfer = '4'
THOST_FTDC_FD_In = '1'
THOST_FTDC_FD_Out = '2'
THOST_FTDC_FS_Record = '1'
THOST_FTDC_FS_Check = '2'
THOST_FTDC_FS_Charge = '3'
THOST_FTDC_PS_None = '1'
THOST_FTDC_PS_Publishing = '2'
THOST_FTDC_PS_Published = '3'
THOST_FTDC_ES_NonActive = '1'
THOST_FTDC_ES_Startup = '2'
THOST_FTDC_ES_Initialize = '3'
THOST_FTDC_ES_Initialized = '4'
THOST_FTDC_ES_Close = '5'
THOST_FTDC_ES_Closed = '6'
THOST_FTDC_ES_Settlement = '7'
THOST_FTDC_STS_Initialize = '0'
THOST_FTDC_STS_Settlementing = '1'
THOST_FTDC_STS_Settlemented = '2'
THOST_FTDC_STS_Finished = '3'
THOST_FTDC_CT_Person = '0'
THOST_FTDC_CT_Company = '1'
THOST_FTDC_CT_Fund = '2'
THOST_FTDC_CT_SpecialOrgan = '3'
THOST_FTDC_CT_Asset = '4'
THOST_FTDC_BT_Trade = '0'
THOST_FTDC_BT_TradeSettle = '1'
THOST_FTDC_FAS_Low = '1'
THOST_FTDC_FAS_Normal = '2'
THOST_FTDC_FAS_Focus = '3'
THOST_FTDC_FAS_Risk = '4'
THOST_FTDC_FAS_ByTrade = '1'
THOST_FTDC_FAS_ByDeliv = '2'
THOST_FTDC_FAS_None = '3'
THOST_FTDC_FAS_FixFee = '4'
THOST_FTDC_PWDT_Trade = '1'
THOST_FTDC_PWDT_Account = '2'
THOST_FTDC_AG_All = '1'
THOST_FTDC_AG_OnlyLost = '2'
THOST_FTDC_AG_OnlyGain = '3'
THOST_FTDC_AG_None = '4'
THOST_FTDC_ICP_Include = '0'
THOST_FTDC_ICP_NotInclude = '2'
THOST_FTDC_AWT_Enable = '0'
THOST_FTDC_AWT_Disable = '2'
THOST_FTDC_AWT_NoHoldEnable = '3'
THOST_FTDC_FPWD_UnCheck = '0'
THOST_FTDC_FPWD_Check = '1'
THOST_FTDC_TT_BankToFuture = '0'
THOST_FTDC_TT_FutureToBank = '1'
THOST_FTDC_TVF_Invalid = '0'
THOST_FTDC_TVF_Valid = '1'
THOST_FTDC_TVF_Reverse = '2'
THOST_FTDC_RN_CD = '0'
THOST_FTDC_RN_ZT = '1'
THOST_FTDC_RN_QT = '2'
THOST_FTDC_SEX_None = '0'
THOST_FTDC_SEX_Man = '1'
THOST_FTDC_SEX_Woman = '2'
THOST_FTDC_UT_Investor = '0'
THOST_FTDC_UT_Operator = '1'
THOST_FTDC_UT_SuperUser = '2'
THOST_FTDC_RATETYPE_MarginRate = '2'
THOST_FTDC_NOTETYPE_TradeSettleBill = '1'
THOST_FTDC_NOTETYPE_TradeSettleMonth = '2'
THOST_FTDC_NOTETYPE_CallMarginNotes = '3'
THOST_FTDC_NOTETYPE_ForceCloseNotes = '4'
THOST_FTDC_NOTETYPE_TradeNotes = '5'
THOST_FTDC_NOTETYPE_DelivNotes = '6'
THOST_FTDC_SBS_Day = '1'
THOST_FTDC_SBS_Volume = '2'
THOST_FTDC_ST_Day = '0'
THOST_FTDC_ST_Month = '1'
THOST_FTDC_URT_Logon = '1'
THOST_FTDC_URT_Transfer = '2'
THOST_FTDC_URT_EMail = '3'
THOST_FTDC_URT_Fax = '4'
THOST_FTDC_URT_ConditionOrder = '5'
THOST_FTDC_MPT_PreSettlementPrice = '1'
THOST_FTDC_MPT_SettlementPrice = '2'
THOST_FTDC_MPT_AveragePrice = '3'
THOST_FTDC_MPT_OpenPrice = '4'
THOST_FTDC_BGS_None = '0'
THOST_FTDC_BGS_NoGenerated = '1'
THOST_FTDC_BGS_Generated = '2'
THOST_FTDC_AT_HandlePositionAlgo = '1'
THOST_FTDC_AT_FindMarginRateAlgo = '2'
THOST_FTDC_HPA_Base = '1'
THOST_FTDC_HPA_DCE = '2'
THOST_FTDC_HPA_CZCE = '3'
THOST_FTDC_FMRA_Base = '1'
THOST_FTDC_FMRA_DCE = '2'
THOST_FTDC_FMRA_CZCE = '3'
THOST_FTDC_HTAA_Base = '1'
THOST_FTDC_HTAA_DCE = '2'
THOST_FTDC_HTAA_CZCE = '3'
THOST_FTDC_PST_Order = '1'
THOST_FTDC_PST_Open = '2'
THOST_FTDC_PST_Fund = '3'
THOST_FTDC_PST_Settlement = '4'
THOST_FTDC_PST_Company = '5'
THOST_FTDC_PST_Corporation = '6'
THOST_FTDC_PST_LinkMan = '7'
THOST_FTDC_PST_Ledger = '8'
THOST_FTDC_PST_Trustee = '9'
THOST_FTDC_PST_TrusteeCorporation = 'A'
THOST_FTDC_PST_TrusteeOpen = 'B'
THOST_FTDC_PST_TrusteeContact = 'C'
THOST_FTDC_PST_ForeignerRefer = 'D'
THOST_FTDC_PST_CorporationRefer = 'E'
THOST_FTDC_QIR_All = '1'
THOST_FTDC_QIR_Group = '2'
THOST_FTDC_QIR_Single = '3'
THOST_FTDC_IRS_Normal = '1'
THOST_FTDC_IRS_Warn = '2'
THOST_FTDC_IRS_Call = '3'
THOST_FTDC_IRS_Force = '4'
THOST_FTDC_IRS_Exception = '5'
THOST_FTDC_UET_Login = '1'
THOST_FTDC_UET_Logout = '2'
THOST_FTDC_UET_Trading = '3'
THOST_FTDC_UET_TradingError = '4'
THOST_FTDC_UET_UpdatePassword = '5'
THOST_FTDC_UET_Authenticate = '6'
THOST_FTDC_UET_SubmitSysInfo = '7'
THOST_FTDC_UET_Transfer = '8'
THOST_FTDC_UET_Other = '9'
THOST_FTDC_ICS_Close = '0'
THOST_FTDC_ICS_CloseToday = '1'
THOST_FTDC_SM_Non = '0'
THOST_FTDC_SM_Instrument = '1'
THOST_FTDC_SM_Product = '2'
THOST_FTDC_SM_Investor = '3'
THOST_FTDC_PAOS_NotSend = '1'
THOST_FTDC_PAOS_Send = '2'
THOST_FTDC_PAOS_Deleted = '3'
THOST_FTDC_VDS_Dealing = '1'
THOST_FTDC_VDS_DeaclSucceed = '2'
THOST_FTDC_ORGS_Standard = '0'
THOST_FTDC_ORGS_ESunny = '1'
THOST_FTDC_ORGS_KingStarV6 = '2'
THOST_FTDC_VTS_NaturalDeal = '0'
THOST_FTDC_VTS_SucceedEnd = '1'
THOST_FTDC_VTS_FailedEND = '2'
THOST_FTDC_VTS_Exception = '3'
THOST_FTDC_VTS_ManualDeal = '4'
THOST_FTDC_VTS_MesException = '5'
THOST_FTDC_VTS_SysException = '6'
THOST_FTDC_VBAT_BankBook = '1'
THOST_FTDC_VBAT_BankCard = '2'
THOST_FTDC_VBAT_CreditCard = '3'
THOST_FTDC_VMS_Natural = '0'
THOST_FTDC_VMS_Canceled = '9'
THOST_FTDC_VAA_NoAvailAbility = '0'
THOST_FTDC_VAA_AvailAbility = '1'
THOST_FTDC_VAA_Repeal = '2'
THOST_FTDC_VTC_BankBankToFuture = '102001'
THOST_FTDC_VTC_BankFutureToBank = '102002'
THOST_FTDC_VTC_FutureBankToFuture = '202001'
THOST_FTDC_VTC_FutureFutureToBank = '202002'
THOST_FTDC_GEN_Program = '0'
THOST_FTDC_GEN_HandWork = '1'
THOST_FTDC_CFMMCKK_REQUEST = 'R'
THOST_FTDC_CFMMCKK_AUTO = 'A'
THOST_FTDC_CFMMCKK_MANUAL = 'M'
THOST_FTDC_CFT_IDCard = '0'
THOST_FTDC_CFT_Passport = '1'
THOST_FTDC_CFT_OfficerIDCard = '2'
THOST_FTDC_CFT_SoldierIDCard = '3'
THOST_FTDC_CFT_HomeComingCard = '4'
THOST_FTDC_CFT_HouseholdRegister = '5'
THOST_FTDC_CFT_LicenseNo = '6'
THOST_FTDC_CFT_InstitutionCodeCard = '7'
THOST_FTDC_CFT_TempLicenseNo = '8'
THOST_FTDC_CFT_NoEnterpriseLicenseNo = '9'
THOST_FTDC_CFT_OtherCard = 'x'
THOST_FTDC_CFT_SuperDepAgree = 'a'
THOST_FTDC_FBC_Others = '0'
THOST_FTDC_FBC_TransferDetails = '1'
THOST_FTDC_FBC_CustAccStatus = '2'
THOST_FTDC_FBC_AccountTradeDetails = '3'
THOST_FTDC_FBC_FutureAccountChangeInfoDetails = '4'
THOST_FTDC_FBC_CustMoneyDetail = '5'
THOST_FTDC_FBC_CustCancelAccountInfo = '6'
THOST_FTDC_FBC_CustMoneyResult = '7'
THOST_FTDC_FBC_OthersExceptionResult = '8'
THOST_FTDC_FBC_CustInterestNetMoneyDetails = '9'
THOST_FTDC_FBC_CustMoneySendAndReceiveDetails = 'a'
THOST_FTDC_FBC_CorporationMoneyTotal = 'b'
THOST_FTDC_FBC_MainbodyMoneyTotal = 'c'
THOST_FTDC_FBC_MainPartMonitorData = 'd'
THOST_FTDC_FBC_PreparationMoney = 'e'
THOST_FTDC_FBC_BankMoneyMonitorData = 'f'
THOST_FTDC_CEC_Exchange = '1'
THOST_FTDC_CEC_Cash = '2'
THOST_FTDC_YNI_Yes = '0'
THOST_FTDC_YNI_No = '1'
THOST_FTDC_BLT_CurrentMoney = '0'
THOST_FTDC_BLT_UsableMoney = '1'
THOST_FTDC_BLT_FetchableMoney = '2'
THOST_FTDC_BLT_FreezeMoney = '3'
THOST_FTDC_GD_Unknown = '0'
THOST_FTDC_GD_Male = '1'
THOST_FTDC_GD_Female = '2'
THOST_FTDC_FPF_BEN = '0'
THOST_FTDC_FPF_OUR = '1'
THOST_FTDC_FPF_SHA = '2'
THOST_FTDC_PWKT_ExchangeKey = '0'
THOST_FTDC_PWKT_PassWordKey = '1'
THOST_FTDC_PWKT_MACKey = '2'
THOST_FTDC_PWKT_MessageKey = '3'
THOST_FTDC_PWT_Query = '0'
THOST_FTDC_PWT_Fetch = '1'
THOST_FTDC_PWT_Transfer = '2'
THOST_FTDC_PWT_Trade = '3'
THOST_FTDC_EM_NoEncry = '0'
THOST_FTDC_EM_DES = '1'
THOST_FTDC_EM_3DES = '2'
THOST_FTDC_BRF_BankNotNeedRepeal = '0'
THOST_FTDC_BRF_BankWaitingRepeal = '1'
THOST_FTDC_BRF_BankBeenRepealed = '2'
THOST_FTDC_BRORF_BrokerNotNeedRepeal = '0'
THOST_FTDC_BRORF_BrokerWaitingRepeal = '1'
THOST_FTDC_BRORF_BrokerBeenRepealed = '2'
THOST_FTDC_TS_Bank = '0'
THOST_FTDC_TS_Future = '1'
THOST_FTDC_TS_Store = '2'
THOST_FTDC_LF_Yes = '0'
THOST_FTDC_LF_No = '1'
THOST_FTDC_BAS_Normal = '0'
THOST_FTDC_BAS_Freeze = '1'
THOST_FTDC_BAS_ReportLoss = '2'
THOST_FTDC_MAS_Normal = '0'
THOST_FTDC_MAS_Cancel = '1'
THOST_FTDC_MSS_Point = '0'
THOST_FTDC_MSS_PrePoint = '1'
THOST_FTDC_MSS_CancelPoint = '2'
THOST_FTDC_SYT_FutureBankTransfer = '0'
THOST_FTDC_SYT_StockBankTransfer = '1'
THOST_FTDC_SYT_TheThirdPartStore = '2'
THOST_FTDC_TEF_NormalProcessing = '0'
THOST_FTDC_TEF_Success = '1'
THOST_FTDC_TEF_Failed = '2'
THOST_FTDC_TEF_Abnormal = '3'
THOST_FTDC_TEF_ManualProcessedForException = '4'
THOST_FTDC_TEF_CommuFailedNeedManualProcess = '5'
THOST_FTDC_TEF_SysErrorNeedManualProcess = '6'
THOST_FTDC_PSS_NotProcess = '0'
THOST_FTDC_PSS_StartProcess = '1'
THOST_FTDC_PSS_Finished = '2'
THOST_FTDC_CUSTT_Person = '0'
THOST_FTDC_CUSTT_Institution = '1'
THOST_FTDC_FBTTD_FromBankToFuture = '1'
THOST_FTDC_FBTTD_FromFutureToBank = '2'
THOST_FTDC_OOD_Open = '1'
THOST_FTDC_OOD_Destroy = '0'
THOST_FTDC_AVAF_Invalid = '0'
THOST_FTDC_AVAF_Valid = '1'
THOST_FTDC_AVAF_Repeal = '2'
THOST_FTDC_OT_Bank = '1'
THOST_FTDC_OT_Future = '2'
THOST_FTDC_OT_PlateForm = '9'
THOST_FTDC_OL_HeadQuarters = '1'
THOST_FTDC_OL_Branch = '2'
THOST_FTDC_PID_FutureProtocal = '0'
THOST_FTDC_PID_ICBCProtocal = '1'
THOST_FTDC_PID_ABCProtocal = '2'
THOST_FTDC_PID_CBCProtocal = '3'
THOST_FTDC_PID_CCBProtocal = '4'
THOST_FTDC_PID_BOCOMProtocal = '5'
THOST_FTDC_PID_FBTPlateFormProtocal = 'X'
THOST_FTDC_CM_ShortConnect = '0'
THOST_FTDC_CM_LongConnect = '1'
THOST_FTDC_SRM_ASync = '0'
THOST_FTDC_SRM_Sync = '1'
THOST_FTDC_BAT_BankBook = '1'
THOST_FTDC_BAT_SavingCard = '2'
THOST_FTDC_BAT_CreditCard = '3'
THOST_FTDC_FAT_BankBook = '1'
THOST_FTDC_FAT_SavingCard = '2'
THOST_FTDC_FAT_CreditCard = '3'
THOST_FTDC_OS_Ready = '0'
THOST_FTDC_OS_CheckIn = '1'
THOST_FTDC_OS_CheckOut = '2'
THOST_FTDC_OS_CheckFileArrived = '3'
THOST_FTDC_OS_CheckDetail = '4'
THOST_FTDC_OS_DayEndClean = '5'
THOST_FTDC_OS_Invalid = '9'
THOST_FTDC_CCBFM_ByAmount = '1'
THOST_FTDC_CCBFM_ByMonth = '2'
THOST_FTDC_CAPIT_Client = '1'
THOST_FTDC_CAPIT_Server = '2'
THOST_FTDC_CAPIT_UserApi = '3'
THOST_FTDC_LS_Connected = '1'
THOST_FTDC_LS_Disconnected = '2'
THOST_FTDC_BPWDF_NoCheck = '0'
THOST_FTDC_BPWDF_BlankCheck = '1'
THOST_FTDC_BPWDF_EncryptCheck = '2'
THOST_FTDC_SAT_AccountID = '1'
THOST_FTDC_SAT_CardID = '2'
THOST_FTDC_SAT_SHStockholderID = '3'
THOST_FTDC_SAT_SZStockholderID = '4'
THOST_FTDC_TRFS_Normal = '0'
THOST_FTDC_TRFS_Repealed = '1'
THOST_FTDC_SPTYPE_Broker = '0'
THOST_FTDC_SPTYPE_Bank = '1'
THOST_FTDC_REQRSP_Request = '0'
THOST_FTDC_REQRSP_Response = '1'
THOST_FTDC_FBTUET_SignIn = '0'
THOST_FTDC_FBTUET_FromBankToFuture = '1'
THOST_FTDC_FBTUET_FromFutureToBank = '2'
THOST_FTDC_FBTUET_OpenAccount = '3'
THOST_FTDC_FBTUET_CancelAccount = '4'
THOST_FTDC_FBTUET_ChangeAccount = '5'
THOST_FTDC_FBTUET_RepealFromBankToFuture = '6'
THOST_FTDC_FBTUET_RepealFromFutureToBank = '7'
THOST_FTDC_FBTUET_QueryBankAccount = '8'
THOST_FTDC_FBTUET_QueryFutureAccount = '9'
THOST_FTDC_FBTUET_SignOut = 'A'
THOST_FTDC_FBTUET_SyncKey = 'B'
THOST_FTDC_FBTUET_ReserveOpenAccount = 'C'
THOST_FTDC_FBTUET_CancelReserveOpenAccount = 'D'
THOST_FTDC_FBTUET_ReserveOpenAccountConfirm = 'E'
THOST_FTDC_FBTUET_Other = 'Z'
THOST_FTDC_DBOP_Insert = '0'
THOST_FTDC_DBOP_Update = '1'
THOST_FTDC_DBOP_Delete = '2'
THOST_FTDC_SYNF_Yes = '0'
THOST_FTDC_SYNF_No = '1'
THOST_FTDC_SYNT_OneOffSync = '0'
THOST_FTDC_SYNT_TimerSync = '1'
THOST_FTDC_SYNT_TimerFullSync = '2'
THOST_FTDC_FBEDIR_Settlement = '0'
THOST_FTDC_FBEDIR_Sale = '1'
THOST_FTDC_FBERES_Success = '0'
THOST_FTDC_FBERES_InsufficientBalance = '1'
THOST_FTDC_FBERES_UnknownTrading = '8'
THOST_FTDC_FBERES_Fail = 'x'
THOST_FTDC_FBEES_Normal = '0'
THOST_FTDC_FBEES_ReExchange = '1'
THOST_FTDC_FBEFG_DataPackage = '0'
THOST_FTDC_FBEFG_File = '1'
THOST_FTDC_FBEAT_NotTrade = '0'
THOST_FTDC_FBEAT_Trade = '1'
THOST_FTDC_FBEUET_SignIn = '0'
THOST_FTDC_FBEUET_Exchange = '1'
THOST_FTDC_FBEUET_ReExchange = '2'
THOST_FTDC_FBEUET_QueryBankAccount = '3'
THOST_FTDC_FBEUET_QueryExchDetial = '4'
THOST_FTDC_FBEUET_QueryExchSummary = '5'
THOST_FTDC_FBEUET_QueryExchRate = '6'
THOST_FTDC_FBEUET_CheckBankAccount = '7'
THOST_FTDC_FBEUET_SignOut = '8'
THOST_FTDC_FBEUET_Other = 'Z'
THOST_FTDC_FBERF_UnProcessed = '0'
THOST_FTDC_FBERF_WaitSend = '1'
THOST_FTDC_FBERF_SendSuccess = '2'
THOST_FTDC_FBERF_SendFailed = '3'
THOST_FTDC_FBERF_WaitReSend = '4'
THOST_FTDC_NC_NOERROR = '0'
THOST_FTDC_NC_Warn = '1'
THOST_FTDC_NC_Call = '2'
THOST_FTDC_NC_Force = '3'
THOST_FTDC_NC_CHUANCANG = '4'
THOST_FTDC_NC_Exception = '5'
THOST_FTDC_FCT_Manual = '0'
THOST_FTDC_FCT_Single = '1'
THOST_FTDC_FCT_Group = '2'
THOST_FTDC_RNM_System = '0'
THOST_FTDC_RNM_SMS = '1'
THOST_FTDC_RNM_EMail = '2'
THOST_FTDC_RNM_Manual = '3'
THOST_FTDC_RNS_NotGen = '0'
THOST_FTDC_RNS_Generated = '1'
THOST_FTDC_RNS_SendError = '2'
THOST_FTDC_RNS_SendOk = '3'
THOST_FTDC_RNS_Received = '4'
THOST_FTDC_RNS_Confirmed = '5'
THOST_FTDC_RUE_ExportData = '0'
THOST_FTDC_COST_LastPriceAsc = '0'
THOST_FTDC_COST_LastPriceDesc = '1'
THOST_FTDC_COST_AskPriceAsc = '2'
THOST_FTDC_COST_AskPriceDesc = '3'
THOST_FTDC_COST_BidPriceAsc = '4'
THOST_FTDC_COST_BidPriceDesc = '5'
THOST_FTDC_UOAST_NoSend = '0'
THOST_FTDC_UOAST_Sended = '1'
THOST_FTDC_UOAST_Generated = '2'
THOST_FTDC_UOAST_SendFail = '3'
THOST_FTDC_UOAST_Success = '4'
THOST_FTDC_UOAST_Fail = '5'
THOST_FTDC_UOAST_Cancel = '6'
THOST_FTDC_UOACS_NoApply = '1'
THOST_FTDC_UOACS_Submited = '2'
THOST_FTDC_UOACS_Sended = '3'
THOST_FTDC_UOACS_Success = '4'
THOST_FTDC_UOACS_Refuse = '5'
THOST_FTDC_UOACS_Cancel = '6'
THOST_FTDC_QT_Radio = '1'
THOST_FTDC_QT_Option = '2'
THOST_FTDC_QT_Blank = '3'
THOST_FTDC_BT_Request = '1'
THOST_FTDC_BT_Response = '2'
THOST_FTDC_BT_Notice = '3'
THOST_FTDC_CRC_Success = '0'
THOST_FTDC_CRC_Working = '1'
THOST_FTDC_CRC_InfoFail = '2'
THOST_FTDC_CRC_IDCardFail = '3'
THOST_FTDC_CRC_OtherFail = '4'
THOST_FTDC_CfMMCCT_All = '0'
THOST_FTDC_CfMMCCT_Person = '1'
THOST_FTDC_CfMMCCT_Company = '2'
THOST_FTDC_CfMMCCT_Other = '3'
THOST_FTDC_CfMMCCT_SpecialOrgan = '4'
THOST_FTDC_CfMMCCT_Asset = '5'
THOST_FTDC_EIDT_SHFE = 'S'
THOST_FTDC_EIDT_CZCE = 'Z'
THOST_FTDC_EIDT_DCE = 'D'
THOST_FTDC_EIDT_CFFEX = 'J'
THOST_FTDC_EIDT_INE = 'N'
THOST_FTDC_ECIDT_Hedge = '1'
THOST_FTDC_ECIDT_Arbitrage = '2'
THOST_FTDC_ECIDT_Speculation = '3'
THOST_FTDC_UF_NoUpdate = '0'
THOST_FTDC_UF_Success = '1'
THOST_FTDC_UF_Fail = '2'
THOST_FTDC_UF_TCSuccess = '3'
THOST_FTDC_UF_TCFail = '4'
THOST_FTDC_UF_Cancel = '5'
THOST_FTDC_AOID_OpenInvestor = '1'
THOST_FTDC_AOID_ModifyIDCard = '2'
THOST_FTDC_AOID_ModifyNoIDCard = '3'
THOST_FTDC_AOID_ApplyTradingCode = '4'
THOST_FTDC_AOID_CancelTradingCode = '5'
THOST_FTDC_AOID_CancelInvestor = '6'
THOST_FTDC_AOID_FreezeAccount = '8'
THOST_FTDC_AOID_ActiveFreezeAccount = '9'
THOST_FTDC_ASID_NoComplete = '1'
THOST_FTDC_ASID_Submited = '2'
THOST_FTDC_ASID_Checked = '3'
THOST_FTDC_ASID_Refused = '4'
THOST_FTDC_ASID_Deleted = '5'
THOST_FTDC_UOASM_ByAPI = '1'
THOST_FTDC_UOASM_ByFile = '2'
THOST_FTDC_EvM_ADD = '1'
THOST_FTDC_EvM_UPDATE = '2'
THOST_FTDC_EvM_DELETE = '3'
THOST_FTDC_EvM_CHECK = '4'
THOST_FTDC_EvM_COPY = '5'
THOST_FTDC_EvM_CANCEL = '6'
THOST_FTDC_EvM_Reverse = '7'
THOST_FTDC_UOAA_ASR = '1'
THOST_FTDC_UOAA_ASNR = '2'
THOST_FTDC_UOAA_NSAR = '3'
THOST_FTDC_UOAA_NSR = '4'
THOST_FTDC_EvM_InvestorGroupFlow = '1'
THOST_FTDC_EvM_InvestorRate = '2'
THOST_FTDC_EvM_InvestorCommRateModel = '3'
THOST_FTDC_CL_Zero = '0'
THOST_FTDC_CL_One = '1'
THOST_FTDC_CL_Two = '2'
THOST_FTDC_CHS_Init = '0'
THOST_FTDC_CHS_Checking = '1'
THOST_FTDC_CHS_Checked = '2'
THOST_FTDC_CHS_Refuse = '3'
THOST_FTDC_CHS_Cancel = '4'
THOST_FTDC_CHU_Unused = '0'
THOST_FTDC_CHU_Used = '1'
THOST_FTDC_CHU_Fail = '2'
THOST_FTDC_BAO_ByAccProperty = '0'
THOST_FTDC_BAO_ByFBTransfer = '1'
THOST_FTDC_MBTS_ByInstrument = '0'
THOST_FTDC_MBTS_ByDayInsPrc = '1'
THOST_FTDC_MBTS_ByDayIns = '2'
THOST_FTDC_FTC_BankLaunchBankToBroker = '102001'
THOST_FTDC_FTC_BrokerLaunchBankToBroker = '202001'
THOST_FTDC_FTC_BankLaunchBrokerToBank = '102002'
THOST_FTDC_FTC_BrokerLaunchBrokerToBank = '202002'
THOST_FTDC_OTP_NONE = '0'
THOST_FTDC_OTP_TOTP = '1'
THOST_FTDC_OTPS_Unused = '0'
THOST_FTDC_OTPS_Used = '1'
THOST_FTDC_OTPS_Disuse = '2'
THOST_FTDC_BUT_Investor = '1'
THOST_FTDC_BUT_BrokerUser = '2'
THOST_FTDC_FUTT_Commodity = '1'
THOST_FTDC_FUTT_Financial = '2'
THOST_FTDC_FET_Restriction = '0'
THOST_FTDC_FET_TodayRestriction = '1'
THOST_FTDC_FET_Transfer = '2'
THOST_FTDC_FET_Credit = '3'
THOST_FTDC_FET_InvestorWithdrawAlm = '4'
THOST_FTDC_FET_BankRestriction = '5'
THOST_FTDC_FET_Accountregister = '6'
THOST_FTDC_FET_ExchangeFundIO = '7'
THOST_FTDC_FET_InvestorFundIO = '8'
THOST_FTDC_AST_FBTransfer = '0'
THOST_FTDC_AST_ManualEntry = '1'
THOST_FTDC_CST_UnifyAccount = '0'
THOST_FTDC_CST_ManualEntry = '1'
THOST_FTDC_UR_All = '0'
THOST_FTDC_UR_Single = '1'
THOST_FTDC_BG_Investor = '2'
THOST_FTDC_BG_Group = '1'
THOST_FTDC_TSSM_Instrument = '1'
THOST_FTDC_TSSM_Product = '2'
THOST_FTDC_TSSM_Exchange = '3'
THOST_FTDC_ESM_Relative = '1'
THOST_FTDC_ESM_Typical = '2'
THOST_FTDC_RIR_All = '1'
THOST_FTDC_RIR_Model = '2'
THOST_FTDC_RIR_Single = '3'
THOST_FTDC_SDS_Initialize = '0'
THOST_FTDC_SDS_Settlementing = '1'
THOST_FTDC_SDS_Settlemented = '2'
THOST_FTDC_TSRC_NORMAL = '0'
THOST_FTDC_TSRC_QUERY = '1'
THOST_FTDC_FSM_Product = '1'
THOST_FTDC_FSM_Exchange = '2'
THOST_FTDC_FSM_All = '3'
THOST_FTDC_BIR_Property = '1'
THOST_FTDC_BIR_All = '2'
THOST_FTDC_PIR_All = '1'
THOST_FTDC_PIR_Property = '2'
THOST_FTDC_PIR_Single = '3'
THOST_FTDC_FIS_NoCreate = '0'
THOST_FTDC_FIS_Created = '1'
THOST_FTDC_FIS_Failed = '2'
THOST_FTDC_FGS_FileTransmit = '0'
THOST_FTDC_FGS_FileGen = '1'
THOST_FTDC_SoM_Add = '1'
THOST_FTDC_SoM_Update = '2'
THOST_FTDC_SoM_Delete = '3'
THOST_FTDC_SoM_Copy = '4'
THOST_FTDC_SoM_AcTive = '5'
THOST_FTDC_SoM_CanCel = '6'
THOST_FTDC_SoM_ReSet = '7'
THOST_FTDC_SoT_UpdatePassword = '0'
THOST_FTDC_SoT_UserDepartment = '1'
THOST_FTDC_SoT_RoleManager = '2'
THOST_FTDC_SoT_RoleFunction = '3'
THOST_FTDC_SoT_BaseParam = '4'
THOST_FTDC_SoT_SetUserID = '5'
THOST_FTDC_SoT_SetUserRole = '6'
THOST_FTDC_SoT_UserIpRestriction = '7'
THOST_FTDC_SoT_DepartmentManager = '8'
THOST_FTDC_SoT_DepartmentCopy = '9'
THOST_FTDC_SoT_Tradingcode = 'A'
THOST_FTDC_SoT_InvestorStatus = 'B'
THOST_FTDC_SoT_InvestorAuthority = 'C'
THOST_FTDC_SoT_PropertySet = 'D'
THOST_FTDC_SoT_ReSetInvestorPasswd = 'E'
THOST_FTDC_SoT_InvestorPersonalityInfo = 'F'
THOST_FTDC_CSRCQ_Current = '0'
THOST_FTDC_CSRCQ_History = '1'
THOST_FTDC_FRS_Normal = '1'
THOST_FTDC_FRS_Freeze = '0'
THOST_FTDC_STST_Standard = '0'
THOST_FTDC_STST_NonStandard = '1'
THOST_FTDC_RPT_Freeze = '1'
THOST_FTDC_RPT_FreezeActive = '2'
THOST_FTDC_RPT_OpenLimit = '3'
THOST_FTDC_RPT_RelieveOpenLimit = '4'
THOST_FTDC_AMLDS_Normal = '0'
THOST_FTDC_AMLDS_Deleted = '1'
THOST_FTDC_AMLCHS_Init = '0'
THOST_FTDC_AMLCHS_Checking = '1'
THOST_FTDC_AMLCHS_Checked = '2'
THOST_FTDC_AMLCHS_RefuseReport = '3'
THOST_FTDC_AMLDT_DrawDay = '0'
THOST_FTDC_AMLDT_TouchDay = '1'
THOST_FTDC_AMLCL_CheckLevel0 = '0'
THOST_FTDC_AMLCL_CheckLevel1 = '1'
THOST_FTDC_AMLCL_CheckLevel2 = '2'
THOST_FTDC_AMLCL_CheckLevel3 = '3'
THOST_FTDC_EFT_CSV = '0'
THOST_FTDC_EFT_EXCEL = '1'
THOST_FTDC_EFT_DBF = '2'
THOST_FTDC_SMT_Before = '1'
THOST_FTDC_SMT_Settlement = '2'
THOST_FTDC_SMT_After = '3'
THOST_FTDC_SMT_Settlemented = '4'
THOST_FTDC_SML_Must = '1'
THOST_FTDC_SML_Alarm = '2'
THOST_FTDC_SML_Prompt = '3'
THOST_FTDC_SML_Ignore = '4'
THOST_FTDC_SMG_Exhcange = '1'
THOST_FTDC_SMG_ASP = '2'
THOST_FTDC_SMG_CSRC = '3'
THOST_FTDC_LUT_Repeatable = '1'
THOST_FTDC_LUT_Unrepeatable = '2'
THOST_FTDC_DAR_Settle = '1'
THOST_FTDC_DAR_Exchange = '2'
THOST_FTDC_DAR_CSRC = '3'
THOST_FTDC_MGT_ExchMarginRate = '0'
THOST_FTDC_MGT_InstrMarginRate = '1'
THOST_FTDC_MGT_InstrMarginRateTrade = '2'
THOST_FTDC_ACT_Intraday = '1'
THOST_FTDC_ACT_Long = '2'
THOST_FTDC_MRT_Exchange = '1'
THOST_FTDC_MRT_Investor = '2'
THOST_FTDC_MRT_InvestorTrade = '3'
THOST_FTDC_BUS_UnBak = '0'
THOST_FTDC_BUS_BakUp = '1'
THOST_FTDC_BUS_BakUped = '2'
THOST_FTDC_BUS_BakFail = '3'
THOST_FTDC_SIS_UnInitialize = '0'
THOST_FTDC_SIS_Initialize = '1'
THOST_FTDC_SIS_Initialized = '2'
THOST_FTDC_SRS_NoCreate = '0'
THOST_FTDC_SRS_Create = '1'
THOST_FTDC_SRS_Created = '2'
THOST_FTDC_SRS_CreateFail = '3'
THOST_FTDC_SSS_UnSaveData = '0'
THOST_FTDC_SSS_SaveDatad = '1'
THOST_FTDC_SAS_UnArchived = '0'
THOST_FTDC_SAS_Archiving = '1'
THOST_FTDC_SAS_Archived = '2'
THOST_FTDC_SAS_ArchiveFail = '3'
THOST_FTDC_CTPT_Unkown = '0'
THOST_FTDC_CTPT_MainCenter = '1'
THOST_FTDC_CTPT_BackUp = '2'
THOST_FTDC_CDT_Normal = '0'
THOST_FTDC_CDT_SpecFirst = '1'
THOST_FTDC_MFUR_None = '0'
THOST_FTDC_MFUR_Margin = '1'
THOST_FTDC_MFUR_All = '2'
THOST_FTDC_MFUR_CNY3 = '3'
THOST_FTDC_SPT_CzceHedge = '1'
THOST_FTDC_SPT_IneForeignCurrency = '2'
THOST_FTDC_SPT_DceOpenClose = '3'
THOST_FTDC_FMT_Mortgage = '1'
THOST_FTDC_FMT_Redemption = '2'
THOST_FTDC_ASPI_BaseMargin = '1'
THOST_FTDC_ASPI_LowestInterest = '2'
THOST_FTDC_FMD_In = '1'
THOST_FTDC_FMD_Out = '2'
THOST_FTDC_BT_Profit = '0'
THOST_FTDC_BT_Loss = '1'
THOST_FTDC_BT_Other = 'Z'
THOST_FTDC_SST_Manual = '0'
THOST_FTDC_SST_Automatic = '1'
THOST_FTDC_CED_Settlement = '0'
THOST_FTDC_CED_Sale = '1'
THOST_FTDC_CSS_Entry = '1'
THOST_FTDC_CSS_Approve = '2'
THOST_FTDC_CSS_Refuse = '3'
THOST_FTDC_CSS_Revoke = '4'
THOST_FTDC_CSS_Send = '5'
THOST_FTDC_CSS_Success = '6'
THOST_FTDC_CSS_Failure = '7'
THOST_FTDC_REQF_NoSend = '0'
THOST_FTDC_REQF_SendSuccess = '1'
THOST_FTDC_REQF_SendFailed = '2'
THOST_FTDC_REQF_WaitReSend = '3'
THOST_FTDC_RESF_Success = '0'
THOST_FTDC_RESF_InsuffiCient = '1'
THOST_FTDC_RESF_UnKnown = '8'
THOST_FTDC_EXS_Before = '0'
THOST_FTDC_EXS_After = '1'
THOST_FTDC_CR_Domestic = '1'
THOST_FTDC_CR_GMT = '2'
THOST_FTDC_CR_Foreign = '3'
THOST_FTDC_HB_No = '0'
THOST_FTDC_HB_Yes = '1'
THOST_FTDC_SM_Normal = '1'
THOST_FTDC_SM_Emerge = '2'
THOST_FTDC_SM_Restore = '3'
THOST_FTDC_TPT_Full = '1'
THOST_FTDC_TPT_Increment = '2'
THOST_FTDC_TPT_BackUp = '3'
THOST_FTDC_LM_Trade = '0'
THOST_FTDC_LM_Transfer = '1'
THOST_FTDC_CPT_Instrument = '1'
THOST_FTDC_CPT_Margin = '2'
THOST_FTDC_HT_Yes = '1'
THOST_FTDC_HT_No = '0'
THOST_FTDC_AMT_Bank = '1'
THOST_FTDC_AMT_Securities = '2'
THOST_FTDC_AMT_Fund = '3'
THOST_FTDC_AMT_Insurance = '4'
THOST_FTDC_AMT_Trust = '5'
THOST_FTDC_AMT_Other = '9'
THOST_FTDC_CFIOT_FundIO = '0'
THOST_FTDC_CFIOT_SwapCurrency = '1'
THOST_FTDC_CAT_Futures = '1'
THOST_FTDC_CAT_AssetmgrFuture = '2'
THOST_FTDC_CAT_AssetmgrTrustee = '3'
THOST_FTDC_CAT_AssetmgrTransfer = '4'
THOST_FTDC_LT_Chinese = '1'
THOST_FTDC_LT_English = '2'
THOST_FTDC_AMCT_Person = '1'
THOST_FTDC_AMCT_Organ = '2'
THOST_FTDC_AMCT_SpecialOrgan = '4'
THOST_FTDC_ASST_Futures = '3'
THOST_FTDC_ASST_SpecialOrgan = '4'
THOST_FTDC_CIT_HasExch = '0'
THOST_FTDC_CIT_HasATP = '1'
THOST_FTDC_CIT_HasDiff = '2'
THOST_FTDC_DT_HandDeliv = '1'
THOST_FTDC_DT_PersonDeliv = '2'
THOST_FTDC_MMSA_NO = '0'
THOST_FTDC_MMSA_YES = '1'
THOST_FTDC_CACT_Person = '0'
THOST_FTDC_CACT_Company = '1'
THOST_FTDC_CACT_Other = '2'
THOST_FTDC_UOAAT_Futures = '1'
THOST_FTDC_UOAAT_SpecialOrgan = '2'
THOST_FTDC_DEN_Buy = '0'
THOST_FTDC_DEN_Sell = '1'
THOST_FTDC_OFEN_Open = '0'
THOST_FTDC_OFEN_Close = '1'
THOST_FTDC_OFEN_ForceClose = '2'
THOST_FTDC_OFEN_CloseToday = '3'
THOST_FTDC_OFEN_CloseYesterday = '4'
THOST_FTDC_OFEN_ForceOff = '5'
THOST_FTDC_OFEN_LocalForceClose = '6'
THOST_FTDC_HFEN_Speculation = '1'
THOST_FTDC_HFEN_Arbitrage = '2'
THOST_FTDC_HFEN_Hedge = '3'
THOST_FTDC_FIOTEN_FundIO = '1'
THOST_FTDC_FIOTEN_Transfer = '2'
THOST_FTDC_FIOTEN_SwapCurrency = '3'
THOST_FTDC_FTEN_Deposite = '1'
THOST_FTDC_FTEN_ItemFund = '2'
THOST_FTDC_FTEN_Company = '3'
THOST_FTDC_FTEN_InnerTransfer = '4'
THOST_FTDC_FDEN_In = '1'
THOST_FTDC_FDEN_Out = '2'
THOST_FTDC_FMDEN_In = '1'
THOST_FTDC_FMDEN_Out = '2'
THOST_FTDC_CP_CallOptions = '1'
THOST_FTDC_CP_PutOptions = '2'
THOST_FTDC_STM_Continental = '0'
THOST_FTDC_STM_American = '1'
THOST_FTDC_STM_Bermuda = '2'
THOST_FTDC_STT_Hedge = '0'
THOST_FTDC_STT_Match = '1'
THOST_FTDC_APPT_NotStrikeNum = '4'
THOST_FTDC_GUDS_Gen = '0'
THOST_FTDC_GUDS_Hand = '1'
THOST_FTDC_OER_NoExec = 'n'
THOST_FTDC_OER_Canceled = 'c'
THOST_FTDC_OER_OK = '0'
THOST_FTDC_OER_NoPosition = '1'
THOST_FTDC_OER_NoDeposit = '2'
THOST_FTDC_OER_NoParticipant = '3'
THOST_FTDC_OER_NoClient = '4'
THOST_FTDC_OER_NoInstrument = '6'
THOST_FTDC_OER_NoRight = '7'
THOST_FTDC_OER_InvalidVolume = '8'
THOST_FTDC_OER_NoEnoughHistoryTrade = '9'
THOST_FTDC_OER_Unknown = 'a'
THOST_FTDC_COMBT_Future = '0'
THOST_FTDC_COMBT_BUL = '1'
THOST_FTDC_COMBT_BER = '2'
THOST_FTDC_COMBT_STD = '3'
THOST_FTDC_COMBT_STG = '4'
THOST_FTDC_COMBT_PRT = '5'
THOST_FTDC_COMBT_CLD = '6'
THOST_FTDC_COMBT_OPL = '7'
THOST_FTDC_COMBT_BFO = '8'
THOST_FTDC_DCECOMBT_SPL = '0'
THOST_FTDC_DCECOMBT_OPL = '1'
THOST_FTDC_DCECOMBT_SP = '2'
THOST_FTDC_DCECOMBT_SPC = '3'
THOST_FTDC_DCECOMBT_BLS = '4'
THOST_FTDC_DCECOMBT_BES = '5'
THOST_FTDC_DCECOMBT_CAS = '6'
THOST_FTDC_DCECOMBT_STD = '7'
THOST_FTDC_DCECOMBT_STG = '8'
THOST_FTDC_DCECOMBT_BFO = '9'
THOST_FTDC_DCECOMBT_SFO = 'a'
THOST_FTDC_ORPT_PreSettlementPrice = '1'
THOST_FTDC_ORPT_OpenPrice = '4'
THOST_FTDC_ORPT_MaxPreSettlementPrice = '5'
THOST_FTDC_BLAG_Default = '1'
THOST_FTDC_BLAG_IncludeOptValLost = '2'
THOST_FTDC_ACTP_Exec = '1'
THOST_FTDC_ACTP_Abandon = '2'
THOST_FTDC_FQST_Submitted = 'a'
THOST_FTDC_FQST_Accepted = 'b'
THOST_FTDC_FQST_Rejected = 'c'
THOST_FTDC_VM_Absolute = '0'
THOST_FTDC_VM_Ratio = '1'
THOST_FTDC_EOPF_Reserve = '0'
THOST_FTDC_EOPF_UnReserve = '1'
THOST_FTDC_EOCF_AutoClose = '0'
THOST_FTDC_EOCF_NotToClose = '1'
THOST_FTDC_PTE_Futures = '1'
THOST_FTDC_PTE_Options = '2'
THOST_FTDC_CUFN_CUFN_O = 'O'
THOST_FTDC_CUFN_CUFN_T = 'T'
THOST_FTDC_CUFN_CUFN_P = 'P'
THOST_FTDC_CUFN_CUFN_N = 'N'
THOST_FTDC_CUFN_CUFN_L = 'L'
THOST_FTDC_CUFN_CUFN_F = 'F'
THOST_FTDC_CUFN_CUFN_C = 'C'
THOST_FTDC_CUFN_CUFN_M = 'M'
THOST_FTDC_DUFN_DUFN_O = 'O'
THOST_FTDC_DUFN_DUFN_T = 'T'
THOST_FTDC_DUFN_DUFN_P = 'P'
THOST_FTDC_DUFN_DUFN_F = 'F'
THOST_FTDC_DUFN_DUFN_C = 'C'
THOST_FTDC_DUFN_DUFN_D = 'D'
THOST_FTDC_DUFN_DUFN_M = 'M'
THOST_FTDC_DUFN_DUFN_S = 'S'
THOST_FTDC_SUFN_SUFN_O = 'O'
THOST_FTDC_SUFN_SUFN_T = 'T'
THOST_FTDC_SUFN_SUFN_P = 'P'
THOST_FTDC_SUFN_SUFN_F = 'F'
THOST_FTDC_CFUFN_SUFN_T = 'T'
THOST_FTDC_CFUFN_SUFN_P = 'P'
THOST_FTDC_CFUFN_SUFN_F = 'F'
THOST_FTDC_CFUFN_SUFN_S = 'S'
THOST_FTDC_CMDR_Comb = '0'
THOST_FTDC_CMDR_UnComb = '1'
THOST_FTDC_CMDR_DelComb = '2'
THOST_FTDC_STOV_RealValue = '1'
THOST_FTDC_STOV_ProfitValue = '2'
THOST_FTDC_STOV_RealRatio = '3'
THOST_FTDC_STOV_ProfitRatio = '4'
THOST_FTDC_ROAST_Processing = '0'
THOST_FTDC_ROAST_Cancelled = '1'
THOST_FTDC_ROAST_Opened = '2'
THOST_FTDC_ROAST_Invalid = '3'
THOST_FTDC_WPSR_Lib = '1'
THOST_FTDC_WPSR_Manual = '2'
THOST_FTDC_OSCF_CloseSelfOptionPosition = '1'
THOST_FTDC_OSCF_ReserveOptionPosition = '2'
THOST_FTDC_OSCF_SellCloseSelfFuturePosition = '3'
THOST_FTDC_OSCF_ReserveFuturePosition = '4'
THOST_FTDC_BZTP_Future = '1'
THOST_FTDC_BZTP_Stock = '2'
THOST_FTDC_APP_TYPE_Investor = '1'
THOST_FTDC_APP_TYPE_InvestorRelay = '2'
THOST_FTDC_APP_TYPE_OperatorRelay = '3'
THOST_FTDC_APP_TYPE_UnKnown = '4'
THOST_FTDC_RV_Right = '0'
THOST_FTDC_RV_Refuse = '1'
THOST_FTDC_OTC_TRDT_Block = '0'
THOST_FTDC_OTC_TRDT_EFP = '1'
THOST_FTDC_OTC_MT_DV01 = '1'
THOST_FTDC_OTC_MT_ParValue = '2'
| mit | -3,915,091,831,393,606,700 | 30.931562 | 51 | 0.71729 | false |
flaviobarros/spyre | examples/stocks_example.py | 3 | 2387 | # tested with python2.7 and 3.4
from spyre import server
import pandas as pd
import json
try:
import urllib2
except ImportError:
import urllib.request as urllib2
class StockExample(server.App):
def __init__(self):
        # implements a simple caching mechanism to avoid repeated calls to the Yahoo Finance API
self.data_cache = None
self.params_cache = None
title = "Historical Stock Prices"
inputs = [{ "type":'dropdown',
"label": 'Company',
"options" : [ {"label": "Google", "value":"GOOG"},
{"label": "Yahoo", "value":"YHOO"},
{"label": "Apple", "value":"AAPL"}],
"value":'GOOG',
"key": 'ticker',
"action_id": "update_data"}]
controls = [{ "type" : "hidden",
"id" : "update_data"}]
tabs = ["Plot", "Table"]
outputs = [{ "type" : "plot",
"id" : "plot",
"control_id" : "update_data",
"tab" : "Plot"},
{ "type" : "table",
"id" : "table_id",
"control_id" : "update_data",
"tab" : "Table",
"on_page_load" : True }]
def getData(self, params):
params.pop("output_id",None) # caching layer
if self.params_cache!=params: # caching layer
ticker = params['ticker']
            # call the Yahoo Finance chart API for the last 3 months of historical quotes
api_url = 'https://chartapi.finance.yahoo.com/instrument/1.0/{}/chartdata;type=quote;range=3m/json'.format(ticker)
result = urllib2.urlopen(api_url).read()
data = json.loads(result.decode('utf-8').replace('finance_charts_json_callback( ','')[:-1]) # strip away the javascript and load json
self.company_name = data['meta']['Company-Name']
df = pd.DataFrame.from_records(data['series'])
df['Date'] = pd.to_datetime(df['Date'],format='%Y%m%d')
self.data_cache = df # caching layer
self.params_cache = params # caching layer
return self.data_cache
def getPlot(self, params):
        ### caching layer: wait until getData has cached results for these params, avoiding an extra Yahoo Finance API call (see the memoize_last sketch at the end of this file) ###
params.pop("output_id",None)
while self.params_cache!=params:
pass
###############################################################################################
df = self.getData(params)
plt_obj = df.set_index('Date').drop(['volume'],axis=1).plot()
plt_obj.set_ylabel("Price")
plt_obj.set_title(self.company_name)
fig = plt_obj.get_figure()
return fig
if __name__ == '__main__':
app = StockExample()
app.launch(port=9093)
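# ---------------------------------------------------------------------------
# Editor's note (not part of the original example): the ad-hoc "caching layer"
# above memoizes the last (params, data) pair. Below is a minimal sketch of
# the same idea as a reusable helper; the name `memoize_last` is illustrative
# and not a spyre API:
def memoize_last(fetch):
    cache = {'params': None, 'data': None}
    def wrapper(params):
        if cache['params'] != params:       # refetch only when params change
            cache['data'] = fetch(params)
            cache['params'] = dict(params)  # copy, so later pops on params don't alias the cache key
        return cache['data']
    return wrapper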
| mit | -3,858,695,206,226,695,700 | 31.256757 | 137 | 0.607457 | false |
chongtianfeiyu/kbengine | kbe/res/scripts/common/Lib/pty.py | 120 | 4763 | """Pseudo terminal utilities."""
# Bugs: No signal handling. Doesn't set slave termios and window size.
# Only tested on Linux.
# See: W. Richard Stevens. 1992. Advanced Programming in the
# UNIX Environment. Chapter 19.
# Author: Steen Lumholt -- with additions by Guido.
from select import select
import os
import tty
__all__ = ["openpty","fork","spawn"]
STDIN_FILENO = 0
STDOUT_FILENO = 1
STDERR_FILENO = 2
CHILD = 0
def openpty():
"""openpty() -> (master_fd, slave_fd)
Open a pty master/slave pair, using os.openpty() if possible."""
try:
return os.openpty()
except (AttributeError, OSError):
pass
master_fd, slave_name = _open_terminal()
slave_fd = slave_open(slave_name)
return master_fd, slave_fd
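# Editor's sketch (not part of the module): the two ends returned above are
# cross-connected, so bytes written on one fd can be read from the other:
#
#   m, s = openpty()
#   os.write(s, b'hi')
#   assert os.read(m, 2) == b'hi'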
def master_open():
"""master_open() -> (master_fd, slave_name)
    Open a pty master and return its file descriptor and the filename of the slave end.
Deprecated, use openpty() instead."""
try:
master_fd, slave_fd = os.openpty()
except (AttributeError, OSError):
pass
else:
slave_name = os.ttyname(slave_fd)
os.close(slave_fd)
return master_fd, slave_name
return _open_terminal()
def _open_terminal():
"""Open pty master and return (master_fd, tty_name)."""
for x in 'pqrstuvwxyzPQRST':
for y in '0123456789abcdef':
pty_name = '/dev/pty' + x + y
try:
fd = os.open(pty_name, os.O_RDWR)
except OSError:
continue
return (fd, '/dev/tty' + x + y)
raise OSError('out of pty devices')
def slave_open(tty_name):
"""slave_open(tty_name) -> slave_fd
Open the pty slave and acquire the controlling terminal, returning
    the opened file descriptor.
Deprecated, use openpty() instead."""
result = os.open(tty_name, os.O_RDWR)
try:
from fcntl import ioctl, I_PUSH
except ImportError:
return result
try:
ioctl(result, I_PUSH, "ptem")
ioctl(result, I_PUSH, "ldterm")
except OSError:
pass
return result
def fork():
"""fork() -> (pid, master_fd)
Fork and make the child a session leader with a controlling terminal."""
try:
pid, fd = os.forkpty()
except (AttributeError, OSError):
pass
else:
if pid == CHILD:
try:
os.setsid()
except OSError:
                # os.forkpty() already made us a session leader
pass
return pid, fd
master_fd, slave_fd = openpty()
pid = os.fork()
if pid == CHILD:
# Establish a new session.
os.setsid()
os.close(master_fd)
# Slave becomes stdin/stdout/stderr of child.
os.dup2(slave_fd, STDIN_FILENO)
os.dup2(slave_fd, STDOUT_FILENO)
os.dup2(slave_fd, STDERR_FILENO)
if (slave_fd > STDERR_FILENO):
os.close (slave_fd)
# Explicitly open the tty to make it become a controlling tty.
tmp_fd = os.open(os.ttyname(STDOUT_FILENO), os.O_RDWR)
os.close(tmp_fd)
else:
os.close(slave_fd)
# Parent and child process.
return pid, master_fd
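# Editor's sketch (not part of the module): the usual fork() pattern, which
# spawn() below implements in full; 'cat' is just an illustrative program:
#
#   pid, master_fd = fork()
#   if pid == CHILD:
#       os.execlp('cat', 'cat')           # child: runs on the slave end
#   else:
#       os.write(master_fd, b'hello\n')   # parent: drives the child's tty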
def _writen(fd, data):
"""Write all the data to a descriptor."""
while data:
n = os.write(fd, data)
data = data[n:]
def _read(fd):
"""Default read function."""
return os.read(fd, 1024)
def _copy(master_fd, master_read=_read, stdin_read=_read):
"""Parent copy loop.
    Copies
            pty master -> standard output   (master_read)
            standard input -> pty master    (stdin_read)"""
fds = [master_fd, STDIN_FILENO]
while True:
rfds, wfds, xfds = select(fds, [], [])
if master_fd in rfds:
data = master_read(master_fd)
if not data: # Reached EOF.
fds.remove(master_fd)
else:
os.write(STDOUT_FILENO, data)
if STDIN_FILENO in rfds:
data = stdin_read(STDIN_FILENO)
if not data:
fds.remove(STDIN_FILENO)
else:
_writen(master_fd, data)
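# Editor's sketch (not part of the module): the master_read/stdin_read hooks
# let a caller tee the session, e.g. a script(1)-style transcript recorder:
#
#   logfile = open('typescript', 'wb')
#   def read_and_log(fd):
#       data = os.read(fd, 1024)
#       logfile.write(data)
#       return data
#   spawn(['/bin/sh'], master_read=read_and_log)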
def spawn(argv, master_read=_read, stdin_read=_read):
"""Create a spawned process."""
if type(argv) == type(''):
argv = (argv,)
pid, master_fd = fork()
if pid == CHILD:
os.execlp(argv[0], *argv)
try:
mode = tty.tcgetattr(STDIN_FILENO)
tty.setraw(STDIN_FILENO)
restore = 1
except tty.error: # This is the same as termios.error
restore = 0
try:
_copy(master_fd, master_read, stdin_read)
except OSError:
if restore:
tty.tcsetattr(STDIN_FILENO, tty.TCSAFLUSH, mode)
os.close(master_fd)
return os.waitpid(pid, 0)[1]
| lgpl-3.0 | 3,724,770,915,884,572,000 | 27.017647 | 76 | 0.572538 | false |
shravya-ks/ECN-ns3 | src/bridge/bindings/modulegen__gcc_LP64.py | 2 | 194047 | from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
root_module = Module('ns.bridge', cpp_namespace='::ns3')
return root_module
def register_types(module):
root_module = module.get_root()
## address.h (module 'network'): ns3::Address [class]
module.add_class('Address', import_from_module='ns.network')
## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
## bridge-helper.h (module 'bridge'): ns3::BridgeHelper [class]
module.add_class('BridgeHelper')
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## hash.h (module 'core'): ns3::Hasher [class]
module.add_class('Hasher', import_from_module='ns.core')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
module.add_class('Ipv4Address', import_from_module='ns.network')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
module.add_class('Ipv4Mask', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
module.add_class('Ipv6Address', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
module.add_class('Ipv6Prefix', import_from_module='ns.network')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
module.add_class('Mac48Address', import_from_module='ns.network')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer [class]
module.add_class('NetDeviceContainer', import_from_module='ns.network')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## object-factory.h (module 'core'): ns3::ObjectFactory [class]
module.add_class('ObjectFactory', import_from_module='ns.core')
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
module.add_class('TagBuffer', import_from_module='ns.network')
## nstime.h (module 'core'): ns3::TimeWithUnit [class]
module.add_class('TimeWithUnit', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## int64x64-double.h (module 'core'): ns3::int64x64_t [class]
module.add_class('int64x64_t', import_from_module='ns.core')
## int64x64-double.h (module 'core'): ns3::int64x64_t::impl_type [enumeration]
module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NetDeviceQueue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NetDeviceQueue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::QueueItem', 'ns3::empty', 'ns3::DefaultDeleter<ns3::QueueItem>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## nstime.h (module 'core'): ns3::Time [class]
module.add_class('Time', import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time [class]
root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## channel.h (module 'network'): ns3::Channel [class]
module.add_class('Channel', import_from_module='ns.network', parent=root_module['ns3::Object'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class]
module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class]
module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class]
module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class]
module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class]
module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class]
module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class]
module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class]
module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class]
module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class]
module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## net-device.h (module 'network'): ns3::NetDevice [class]
module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration]
module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
## net-device.h (module 'network'): ns3::NetDeviceQueue [class]
module.add_class('NetDeviceQueue', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >'])
## net-device.h (module 'network'): ns3::NetDeviceQueueInterface [class]
module.add_class('NetDeviceQueueInterface', import_from_module='ns.network', parent=root_module['ns3::Object'])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## net-device.h (module 'network'): ns3::QueueItem [class]
module.add_class('QueueItem', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >'])
## net-device.h (module 'network'): ns3::QueueItem::Uint8Values [enumeration]
module.add_enum('Uint8Values', ['IP_DSFIELD'], outer_class=root_module['ns3::QueueItem'], import_from_module='ns.network')
## nstime.h (module 'core'): ns3::TimeValue [class]
module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## address.h (module 'network'): ns3::AddressChecker [class]
module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## address.h (module 'network'): ns3::AddressValue [class]
module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## bridge-channel.h (module 'bridge'): ns3::BridgeChannel [class]
module.add_class('BridgeChannel', parent=root_module['ns3::Channel'])
## bridge-net-device.h (module 'bridge'): ns3::BridgeNetDevice [class]
module.add_class('BridgeNetDevice', parent=root_module['ns3::NetDevice'])
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace Hash
nested_module = module.add_cpp_namespace('Hash')
register_types_ns3_Hash(nested_module)
## Register a nested module for the namespace TracedValueCallback
nested_module = module.add_cpp_namespace('TracedValueCallback')
register_types_ns3_TracedValueCallback(nested_module)
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_types_ns3_Hash(module):
root_module = module.get_root()
## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash32Function_ptr*')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash32Function_ptr&')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash64Function_ptr*')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash64Function_ptr&')
## Register a nested module for the namespace Function
nested_module = module.add_cpp_namespace('Function')
register_types_ns3_Hash_Function(nested_module)
def register_types_ns3_Hash_Function(module):
root_module = module.get_root()
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
def register_types_ns3_TracedValueCallback(module):
root_module = module.get_root()
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) *', u'ns3::TracedValueCallback::Time')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) **', u'ns3::TracedValueCallback::Time*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) *&', u'ns3::TracedValueCallback::Time&')
def register_methods(root_module):
register_Ns3Address_methods(root_module, root_module['ns3::Address'])
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
register_Ns3BridgeHelper_methods(root_module, root_module['ns3::BridgeHelper'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
register_Ns3NetDeviceContainer_methods(root_module, root_module['ns3::NetDeviceContainer'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
register_Ns3SimpleRefCount__Ns3NetDeviceQueue_Ns3Empty_Ns3DefaultDeleter__lt__ns3NetDeviceQueue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >'])
register_Ns3SimpleRefCount__Ns3QueueItem_Ns3Empty_Ns3DefaultDeleter__lt__ns3QueueItem__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3Time_methods(root_module, root_module['ns3::Time'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3Channel_methods(root_module, root_module['ns3::Channel'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
register_Ns3NetDeviceQueue_methods(root_module, root_module['ns3::NetDeviceQueue'])
register_Ns3NetDeviceQueueInterface_methods(root_module, root_module['ns3::NetDeviceQueueInterface'])
register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
register_Ns3QueueItem_methods(root_module, root_module['ns3::QueueItem'])
register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
register_Ns3BridgeChannel_methods(root_module, root_module['ns3::BridgeChannel'])
register_Ns3BridgeNetDevice_methods(root_module, root_module['ns3::BridgeNetDevice'])
register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
return
def register_Ns3Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## address.h (module 'network'): ns3::Address::Address() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor]
cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor]
cls.add_constructor([param('ns3::Address const &', 'address')])
## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function]
cls.add_method('CheckCompatible',
'bool',
[param('uint8_t', 'type'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyAllFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function]
cls.add_method('CopyAllTo',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function]
cls.add_method('CopyTo',
'uint32_t',
[param('uint8_t *', 'buffer')],
is_const=True)
## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'buffer')])
## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function]
cls.add_method('GetLength',
'uint8_t',
[],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function]
cls.add_method('IsInvalid',
'bool',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function]
cls.add_method('IsMatchingType',
'bool',
[param('uint8_t', 'type')],
is_const=True)
## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function]
cls.add_method('Register',
'uint8_t',
[],
is_static=True)
## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'buffer')],
is_const=True)
return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
cls.add_method('Begin',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
cls.add_method('End',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('Find',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True)
return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
return
def register_Ns3BridgeHelper_methods(root_module, cls):
## bridge-helper.h (module 'bridge'): ns3::BridgeHelper::BridgeHelper(ns3::BridgeHelper const & arg0) [copy constructor]
cls.add_constructor([param('ns3::BridgeHelper const &', 'arg0')])
## bridge-helper.h (module 'bridge'): ns3::BridgeHelper::BridgeHelper() [constructor]
cls.add_constructor([])
## bridge-helper.h (module 'bridge'): ns3::NetDeviceContainer ns3::BridgeHelper::Install(ns3::Ptr<ns3::Node> node, ns3::NetDeviceContainer c) [member function]
cls.add_method('Install',
'ns3::NetDeviceContainer',
[param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::NetDeviceContainer', 'c')])
## bridge-helper.h (module 'bridge'): ns3::NetDeviceContainer ns3::BridgeHelper::Install(std::string nodeName, ns3::NetDeviceContainer c) [member function]
cls.add_method('Install',
'ns3::NetDeviceContainer',
[param('std::string', 'nodeName'), param('ns3::NetDeviceContainer', 'c')])
## bridge-helper.h (module 'bridge'): void ns3::BridgeHelper::SetDeviceAttribute(std::string n1, ns3::AttributeValue const & v1) [member function]
cls.add_method('SetDeviceAttribute',
'void',
[param('std::string', 'n1'), param('ns3::AttributeValue const &', 'v1')])
return
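# Editor's note: a hedged sketch of how the BridgeHelper bindings registered
# above are typically used from Python (assumes an ns-3 build with Python
# bindings enabled, and that `node` and `devs` were created with the usual
# node/device helpers):
#
#   import ns.bridge, ns.core
#   helper = ns.bridge.BridgeHelper()
#   helper.SetDeviceAttribute('Mtu', ns.core.UintegerValue(1500))
#   bridged = helper.Install(node, devs)   # -> ns3::NetDeviceContainer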
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
return
def register_Ns3Hasher_methods(root_module, cls):
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor]
cls.add_constructor([])
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('std::string const', 's')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('std::string const', 's')])
## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function]
cls.add_method('clear',
'ns3::Hasher &',
[])
return
def register_Ns3Ipv4Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor]
cls.add_constructor([param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('CombineMask',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv4Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv4Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('GetSubnetDirectedBroadcast',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsAny() const [member function]
cls.add_method('IsAny',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Address const &', 'other')],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function]
cls.add_method('IsLocalMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalhost() const [member function]
cls.add_method('IsLocalhost',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('IsSubnetDirectedBroadcast',
'bool',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
return
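## Illustrative sketch (not part of the generated registrations): basic use of
## the Ipv4Address binding above. Assumes the compiled classes are importable
## from `ns.network`.
def _example_ipv4_address_usage():
    from ns.network import Ipv4Address, Ipv4Mask
    addr = Ipv4Address("10.1.1.5")                    # char const * constructor
    mask = Ipv4Mask("255.255.255.0")
    network = addr.CombineMask(mask)                  # -> 10.1.1.0
    bcast = addr.GetSubnetDirectedBroadcast(mask)     # -> 10.1.1.255
    return network, bcast, addr.IsBroadcast()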
def register_Ns3Ipv4Mask_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor]
cls.add_constructor([param('uint32_t', 'mask')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor]
cls.add_constructor([param('char const *', 'mask')])
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function]
cls.add_method('GetInverse',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint16_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Mask', 'other')],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'mask')])
return
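## Illustrative sketch (not part of the generated registrations): the Ipv4Mask
## binding in use, again assuming an `ns.network` package.
def _example_ipv4_mask_usage():
    from ns.network import Ipv4Address, Ipv4Mask
    mask = Ipv4Mask("255.255.255.0")
    length = mask.GetPrefixLength()                     # 24
    same_net = mask.IsMatch(Ipv4Address("10.0.0.1"),
                            Ipv4Address("10.0.0.200"))  # True: same /24
    return length, same_net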
def register_Ns3Ipv6Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor]
cls.add_constructor([param('uint8_t *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [copy constructor]
cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function]
cls.add_method('CombinePrefix',
'ns3::Ipv6Address',
[param('ns3::Ipv6Prefix const &', 'prefix')])
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv6Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv6Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function]
cls.add_method('GetAllHostsMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function]
cls.add_method('GetAllNodesMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function]
cls.add_method('GetAllRoutersMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function]
cls.add_method('GetIpv4MappedAddress',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function]
cls.add_method('IsAllHostsMulticast',
'bool',
[],
deprecated=True, is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function]
cls.add_method('IsAllNodesMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function]
cls.add_method('IsAllRoutersMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function]
cls.add_method('IsAny',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsDocumentation() const [member function]
cls.add_method('IsDocumentation',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Address const &', 'other')],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() const [member function]
cls.add_method('IsIpv4MappedAddress',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function]
cls.add_method('IsLinkLocal',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function]
cls.add_method('IsLinkLocalMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function]
cls.add_method('IsLocalhost',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function]
cls.add_method('IsSolicitedMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac16Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac16Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac64Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac64Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac16Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac16Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac64Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac64Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function]
cls.add_method('MakeIpv4MappedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv4Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function]
cls.add_method('MakeSolicitedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv6Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function]
cls.add_method('Set',
'void',
[param('uint8_t *', 'address')])
return
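## Illustrative sketch (not part of the generated registrations): a few of the
## Ipv6Address helpers registered above. Import paths are assumptions.
def _example_ipv6_address_usage():
    from ns.network import Ipv4Address, Ipv6Address, Mac48Address
    addr = Ipv6Address("2001:db8::1")
    mapped = Ipv6Address.MakeIpv4MappedAddress(Ipv4Address("192.0.2.1"))
    ll = Ipv6Address.MakeAutoconfiguredLinkLocalAddress(
        Mac48Address("00:00:00:00:00:01"))
    return addr.IsMulticast(), mapped.IsIpv4MappedAddress(), ll.IsLinkLocal()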
def register_Ns3Ipv6Prefix_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor]
cls.add_constructor([param('uint8_t *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor]
cls.add_constructor([param('char const *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor]
cls.add_constructor([param('uint8_t', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint8_t',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Prefix const &', 'other')],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
return
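## Illustrative sketch (not part of the generated registrations): Ipv6Prefix
## mirrors Ipv4Mask; the integer constructor takes a prefix length.
def _example_ipv6_prefix_usage():
    from ns.network import Ipv6Address, Ipv6Prefix
    prefix = Ipv6Prefix(64)               # uint8_t prefix-length constructor
    a = Ipv6Address("2001:db8::1")
    b = Ipv6Address("2001:db8::2")
    return prefix.GetPrefixLength(), prefix.IsMatch(a, b)   # (64, True)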
def register_Ns3Mac48Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(ns3::Mac48Address const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Mac48Address const &', 'arg0')])
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(char const * str) [constructor]
cls.add_constructor([param('char const *', 'str')])
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::Allocate() [member function]
cls.add_method('Allocate',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Mac48Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyFrom(uint8_t const * buffer) [member function]
cls.add_method('CopyFrom',
'void',
[param('uint8_t const *', 'buffer')])
## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyTo(uint8_t * buffer) const [member function]
cls.add_method('CopyTo',
'void',
[param('uint8_t *', 'buffer')],
is_const=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv4Address address) [member function]
cls.add_method('GetMulticast',
'ns3::Mac48Address',
[param('ns3::Ipv4Address', 'address')],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv6Address address) [member function]
cls.add_method('GetMulticast',
'ns3::Mac48Address',
[param('ns3::Ipv6Address', 'address')],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast6Prefix() [member function]
cls.add_method('GetMulticast6Prefix',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticastPrefix() [member function]
cls.add_method('GetMulticastPrefix',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True)
## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsGroup() const [member function]
cls.add_method('IsGroup',
'bool',
[],
is_const=True)
## mac48-address.h (module 'network'): static bool ns3::Mac48Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
return
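## Illustrative sketch (not part of the generated registrations): Mac48Address
## allocation and the IPv4-multicast-to-MAC mapping registered above.
def _example_mac48_address_usage():
    from ns.network import Ipv4Address, Mac48Address
    mac = Mac48Address.Allocate()              # next address from the global allocator
    fixed = Mac48Address("00:00:00:00:00:01")  # char const * constructor
    mcast = Mac48Address.GetMulticast(Ipv4Address("224.0.0.1"))
    return mac, fixed.IsBroadcast(), mcast.IsGroup()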
def register_Ns3NetDeviceContainer_methods(root_module, cls):
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NetDeviceContainer const &', 'arg0')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer() [constructor]
cls.add_constructor([])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::Ptr<ns3::NetDevice> dev) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::NetDevice >', 'dev')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(std::string devName) [constructor]
cls.add_constructor([param('std::string', 'devName')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & a, ns3::NetDeviceContainer const & b) [constructor]
cls.add_constructor([param('ns3::NetDeviceContainer const &', 'a'), param('ns3::NetDeviceContainer const &', 'b')])
## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::NetDeviceContainer other) [member function]
cls.add_method('Add',
'void',
[param('ns3::NetDeviceContainer', 'other')])
## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::Ptr<ns3::NetDevice> device) [member function]
cls.add_method('Add',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'device')])
## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(std::string deviceName) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'deviceName')])
## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*,std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::Begin() const [member function]
cls.add_method('Begin',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >',
[],
is_const=True)
## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*,std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::End() const [member function]
cls.add_method('End',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >',
[],
is_const=True)
## net-device-container.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::NetDeviceContainer::Get(uint32_t i) const [member function]
cls.add_method('Get',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'i')],
is_const=True)
## net-device-container.h (module 'network'): uint32_t ns3::NetDeviceContainer::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
return
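## Illustrative sketch (not part of the generated registrations): indexed
## iteration is the usual pattern from Python, since Begin()/End() return raw
## C++ iterators. `devices` is assumed to be a populated NetDeviceContainer,
## e.g. the return value of a device helper's Install() call.
def _example_net_device_container_usage(devices):
    for i in range(devices.GetN()):
        dev = devices.Get(i)        # Ptr<NetDevice>
        # ... configure or inspect `dev` here ...
    return devices.GetN()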
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeConstructionList const &', 'attributes')],
visibility='protected')
## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
visibility='protected', is_virtual=True)
return
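## Illustrative sketch (not part of the generated registrations): attribute
## access through the ObjectBase binding. `StringValue` and the attribute name
## "Name" are assumptions for illustration; any bound AttributeValue subclass
## and any attribute of the concrete object follow the same pattern.
def _example_object_base_attributes(obj):
    from ns.core import StringValue
    ok = obj.SetAttributeFailSafe("Name", StringValue("example"))  # False on unknown name
    value = StringValue()
    if ok:
        obj.GetAttribute("Name", value)   # fills `value` in place
    return ok, value.Get()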
def register_Ns3ObjectDeleter_methods(root_module, cls):
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
cls.add_constructor([])
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Object *', 'object')],
is_static=True)
return
def register_Ns3ObjectFactory_methods(root_module, cls):
cls.add_output_stream_operator()
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor]
cls.add_constructor([param('std::string', 'typeId')])
## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::Object >',
[],
is_const=True)
## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('Set',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('ns3::TypeId', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('char const *', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('std::string', 'tid')])
return
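## Illustrative sketch (not part of the generated registrations): the
## configure-then-create pattern with ObjectFactory. The TypeId string and
## attribute name are hypothetical placeholders.
def _example_object_factory_usage():
    from ns.core import ObjectFactory, StringValue
    factory = ObjectFactory()
    factory.SetTypeId("ns3::SomeConfiguredType")        # std::string overload
    factory.Set("SomeAttribute", StringValue("value"))  # applied to every Create()
    return factory.Create()                             # Ptr<Object>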
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3TagBuffer_methods(root_module, cls):
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
cls.add_method('CopyFrom',
'void',
[param('ns3::TagBuffer', 'o')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function]
cls.add_method('ReadDouble',
'double',
[])
## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
cls.add_method('TrimAtEnd',
'void',
[param('uint32_t', 'trim')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function]
cls.add_method('WriteDouble',
'void',
[param('double', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'data')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'data')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'v')])
return
def register_Ns3TimeWithUnit_methods(root_module, cls):
cls.add_output_stream_operator()
## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::TimeWithUnit const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')])
## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::Time const time, ns3::Time::Unit const unit) [constructor]
cls.add_constructor([param('ns3::Time const', 'time'), param('ns3::Time::Unit const', 'unit')])
return
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')],
deprecated=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor, std::string callback) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
cls.add_method('GetAttribute',
'ns3::TypeId::AttributeInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function]
cls.add_method('GetHash',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint32_t', 'i')],
is_static=True)
## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint32_t',
[],
is_static=True)
## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function]
cls.add_method('GetSize',
'std::size_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
cls.add_method('GetTraceSource',
'ns3::TypeId::TraceSourceInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member function]
cls.add_method('LookupByHash',
'ns3::TypeId',
[param('uint32_t', 'hash')],
is_static=True)
## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function]
cls.add_method('LookupByHashFailSafe',
'bool',
[param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')],
is_static=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function]
cls.add_method('SetSize',
'ns3::TypeId',
[param('std::size_t', 'size')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'tid')])
return
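## Illustrative sketch (not part of the generated registrations): runtime
## introspection through the TypeId binding. "ns3::Object" is used because its
## TypeId is registered by the core module.
def _example_type_id_introspection():
    from ns.core import TypeId
    tid = TypeId.LookupByName("ns3::Object")
    names = [tid.GetAttributeFullName(i) for i in range(tid.GetAttributeN())]
    return tid.GetName(), tid.HasParent(), names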
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::callback [variable]
cls.add_instance_attribute('callback', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
return
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
def register_Ns3Int64x64_t_methods(root_module, cls):
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_unary_numeric_operator('-')
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', u'right'))
cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', u'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', u'right'))
cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', u'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
cls.add_constructor([])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long double v) [constructor]
cls.add_constructor([param('long double', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor]
cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
cls.add_method('GetHigh',
'int64_t',
[],
is_const=True)
## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
cls.add_method('GetLow',
'uint64_t',
[],
is_const=True)
## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function]
cls.add_method('Invert',
'ns3::int64x64_t',
[param('uint64_t', 'v')],
is_static=True)
## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
cls.add_method('MulByInvert',
'void',
[param('ns3::int64x64_t const &', 'o')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::implementation [variable]
cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const', is_const=True)
return
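## Illustrative sketch (not part of the generated registrations): int64x64_t
## behaves like a fixed-point number under the operators registered above.
def _example_int64x64_usage():
    from ns.core import int64x64_t
    a = int64x64_t(1.5)             # double constructor
    b = int64x64_t(2, 0)            # (hi, lo) constructor: integer part 2
    c = (a + b) * int64x64_t(2)     # arithmetic via the bound operators
    return c.GetDouble()            # 7.0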
def register_Ns3Object_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::Object() [constructor]
cls.add_constructor([])
## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
cls.add_method('AggregateObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'other')])
## object.h (module 'core'): void ns3::Object::Dispose() [member function]
cls.add_method('Dispose',
'void',
[])
## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
cls.add_method('GetAggregateIterator',
'ns3::Object::AggregateIterator',
[],
is_const=True)
## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object.h (module 'core'): void ns3::Object::Initialize() [member function]
cls.add_method('Initialize',
'void',
[])
## object.h (module 'core'): bool ns3::Object::IsInitialized() const [member function]
cls.add_method('IsInitialized',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
cls.add_constructor([param('ns3::Object const &', 'o')],
visibility='protected')
## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
cls.add_method('DoInitialize',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
cls.add_method('NotifyNewAggregate',
'void',
[],
visibility='protected', is_virtual=True)
return
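## Illustrative sketch (not part of the generated registrations): object
## aggregation through the Object binding. `a` and `b` are assumed to be ns-3
## Objects of *different* concrete types, since aggregating two objects of the
## same TypeId is not supported.
def _example_object_aggregation(a, b):
    a.AggregateObject(b)                 # b becomes reachable from a, and vice versa
    it = a.GetAggregateIterator()
    aggregated = []
    while it.HasNext():
        aggregated.append(it.Next())     # Ptr<const Object>
    return aggregated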
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
cls.add_constructor([])
## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
cls.add_method('Next',
'ns3::Ptr< ns3::Object const >',
[])
return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3NetDeviceQueue_Ns3Empty_Ns3DefaultDeleter__lt__ns3NetDeviceQueue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter< ns3::NetDeviceQueue > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3QueueItem_Ns3Empty_Ns3DefaultDeleter__lt__ns3QueueItem__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >::SimpleRefCount(ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::QueueItem, ns3::empty, ns3::DefaultDeleter< ns3::QueueItem > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3Time_methods(root_module, cls):
    cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', 'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
    cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', 'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', 'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
## nstime.h (module 'core'): ns3::Time::Time() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor]
cls.add_constructor([param('ns3::Time const &', 'o')])
## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & v) [constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
cls.add_constructor([param('std::string const &', 's')])
## nstime.h (module 'core'): ns3::TimeWithUnit ns3::Time::As(ns3::Time::Unit const unit) const [member function]
cls.add_method('As',
'ns3::TimeWithUnit',
[param('ns3::Time::Unit const', 'unit')],
is_const=True)
## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
cls.add_method('Compare',
'int',
[param('ns3::Time const &', 'o')],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value, ns3::Time::Unit unit) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit unit) [member function]
cls.add_method('FromDouble',
'ns3::Time',
[param('double', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit unit) [member function]
cls.add_method('FromInteger',
'ns3::Time',
[param('uint64_t', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetDays() const [member function]
cls.add_method('GetDays',
'double',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
cls.add_method('GetFemtoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetHours() const [member function]
cls.add_method('GetHours',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
cls.add_method('GetInteger',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
cls.add_method('GetMicroSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
cls.add_method('GetMilliSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetMinutes() const [member function]
cls.add_method('GetMinutes',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
cls.add_method('GetNanoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
cls.add_method('GetPicoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
cls.add_method('GetResolution',
'ns3::Time::Unit',
[],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
cls.add_method('GetSeconds',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
cls.add_method('GetTimeStep',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetYears() const [member function]
cls.add_method('GetYears',
'double',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
cls.add_method('IsNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
cls.add_method('IsPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
cls.add_method('IsStrictlyNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
cls.add_method('IsStrictlyPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
cls.add_method('IsZero',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::Max() [member function]
cls.add_method('Max',
'ns3::Time',
[],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::Min() [member function]
cls.add_method('Min',
'ns3::Time',
[],
is_static=True)
## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
cls.add_method('SetResolution',
'void',
[param('ns3::Time::Unit', 'resolution')],
is_static=True)
## nstime.h (module 'core'): static bool ns3::Time::StaticInit() [member function]
cls.add_method('StaticInit',
'bool',
[],
is_static=True)
## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit unit) const [member function]
cls.add_method('To',
'ns3::int64x64_t',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit unit) const [member function]
cls.add_method('ToDouble',
'double',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit unit) const [member function]
cls.add_method('ToInteger',
'int64_t',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
return
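## Illustrative usage (not produced by the API scanner): the registrations above
## expose ns3::Time construction, arithmetic, comparisons and unit conversions to
## Python. The sketch below assumes the compiled 'ns.core' extension module is
## importable; it is never called by the bindings themselves.
def _example_time_usage():
    import ns.core
    t = ns.core.Seconds(1.5) + ns.core.MilliSeconds(500)  # operator+ registered above
    assert t.GetMilliSeconds() == 2000                     # int64_t accessor
    assert t.IsStrictlyPositive()                          # boolean predicate
    return t.ToDouble(ns.core.Time.S)                      # unit conversion -> 2.0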
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
cls.add_method('CreateValidValue',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::AttributeValue const &', 'value')],
is_const=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
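## AttributeValue is the abstract base of every concrete *Value class registered
## in this file; subclasses implement Copy/SerializeToString/DeserializeFromString.
## Illustrative sketch only, assuming 'ns.core' is importable and that the
## MakeStringChecker free function is exposed (as in the stock core module):
def _example_attribute_value_roundtrip():
    import ns.core
    value = ns.core.StringValue("hello")       # a concrete AttributeValue
    checker = ns.core.MakeStringChecker()      # the matching AttributeChecker
    text = value.SerializeToString(checker)    # -> "hello"
    copy = ns.core.StringValue()
    copy.DeserializeFromString(text, checker)  # rebuild from the string form
    return copy.Get()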
def register_Ns3CallbackChecker_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
return
def register_Ns3CallbackImplBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
## callback.h (module 'core'): std::string ns3::CallbackImplBase::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::Demangle(std::string const & mangled) [member function]
cls.add_method('Demangle',
'std::string',
[param('std::string const &', 'mangled')],
is_static=True, visibility='protected')
return
def register_Ns3CallbackValue_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
cls.add_method('Set',
'void',
[param('ns3::CallbackBase', 'base')])
return
def register_Ns3Channel_methods(root_module, cls):
## channel.h (module 'network'): ns3::Channel::Channel(ns3::Channel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Channel const &', 'arg0')])
## channel.h (module 'network'): ns3::Channel::Channel() [constructor]
cls.add_constructor([])
## channel.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Channel::GetDevice(uint32_t i) const [member function]
cls.add_method('GetDevice',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'i')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## channel.h (module 'network'): uint32_t ns3::Channel::GetId() const [member function]
cls.add_method('GetId',
'uint32_t',
[],
is_const=True)
## channel.h (module 'network'): uint32_t ns3::Channel::GetNDevices() const [member function]
cls.add_method('GetNDevices',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## channel.h (module 'network'): static ns3::TypeId ns3::Channel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, visibility='private', is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
visibility='private', is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')])
return
def register_Ns3Ipv4AddressValue_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor]
cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv4Address const &', 'value')])
return
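## Ipv4AddressValue wraps an ns3::Ipv4Address for the attribute system; Get/Set
## convert between the wrapper and the raw address. Illustrative sketch, assuming
## the compiled 'ns.network' module is importable:
def _example_ipv4_address_value():
    import ns.network
    v = ns.network.Ipv4AddressValue(ns.network.Ipv4Address("10.1.1.1"))
    assert v.Get() == ns.network.Ipv4Address("10.1.1.1")  # operator== is bound on Ipv4Address
    v.Set(ns.network.Ipv4Address("10.1.1.2"))             # replace the wrapped address
    return v.Get()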
def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
return
def register_Ns3Ipv4MaskValue_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor]
cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv4Mask',
[],
is_const=True)
## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv4Mask const &', 'value')])
return
def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])
return
def register_Ns3Ipv6AddressValue_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv6Address',
[],
is_const=True)
## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv6Address const &', 'value')])
return
def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
return
def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv6Prefix',
[],
is_const=True)
## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv6Prefix const &', 'value')])
return
def register_Ns3Mac48AddressChecker_methods(root_module, cls):
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker(ns3::Mac48AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Mac48AddressChecker const &', 'arg0')])
return
def register_Ns3Mac48AddressValue_methods(root_module, cls):
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48Address const & value) [constructor]
cls.add_constructor([param('ns3::Mac48Address const &', 'value')])
## mac48-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Mac48AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## mac48-address.h (module 'network'): bool ns3::Mac48AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## mac48-address.h (module 'network'): ns3::Mac48Address ns3::Mac48AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Mac48Address',
[],
is_const=True)
## mac48-address.h (module 'network'): std::string ns3::Mac48AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## mac48-address.h (module 'network'): void ns3::Mac48AddressValue::Set(ns3::Mac48Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Mac48Address const &', 'value')])
return
def register_Ns3NetDevice_methods(root_module, cls):
## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor]
cls.add_constructor([])
## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('AddLinkChangeCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::Channel >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]
cls.add_method('GetIfIndex',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]
cls.add_method('GetMtu',
'uint16_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv4Address', 'multicastGroup')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv6Address', 'addr')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]
cls.add_method('IsBridge',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function]
cls.add_method('IsLinkUp',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function]
cls.add_method('IsPointToPoint',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function]
cls.add_method('NeedsArp',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Send',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('SendFrom',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::Address', 'address')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function]
cls.add_method('SetIfIndex',
'void',
[param('uint32_t const', 'index')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function]
cls.add_method('SetMtu',
'bool',
[param('uint16_t const', 'mtu')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetPromiscReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function]
cls.add_method('SupportsSendFrom',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3NetDeviceQueue_methods(root_module, cls):
## net-device.h (module 'network'): ns3::NetDeviceQueue::NetDeviceQueue(ns3::NetDeviceQueue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NetDeviceQueue const &', 'arg0')])
## net-device.h (module 'network'): ns3::NetDeviceQueue::NetDeviceQueue() [constructor]
cls.add_constructor([])
## net-device.h (module 'network'): bool ns3::NetDeviceQueue::IsStopped() const [member function]
cls.add_method('IsStopped',
'bool',
[],
is_const=True)
## net-device.h (module 'network'): void ns3::NetDeviceQueue::SetWakeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetWakeCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDeviceQueue::Start() [member function]
cls.add_method('Start',
'void',
[],
is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDeviceQueue::Stop() [member function]
cls.add_method('Stop',
'void',
[],
is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDeviceQueue::Wake() [member function]
cls.add_method('Wake',
'void',
[],
is_virtual=True)
return
def register_Ns3NetDeviceQueueInterface_methods(root_module, cls):
## net-device.h (module 'network'): ns3::NetDeviceQueueInterface::NetDeviceQueueInterface(ns3::NetDeviceQueueInterface const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NetDeviceQueueInterface const &', 'arg0')])
## net-device.h (module 'network'): ns3::NetDeviceQueueInterface::NetDeviceQueueInterface() [constructor]
cls.add_constructor([])
## net-device.h (module 'network'): void ns3::NetDeviceQueueInterface::CreateTxQueues() [member function]
cls.add_method('CreateTxQueues',
'void',
[])
## net-device.h (module 'network'): uint8_t ns3::NetDeviceQueueInterface::GetNTxQueues() const [member function]
cls.add_method('GetNTxQueues',
'uint8_t',
[],
is_const=True)
## net-device.h (module 'network'): ns3::Callback<unsigned char, ns3::Ptr<ns3::QueueItem>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ns3::NetDeviceQueueInterface::GetSelectQueueCallback() const [member function]
cls.add_method('GetSelectQueueCallback',
'ns3::Callback< unsigned char, ns3::Ptr< ns3::QueueItem >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::NetDeviceQueue> ns3::NetDeviceQueueInterface::GetTxQueue(uint8_t i) const [member function]
cls.add_method('GetTxQueue',
'ns3::Ptr< ns3::NetDeviceQueue >',
[param('uint8_t', 'i')],
is_const=True)
## net-device.h (module 'network'): static ns3::TypeId ns3::NetDeviceQueueInterface::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## net-device.h (module 'network'): void ns3::NetDeviceQueueInterface::SetSelectQueueCallback(ns3::Callback<unsigned char, ns3::Ptr<ns3::QueueItem>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetSelectQueueCallback',
'void',
[param('ns3::Callback< unsigned char, ns3::Ptr< ns3::QueueItem >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')])
## net-device.h (module 'network'): void ns3::NetDeviceQueueInterface::SetTxQueuesN(uint8_t numTxQueues) [member function]
cls.add_method('SetTxQueuesN',
'void',
[param('uint8_t', 'numTxQueues')])
## net-device.h (module 'network'): void ns3::NetDeviceQueueInterface::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')])
return
def register_Ns3ObjectFactoryValue_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function]
cls.add_method('Get',
'ns3::ObjectFactory',
[],
is_const=True)
## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::ObjectFactory const &', 'value')])
return
def register_Ns3QueueItem_methods(root_module, cls):
cls.add_output_stream_operator()
## net-device.h (module 'network'): ns3::QueueItem::QueueItem(ns3::Ptr<ns3::Packet> p) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Packet >', 'p')])
## net-device.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::QueueItem::GetPacket() const [member function]
cls.add_method('GetPacket',
'ns3::Ptr< ns3::Packet >',
[],
is_const=True)
## net-device.h (module 'network'): uint32_t ns3::QueueItem::GetPacketSize() const [member function]
cls.add_method('GetPacketSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::QueueItem::GetUint8Value(ns3::QueueItem::Uint8Values field, uint8_t & value) const [member function]
cls.add_method('GetUint8Value',
'bool',
[param('ns3::QueueItem::Uint8Values', 'field'), param('uint8_t &', 'value')],
is_const=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::QueueItem::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
return
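## QueueItem wraps a packet while it waits in a device transmission queue; the
## bridge module only needs the base interface registered above. Illustrative
## sketch, assuming 'ns.network' is importable and QueueItem is directly
## constructible from Python:
def _example_queue_item():
    import ns.network
    item = ns.network.QueueItem(ns.network.Packet(100))  # wrap a 100-byte packet
    assert item.GetPacketSize() == 100                    # size of the wrapped packet
    return item.GetPacket()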
def register_Ns3TimeValue_methods(root_module, cls):
## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor]
cls.add_constructor([param('ns3::Time const &', 'value')])
## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function]
cls.add_method('Get',
'ns3::Time',
[],
is_const=True)
## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Time const &', 'value')])
return
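## TimeValue is the attribute wrapper for ns3::Time, i.e. what you pass when
## setting a Time-typed attribute. Illustrative sketch, assuming 'ns.core' is
## importable:
def _example_time_value():
    import ns.core
    v = ns.core.TimeValue(ns.core.Seconds(2))  # wrap a Time in an AttributeValue
    assert v.Get().GetSeconds() == 2.0         # unwrap with Get()
    return v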
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_Ns3AddressChecker_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')])
return
def register_Ns3AddressValue_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AddressValue const &', 'arg0')])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor]
cls.add_constructor([param('ns3::Address const &', 'value')])
## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Address',
[],
is_const=True)
## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Address const &', 'value')])
return
def register_Ns3BridgeChannel_methods(root_module, cls):
## bridge-channel.h (module 'bridge'): static ns3::TypeId ns3::BridgeChannel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## bridge-channel.h (module 'bridge'): ns3::BridgeChannel::BridgeChannel() [constructor]
cls.add_constructor([])
## bridge-channel.h (module 'bridge'): void ns3::BridgeChannel::AddChannel(ns3::Ptr<ns3::Channel> bridgedChannel) [member function]
cls.add_method('AddChannel',
'void',
[param('ns3::Ptr< ns3::Channel >', 'bridgedChannel')])
## bridge-channel.h (module 'bridge'): uint32_t ns3::BridgeChannel::GetNDevices() const [member function]
cls.add_method('GetNDevices',
'uint32_t',
[],
is_const=True, is_virtual=True)
## bridge-channel.h (module 'bridge'): ns3::Ptr<ns3::NetDevice> ns3::BridgeChannel::GetDevice(uint32_t i) const [member function]
cls.add_method('GetDevice',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'i')],
is_const=True, is_virtual=True)
return
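## BridgeChannel aggregates the channels of the bridged ports so the bridge
## presents a single Channel to the rest of the stack. Illustrative sketch,
## assuming the compiled 'ns.bridge' module is importable; a freshly constructed
## channel is assumed to report zero devices until AddChannel is used:
def _example_bridge_channel():
    import ns.bridge
    ch = ns.bridge.BridgeChannel()
    assert ch.GetNDevices() == 0  # empty until port channels are added via AddChannel
    return ch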
def register_Ns3BridgeNetDevice_methods(root_module, cls):
## bridge-net-device.h (module 'bridge'): ns3::BridgeNetDevice::BridgeNetDevice() [constructor]
cls.add_constructor([])
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::AddBridgePort(ns3::Ptr<ns3::NetDevice> bridgePort) [member function]
cls.add_method('AddBridgePort',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'bridgePort')])
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('AddLinkChangeCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): ns3::Address ns3::BridgeNetDevice::GetAddress() const [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): ns3::Ptr<ns3::NetDevice> ns3::BridgeNetDevice::GetBridgePort(uint32_t n) const [member function]
cls.add_method('GetBridgePort',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'n')],
is_const=True)
## bridge-net-device.h (module 'bridge'): ns3::Address ns3::BridgeNetDevice::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): ns3::Ptr<ns3::Channel> ns3::BridgeNetDevice::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::Channel >',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): uint32_t ns3::BridgeNetDevice::GetIfIndex() const [member function]
cls.add_method('GetIfIndex',
'uint32_t',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): uint16_t ns3::BridgeNetDevice::GetMtu() const [member function]
cls.add_method('GetMtu',
'uint16_t',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): ns3::Address ns3::BridgeNetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv4Address', 'multicastGroup')],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): ns3::Address ns3::BridgeNetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv6Address', 'addr')],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): uint32_t ns3::BridgeNetDevice::GetNBridgePorts() const [member function]
cls.add_method('GetNBridgePorts',
'uint32_t',
[],
is_const=True)
## bridge-net-device.h (module 'bridge'): ns3::Ptr<ns3::Node> ns3::BridgeNetDevice::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): static ns3::TypeId ns3::BridgeNetDevice::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::IsBridge() const [member function]
cls.add_method('IsBridge',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::IsLinkUp() const [member function]
cls.add_method('IsLinkUp',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::IsPointToPoint() const [member function]
cls.add_method('IsPointToPoint',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::NeedsArp() const [member function]
cls.add_method('NeedsArp',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Send',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('SendFrom',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::SetAddress(ns3::Address address) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::Address', 'address')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::SetIfIndex(uint32_t const index) [member function]
cls.add_method('SetIfIndex',
'void',
[param('uint32_t const', 'index')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::SetMtu(uint16_t const mtu) [member function]
cls.add_method('SetMtu',
'bool',
[param('uint16_t const', 'mtu')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetPromiscReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::SupportsSendFrom() const [member function]
cls.add_method('SupportsSendFrom',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::ForwardBroadcast(ns3::Ptr<ns3::NetDevice> incomingPort, ns3::Ptr<const ns3::Packet> packet, uint16_t protocol, ns3::Mac48Address src, ns3::Mac48Address dst) [member function]
cls.add_method('ForwardBroadcast',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'incomingPort'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('uint16_t', 'protocol'), param('ns3::Mac48Address', 'src'), param('ns3::Mac48Address', 'dst')],
visibility='protected')
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::ForwardUnicast(ns3::Ptr<ns3::NetDevice> incomingPort, ns3::Ptr<const ns3::Packet> packet, uint16_t protocol, ns3::Mac48Address src, ns3::Mac48Address dst) [member function]
cls.add_method('ForwardUnicast',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'incomingPort'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('uint16_t', 'protocol'), param('ns3::Mac48Address', 'src'), param('ns3::Mac48Address', 'dst')],
visibility='protected')
## bridge-net-device.h (module 'bridge'): ns3::Ptr<ns3::NetDevice> ns3::BridgeNetDevice::GetLearnedState(ns3::Mac48Address source) [member function]
cls.add_method('GetLearnedState',
'ns3::Ptr< ns3::NetDevice >',
[param('ns3::Mac48Address', 'source')],
visibility='protected')
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::Learn(ns3::Mac48Address source, ns3::Ptr<ns3::NetDevice> port) [member function]
cls.add_method('Learn',
'void',
[param('ns3::Mac48Address', 'source'), param('ns3::Ptr< ns3::NetDevice >', 'port')],
visibility='protected')
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::ReceiveFromDevice(ns3::Ptr<ns3::NetDevice> device, ns3::Ptr<const ns3::Packet> packet, uint16_t protocol, ns3::Address const & source, ns3::Address const & destination, ns3::NetDevice::PacketType packetType) [member function]
cls.add_method('ReceiveFromDevice',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'device'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('uint16_t', 'protocol'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'destination'), param('ns3::NetDevice::PacketType', 'packetType')],
visibility='protected')
return
def register_Ns3HashImplementation_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor]
cls.add_constructor([])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_pure_virtual=True, is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function]
cls.add_method('clear',
'void',
[],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor]
cls.add_constructor([])
## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash32_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash64_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor]
cls.add_constructor([])
## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_functions(root_module):
module = root_module
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
register_functions_ns3_Hash(module.get_submodule('Hash'), root_module)
register_functions_ns3_TracedValueCallback(module.get_submodule('TracedValueCallback'), root_module)
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def register_functions_ns3_Hash(module, root_module):
register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module)
return
def register_functions_ns3_Hash_Function(module, root_module):
return
def register_functions_ns3_TracedValueCallback(module, root_module):
return
def main():
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
    main()

# ----------------------------------------------------------------------
# Next file: paste/util/doctest24.py (repo: ilc/imgserv)
# ----------------------------------------------------------------------
# Module doctest.
# Released to the public domain 16-Jan-2001, by Tim Peters ([email protected]).
# Major enhancements and refactoring by:
# Jim Fulton
# Edward Loper
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
r"""Module doctest -- a framework for running examples in docstrings.
In simplest use, end each module M to be tested with:
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
Then running the module as a script will cause the examples in the
docstrings to get executed and verified:
python M.py
This won't display anything unless an example fails, in which case the
failing example(s) and the cause(s) of the failure(s) are printed to stdout
(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
line of output is "Test failed.".
Run it with the -v switch instead:
python M.py -v
and a detailed report of all examples tried is printed to stdout, along
with assorted summaries at the end.
You can force verbose mode by passing "verbose=True" to testmod, or prohibit
it by passing "verbose=False". In either of those cases, sys.argv is not
examined by testmod.
There are a variety of other ways to run doctests, including integration
with the unittest framework, and support for running non-Python text
files containing doctests. There are also many ways to override parts
of doctest's default behaviors. See the Library Reference Manual for
details.
"""
__docformat__ = 'reStructuredText en'
__all__ = [
# 0, Option Flags
'register_optionflag',
'DONT_ACCEPT_TRUE_FOR_1',
'DONT_ACCEPT_BLANKLINE',
'NORMALIZE_WHITESPACE',
'ELLIPSIS',
'IGNORE_EXCEPTION_DETAIL',
'COMPARISON_FLAGS',
'REPORT_UDIFF',
'REPORT_CDIFF',
'REPORT_NDIFF',
'REPORT_ONLY_FIRST_FAILURE',
'REPORTING_FLAGS',
# 1. Utility Functions
'is_private',
# 2. Example & DocTest
'Example',
'DocTest',
# 3. Doctest Parser
'DocTestParser',
# 4. Doctest Finder
'DocTestFinder',
# 5. Doctest Runner
'DocTestRunner',
'OutputChecker',
'DocTestFailure',
'UnexpectedException',
'DebugRunner',
# 6. Test Functions
'testmod',
'testfile',
'run_docstring_examples',
# 7. Tester
'Tester',
# 8. Unittest Support
'DocTestSuite',
'DocFileSuite',
'set_unittest_reportflags',
# 9. Debugging Support
'script_from_examples',
'testsource',
'debug_src',
'debug',
]
import __future__
import sys, traceback, inspect, linecache, os, re, types
import unittest, difflib, pdb, tempfile
import warnings
from StringIO import StringIO
# Don't whine about the deprecated is_private function in this
# module's tests.
warnings.filterwarnings("ignore", "is_private", DeprecationWarning,
__name__, 0)
# There are 4 basic classes:
# - Example: a <source, want> pair, plus an intra-docstring line number.
# - DocTest: a collection of examples, parsed from a docstring, plus
# info about where the docstring came from (name, filename, lineno).
# - DocTestFinder: extracts DocTests from a given object's docstring and
# its contained objects' docstrings.
# - DocTestRunner: runs DocTest cases, and accumulates statistics.
#
# So the basic picture is:
#
# list of:
# +------+ +---------+ +-------+
# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
# +------+ +---------+ +-------+
# | Example |
# | ... |
# | Example |
# +---------+
# Option constants.
OPTIONFLAGS_BY_NAME = {}
def register_optionflag(name):
flag = 1 << len(OPTIONFLAGS_BY_NAME)
OPTIONFLAGS_BY_NAME[name] = flag
return flag
DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
ELLIPSIS = register_optionflag('ELLIPSIS')
IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
DONT_ACCEPT_BLANKLINE |
NORMALIZE_WHITESPACE |
ELLIPSIS |
IGNORE_EXCEPTION_DETAIL)
REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
REPORTING_FLAGS = (REPORT_UDIFF |
REPORT_CDIFF |
REPORT_NDIFF |
REPORT_ONLY_FIRST_FAILURE)
# Special string markers for use in `want` strings:
BLANKLINE_MARKER = '<BLANKLINE>'
ELLIPSIS_MARKER = '...'
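# An illustrative sketch, never called (not part of the original module):
# each register_optionflag() call above returns a distinct power of two,
# so option flags combine with bitwise-or and are tested with bitwise-and.
def _demo_optionflags():
    flags = ELLIPSIS | NORMALIZE_WHITESPACE
    assert flags & ELLIPSIS
    assert flags & NORMALIZE_WHITESPACE
    assert not (flags & REPORT_NDIFF)
    return flags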
######################################################################
## Table of Contents
######################################################################
# 1. Utility Functions
# 2. Example & DocTest -- store test cases
# 3. DocTest Parser -- extracts examples from strings
# 4. DocTest Finder -- extracts test cases from objects
# 5. DocTest Runner -- runs test cases
# 6. Test Functions -- convenient wrappers for testing
# 7. Tester Class -- for backwards compatibility
# 8. Unittest Support
# 9. Debugging Support
# 10. Example Usage
######################################################################
## 1. Utility Functions
######################################################################
def is_private(prefix, base):
"""prefix, base -> true iff name prefix + "." + base is "private".
Prefix may be an empty string, and base does not contain a period.
Prefix is ignored (although functions you write conforming to this
protocol may make use of it).
    Return true iff base begins with at least one underscore, but
    does not both begin and end with at least two underscores.
>>> is_private("a.b", "my_func")
False
>>> is_private("____", "_my_func")
True
>>> is_private("someclass", "__init__")
False
>>> is_private("sometypo", "__init_")
True
>>> is_private("x.y.z", "_")
True
>>> is_private("_x.y.z", "__")
False
>>> is_private("", "") # senseless but consistent
False
"""
warnings.warn("is_private is deprecated; it wasn't useful; "
"examine DocTestFinder.find() lists instead",
DeprecationWarning, stacklevel=2)
return base[:1] == "_" and not base[:2] == "__" == base[-2:]
def _extract_future_flags(globs):
"""
Return the compiler-flags associated with the future features that
have been imported into the given namespace (globs).
"""
flags = 0
for fname in __future__.all_feature_names:
feature = globs.get(fname, None)
if feature is getattr(__future__, fname):
flags |= feature.compiler_flag
return flags
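# An illustrative sketch, never called (not part of the original module):
# only genuine __future__ features bound in the namespace contribute
# their compiler flags.
def _demo_extract_future_flags():
    import __future__ as future
    flags = _extract_future_flags({'division': future.division})
    assert flags == future.division.compiler_flag
    return flags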
def _normalize_module(module, depth=2):
"""
Return the module specified by `module`. In particular:
- If `module` is a module, then return module.
- If `module` is a string, then import and return the
module with that name.
- If `module` is None, then return the calling module.
The calling module is assumed to be the module of
the stack frame at the given depth in the call stack.
"""
if inspect.ismodule(module):
return module
elif isinstance(module, (str, unicode)):
return __import__(module, globals(), locals(), ["*"])
elif module is None:
return sys.modules[sys._getframe(depth).f_globals['__name__']]
else:
raise TypeError("Expected a module, string, or None")
def _indent(s, indent=4):
"""
    Add the given number of space characters to the beginning of every
non-blank line in `s`, and return the result.
"""
# This regexp matches the start of non-blank lines:
return re.sub('(?m)^(?!$)', indent*' ', s)
def _exception_traceback(exc_info):
"""
Return a string containing a traceback message for the given
exc_info tuple (as returned by sys.exc_info()).
"""
# Get a traceback message.
excout = StringIO()
exc_type, exc_val, exc_tb = exc_info
traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
return excout.getvalue()
# Override some StringIO methods.
class _SpoofOut(StringIO):
def getvalue(self):
result = StringIO.getvalue(self)
# If anything at all was written, make sure there's a trailing
# newline. There's no way for the expected output to indicate
# that a trailing newline is missing.
if result and not result.endswith("\n"):
result += "\n"
# Prevent softspace from screwing up the next test case, in
# case they used print with a trailing comma in an example.
if hasattr(self, "softspace"):
del self.softspace
return result
def truncate(self, size=None):
StringIO.truncate(self, size)
if hasattr(self, "softspace"):
del self.softspace
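# An illustrative sketch, never called (not part of the original module):
# _SpoofOut guarantees captured output ends with a newline, so it can be
# compared against `want` strings, which always end with one.
def _demo_spoofout():
    out = _SpoofOut()
    out.write('no trailing newline')
    assert out.getvalue() == 'no trailing newline\n'
    return out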
# Worst-case linear-time ellipsis matching.
def _ellipsis_match(want, got):
"""
Essentially the only subtle case:
>>> _ellipsis_match('aa...aa', 'aaa')
False
"""
if ELLIPSIS_MARKER not in want:
return want == got
# Find "the real" strings.
ws = want.split(ELLIPSIS_MARKER)
assert len(ws) >= 2
# Deal with exact matches possibly needed at one or both ends.
startpos, endpos = 0, len(got)
w = ws[0]
if w: # starts with exact match
if got.startswith(w):
startpos = len(w)
del ws[0]
else:
return False
w = ws[-1]
if w: # ends with exact match
if got.endswith(w):
endpos -= len(w)
del ws[-1]
else:
return False
if startpos > endpos:
# Exact end matches required more characters than we have, as in
# _ellipsis_match('aa...aa', 'aaa')
return False
# For the rest, we only need to find the leftmost non-overlapping
# match for each piece. If there's no overall match that way alone,
# there's no overall match period.
for w in ws:
# w may be '' at times, if there are consecutive ellipses, or
# due to an ellipsis at the start or end of `want`. That's OK.
# Search for an empty string succeeds, and doesn't change startpos.
startpos = got.find(w, startpos, endpos)
if startpos < 0:
return False
startpos += len(w)
return True
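# Illustrative cases, never called (not part of the original module): an
# ellipsis may match any substring, including the empty string, but
# literal prefixes and suffixes must match exactly and the matched pieces
# may not overlap.
def _demo_ellipsis_match():
    assert _ellipsis_match('a...c', 'abbbc')
    assert _ellipsis_match('a...', 'a')           # '...' may match ''
    assert not _ellipsis_match('aa...aa', 'aaa')  # pieces may not overlap
    return True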
def _comment_line(line):
"Return a commented form of the given line"
line = line.rstrip()
if line:
return '# '+line
else:
return '#'
class _OutputRedirectingPdb(pdb.Pdb):
"""
A specialized version of the python debugger that redirects stdout
to a given stream when interacting with the user. Stdout is *not*
redirected when traced code is executed.
"""
def __init__(self, out):
self.__out = out
pdb.Pdb.__init__(self)
def trace_dispatch(self, *args):
# Redirect stdout to the given stream.
save_stdout = sys.stdout
sys.stdout = self.__out
# Call Pdb's trace dispatch method.
try:
return pdb.Pdb.trace_dispatch(self, *args)
finally:
sys.stdout = save_stdout
# [XX] Normalize with respect to os.path.pardir?
def _module_relative_path(module, path):
if not inspect.ismodule(module):
raise TypeError, 'Expected a module: %r' % module
if path.startswith('/'):
raise ValueError, 'Module-relative files may not have absolute paths'
# Find the base directory for the path.
if hasattr(module, '__file__'):
# A normal module/package
basedir = os.path.split(module.__file__)[0]
elif module.__name__ == '__main__':
# An interactive session.
if len(sys.argv)>0 and sys.argv[0] != '':
basedir = os.path.split(sys.argv[0])[0]
else:
basedir = os.curdir
else:
# A module w/o __file__ (this includes builtins)
        raise ValueError("Can't resolve paths relative to the module "
                         "%r (it has no __file__)" % (module,))
# Combine the base directory and the path.
return os.path.join(basedir, *(path.split('/')))
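# An illustrative sketch, never called (not part of the original module):
# module-relative paths are '/'-separated and resolved against the
# directory of the module's __file__; the `os` module is used here purely
# as a convenient example.
def _demo_module_relative_path():
    base = os.path.split(os.__file__)[0]
    expected = os.path.join(base, 'data', 'sample.txt')
    assert _module_relative_path(os, 'data/sample.txt') == expected
    return expected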
######################################################################
## 2. Example & DocTest
######################################################################
## - An "example" is a <source, want> pair, where "source" is a
## fragment of source code, and "want" is the expected output for
## "source." The Example class also includes information about
## where the example was extracted from.
##
## - A "doctest" is a collection of examples, typically extracted from
## a string (such as an object's docstring). The DocTest class also
## includes information about where the string was extracted from.
class Example:
"""
A single doctest example, consisting of source code and expected
output. `Example` defines the following attributes:
- source: A single Python statement, always ending with a newline.
The constructor adds a newline if needed.
- want: The expected output from running the source code (either
from stdout, or a traceback in case of exception). `want` ends
with a newline unless it's empty, in which case it's an empty
string. The constructor adds a newline if needed.
- exc_msg: The exception message generated by the example, if
the example is expected to generate an exception; or `None` if
it is not expected to generate an exception. This exception
message is compared against the return value of
`traceback.format_exception_only()`. `exc_msg` ends with a
newline unless it's `None`. The constructor adds a newline
if needed.
- lineno: The line number within the DocTest string containing
this Example where the Example begins. This line number is
zero-based, with respect to the beginning of the DocTest.
- indent: The example's indentation in the DocTest string.
        I.e., the number of space characters that precede the
example's first prompt.
- options: A dictionary mapping from option flags to True or
False, which is used to override default options for this
example. Any option flags not contained in this dictionary
are left at their default value (as specified by the
DocTestRunner's optionflags). By default, no options are set.
"""
def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
options=None):
# Normalize inputs.
if not source.endswith('\n'):
source += '\n'
if want and not want.endswith('\n'):
want += '\n'
if exc_msg is not None and not exc_msg.endswith('\n'):
exc_msg += '\n'
# Store properties.
self.source = source
self.want = want
self.lineno = lineno
self.indent = indent
if options is None: options = {}
self.options = options
self.exc_msg = exc_msg
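# An illustrative sketch, never called (not part of the original module):
# the Example constructor normalizes its inputs, so `source` and a
# non-empty `want` always end with a newline.
def _demo_example_normalization():
    ex = Example('print 2 + 2', '4')
    assert ex.source == 'print 2 + 2\n'
    assert ex.want == '4\n'
    assert ex.exc_msg is None and ex.options == {}
    return ex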
class DocTest:
"""
A collection of doctest examples that should be run in a single
namespace. Each `DocTest` defines the following attributes:
- examples: the list of examples.
- globs: The namespace (aka globals) that the examples should
be run in.
- name: A name identifying the DocTest (typically, the name of
the object whose docstring this DocTest was extracted from).
- filename: The name of the file that this DocTest was extracted
from, or `None` if the filename is unknown.
- lineno: The line number within filename where this DocTest
begins, or `None` if the line number is unavailable. This
line number is zero-based, with respect to the beginning of
the file.
- docstring: The string that the examples were extracted from,
or `None` if the string is unavailable.
"""
def __init__(self, examples, globs, name, filename, lineno, docstring):
"""
Create a new DocTest containing the given examples. The
DocTest's globals are initialized with a copy of `globs`.
"""
assert not isinstance(examples, basestring), \
"DocTest no longer accepts str; use DocTestParser instead"
self.examples = examples
self.docstring = docstring
self.globs = globs.copy()
self.name = name
self.filename = filename
self.lineno = lineno
def __repr__(self):
if len(self.examples) == 0:
examples = 'no examples'
elif len(self.examples) == 1:
examples = '1 example'
else:
examples = '%d examples' % len(self.examples)
return ('<DocTest %s from %s:%s (%s)>' %
(self.name, self.filename, self.lineno, examples))
# This lets us sort tests by name:
def __cmp__(self, other):
if not isinstance(other, DocTest):
return -1
return cmp((self.name, self.filename, self.lineno, id(self)),
(other.name, other.filename, other.lineno, id(other)))
######################################################################
## 3. DocTestParser
######################################################################
class DocTestParser:
"""
A class used to parse strings containing doctest examples.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
.*$\n? # But any other line
)*)
''', re.MULTILINE | re.VERBOSE)
# A regular expression for handling `want` strings that contain
# expected exceptions. It divides `want` into three pieces:
# - the traceback header line (`hdr`)
# - the traceback stack (`stack`)
# - the exception message (`msg`), as generated by
# traceback.format_exception_only()
# `msg` may have multiple lines. We assume/require that the
# exception message is the first non-indented line starting with a word
# character following the traceback header line.
_EXCEPTION_RE = re.compile(r"""
# Grab the traceback header. Different versions of Python have
# said different things on the first traceback line.
^(?P<hdr> Traceback\ \(
(?: most\ recent\ call\ last
| innermost\ last
) \) :
)
\s* $ # toss trailing whitespace on the header.
(?P<stack> .*?) # don't blink: absorb stuff until...
^ (?P<msg> \w+ .*) # a line *starts* with alphanum.
""", re.VERBOSE | re.MULTILINE | re.DOTALL)
# A callable returning a true value iff its argument is a blank line
# or contains a single comment.
_IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
def parse(self, string, name='<string>'):
"""
Divide the given string into examples and intervening text,
and return them as a list of alternating Examples and strings.
Line numbers for the Examples are 0-based. The optional
argument `name` is a name identifying this string, and is only
used for error messages.
"""
string = string.expandtabs()
# If all lines begin with the same indentation, then strip it.
min_indent = self._min_indent(string)
if min_indent > 0:
string = '\n'.join([l[min_indent:] for l in string.split('\n')])
output = []
charno, lineno = 0, 0
# Find all doctest examples in the string:
for m in self._EXAMPLE_RE.finditer(string):
# Add the pre-example text to `output`.
output.append(string[charno:m.start()])
# Update lineno (lines before this example)
lineno += string.count('\n', charno, m.start())
# Extract info from the regexp match.
(source, options, want, exc_msg) = \
self._parse_example(m, name, lineno)
# Create an Example, and add it to the list.
if not self._IS_BLANK_OR_COMMENT(source):
output.append( Example(source, want, exc_msg,
lineno=lineno,
indent=min_indent+len(m.group('indent')),
options=options) )
# Update lineno (lines inside this example)
lineno += string.count('\n', m.start(), m.end())
# Update charno.
charno = m.end()
# Add any remaining post-example text to `output`.
output.append(string[charno:])
return output
def get_doctest(self, string, globs, name, filename, lineno):
"""
Extract all doctest examples from the given string, and
collect them into a `DocTest` object.
`globs`, `name`, `filename`, and `lineno` are attributes for
the new `DocTest` object. See the documentation for `DocTest`
for more information.
"""
return DocTest(self.get_examples(string, name), globs,
name, filename, lineno, string)
def get_examples(self, string, name='<string>'):
"""
Extract all doctest examples from the given string, and return
them as a list of `Example` objects. Line numbers are
        0-based, because in most doctests nothing interesting appears
        on the same line as the opening triple-quote, so the first
        interesting line ends up being called \"line 1\".
The optional argument `name` is a name identifying this
string, and is only used for error messages.
"""
return [x for x in self.parse(string, name)
if isinstance(x, Example)]
def _parse_example(self, m, name, lineno):
"""
Given a regular expression match from `_EXAMPLE_RE` (`m`),
return a pair `(source, want)`, where `source` is the matched
example's source code (with prompts and indentation stripped);
and `want` is the example's expected output (with indentation
stripped).
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
"""
# Get the example's indentation level.
indent = len(m.group('indent'))
# Divide source into lines; check that they're properly
# indented; and then strip their indentation & prompts.
source_lines = m.group('source').split('\n')
self._check_prompt_blank(source_lines, indent, name, lineno)
self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
source = '\n'.join([sl[indent+4:] for sl in source_lines])
# Divide want into lines; check that it's properly indented; and
# then strip the indentation. Spaces before the last newline should
# be preserved, so plain rstrip() isn't good enough.
want = m.group('want')
want_lines = want.split('\n')
if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
del want_lines[-1] # forget final newline & spaces after it
self._check_prefix(want_lines, ' '*indent, name,
lineno + len(source_lines))
want = '\n'.join([wl[indent:] for wl in want_lines])
# If `want` contains a traceback message, then extract it.
m = self._EXCEPTION_RE.match(want)
if m:
exc_msg = m.group('msg')
else:
exc_msg = None
# Extract options from the source.
options = self._find_options(source, name, lineno)
return source, options, want, exc_msg
# This regular expression looks for option directives in the
# source code of an example. Option directives are comments
# starting with "doctest:". Warning: this may give false
# positives for string-literals that contain the string
# "#doctest:". Eliminating these false positives would require
# actually parsing the string; but we limit them by ignoring any
# line containing "#doctest:" that is *followed* by a quote mark.
_OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
re.MULTILINE)
def _find_options(self, source, name, lineno):
"""
Return a dictionary containing option overrides extracted from
option directives in the given source string.
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
"""
options = {}
# (note: with the current regexp, this will match at most once:)
for m in self._OPTION_DIRECTIVE_RE.finditer(source):
option_strings = m.group(1).replace(',', ' ').split()
for option in option_strings:
if (option[0] not in '+-' or
option[1:] not in OPTIONFLAGS_BY_NAME):
raise ValueError('line %r of the doctest for %s '
'has an invalid option: %r' %
(lineno+1, name, option))
flag = OPTIONFLAGS_BY_NAME[option[1:]]
options[flag] = (option[0] == '+')
if options and self._IS_BLANK_OR_COMMENT(source):
raise ValueError('line %r of the doctest for %s has an option '
'directive on a line with no example: %r' %
(lineno, name, source))
return options
# This regular expression finds the indentation of every non-blank
# line in a string.
_INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
def _min_indent(self, s):
"Return the minimum indentation of any non-blank line in `s`"
indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
if len(indents) > 0:
return min(indents)
else:
return 0
def _check_prompt_blank(self, lines, indent, name, lineno):
"""
Given the lines of a source string (including prompts and
leading indentation), check to make sure that every prompt is
followed by a space character. If any line is not followed by
a space character, then raise ValueError.
"""
for i, line in enumerate(lines):
if len(line) >= indent+4 and line[indent+3] != ' ':
raise ValueError('line %r of the docstring for %s '
'lacks blank after %s: %r' %
(lineno+i+1, name,
line[indent:indent+3], line))
def _check_prefix(self, lines, prefix, name, lineno):
"""
Check that every line in the given list starts with the given
prefix; if any line does not, then raise a ValueError.
"""
for i, line in enumerate(lines):
if line and not line.startswith(prefix):
raise ValueError('line %r of the docstring for %s has '
'inconsistent leading whitespace: %r' %
(lineno+i+1, name, line))
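# An illustrative round trip, never called (not part of the original
# module): DocTestParser.parse() returns alternating prose strings and
# Example objects, with common leading indentation stripped from both the
# source and the expected output.
def _demo_doctest_parser():
    text = '''
    Some explanatory text.
        >>> 1 + 1
        2
    '''
    pieces = DocTestParser().parse(text)
    examples = [p for p in pieces if isinstance(p, Example)]
    assert len(examples) == 1
    assert examples[0].source == '1 + 1\n'
    assert examples[0].want == '2\n'
    return pieces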
######################################################################
## 4. DocTest Finder
######################################################################
class DocTestFinder:
"""
A class used to extract the DocTests that are relevant to a given
object, from its docstring and the docstrings of its contained
objects. Doctests can currently be extracted from the following
object types: modules, functions, classes, methods, staticmethods,
classmethods, and properties.
"""
def __init__(self, verbose=False, parser=DocTestParser(),
recurse=True, _namefilter=None, exclude_empty=True):
"""
Create a new doctest finder.
The optional argument `parser` specifies a class or
function that should be used to create new DocTest objects (or
objects that implement the same interface as DocTest). The
signature for this factory function should match the signature
of the DocTest constructor.
If the optional argument `recurse` is false, then `find` will
only examine the given object, and not any contained objects.
If the optional argument `exclude_empty` is false, then `find`
will include tests for objects with empty docstrings.
"""
self._parser = parser
self._verbose = verbose
self._recurse = recurse
self._exclude_empty = exclude_empty
# _namefilter is undocumented, and exists only for temporary backward-
# compatibility support of testmod's deprecated isprivate mess.
self._namefilter = _namefilter
def find(self, obj, name=None, module=None, globs=None,
extraglobs=None):
"""
Return a list of the DocTests that are defined by the given
object's docstring, or by any of its contained objects'
docstrings.
The optional parameter `module` is the module that contains
the given object. If the module is not specified or is None, then
the test finder will attempt to automatically determine the
correct module. The object's module is used:
- As a default namespace, if `globs` is not specified.
- To prevent the DocTestFinder from extracting DocTests
from objects that are imported from other modules.
- To find the name of the file containing the object.
- To help find the line number of the object within its
file.
Contained objects whose module does not match `module` are ignored.
If `module` is False, no attempt to find the module will be made.
This is obscure, of use mostly in tests: if `module` is False, or
is None but cannot be found automatically, then all objects are
considered to belong to the (non-existent) module, so all contained
objects will (recursively) be searched for doctests.
The globals for each DocTest is formed by combining `globs`
and `extraglobs` (bindings in `extraglobs` override bindings
in `globs`). A new copy of the globals dictionary is created
for each DocTest. If `globs` is not specified, then it
defaults to the module's `__dict__`, if specified, or {}
otherwise. If `extraglobs` is not specified, then it defaults
to {}.
"""
# If name was not specified, then extract it from the object.
if name is None:
name = getattr(obj, '__name__', None)
if name is None:
raise ValueError("DocTestFinder.find: name must be given "
"when obj.__name__ doesn't exist: %r" %
(type(obj),))
# Find the module that contains the given object (if obj is
# a module, then module=obj.). Note: this may fail, in which
# case module will be None.
if module is False:
module = None
elif module is None:
module = inspect.getmodule(obj)
# Read the module's source code. This is used by
# DocTestFinder._find_lineno to find the line number for a
# given object's docstring.
try:
file = inspect.getsourcefile(obj) or inspect.getfile(obj)
source_lines = linecache.getlines(file)
if not source_lines:
source_lines = None
except TypeError:
source_lines = None
# Initialize globals, and merge in extraglobs.
if globs is None:
if module is None:
globs = {}
else:
globs = module.__dict__.copy()
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
        # Recursively explore `obj`, extracting DocTests.
tests = []
self._find(tests, obj, name, module, source_lines, globs, {})
return tests
def _filter(self, obj, prefix, base):
"""
Return true if the given object should not be examined.
"""
return (self._namefilter is not None and
self._namefilter(prefix, base))
def _from_module(self, module, object):
"""
Return true if the given object is defined in the given
module.
"""
if module is None:
return True
elif inspect.isfunction(object):
return module.__dict__ is object.func_globals
elif inspect.isclass(object):
return module.__name__ == object.__module__
elif inspect.getmodule(object) is not None:
return module is inspect.getmodule(object)
elif hasattr(object, '__module__'):
return module.__name__ == object.__module__
elif isinstance(object, property):
            return True # [XX] no way to be sure.
else:
raise ValueError("object must be a class or function")
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to `tests`.
"""
if self._verbose:
print 'Finding tests in %s' % name
# If we've already processed this object, then ignore it.
if id(obj) in seen:
return
seen[id(obj)] = 1
# Find a test for this object, and add it to the list of tests.
test = self._get_test(obj, name, module, globs, source_lines)
if test is not None:
tests.append(test)
# Look for tests in a module's contained objects.
if inspect.ismodule(obj) and self._recurse:
for valname, val in obj.__dict__.items():
# Check if this contained object should be ignored.
if self._filter(val, name, valname):
continue
valname = '%s.%s' % (name, valname)
# Recurse to functions & classes.
if ((inspect.isfunction(val) or inspect.isclass(val)) and
self._from_module(module, val)):
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a module's __test__ dictionary.
if inspect.ismodule(obj) and self._recurse:
for valname, val in getattr(obj, '__test__', {}).items():
if not isinstance(valname, basestring):
raise ValueError("DocTestFinder.find: __test__ keys "
"must be strings: %r" %
(type(valname),))
if not (inspect.isfunction(val) or inspect.isclass(val) or
inspect.ismethod(val) or inspect.ismodule(val) or
isinstance(val, basestring)):
raise ValueError("DocTestFinder.find: __test__ values "
"must be strings, functions, methods, "
"classes, or modules: %r" %
(type(val),))
valname = '%s.__test__.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if inspect.isclass(obj) and self._recurse:
for valname, val in obj.__dict__.items():
# Check if this contained object should be ignored.
if self._filter(val, name, valname):
continue
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).im_func
# Recurse to methods, properties, and nested classes.
if ((inspect.isfunction(val) or inspect.isclass(val) or
isinstance(val, property)) and
self._from_module(module, val)):
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
def _get_test(self, obj, name, module, globs, source_lines):
"""
Return a DocTest for the given object, if it defines a docstring;
otherwise, return None.
"""
# Extract the object's docstring. If it doesn't have one,
# then return None (no test for this object).
if isinstance(obj, basestring):
docstring = obj
else:
try:
if obj.__doc__ is None:
docstring = ''
else:
docstring = obj.__doc__
if not isinstance(docstring, basestring):
docstring = str(docstring)
except (TypeError, AttributeError):
docstring = ''
# Find the docstring's location in the file.
lineno = self._find_lineno(obj, source_lines)
# Don't bother if the docstring is empty.
if self._exclude_empty and not docstring:
return None
# Return a DocTest for this object.
if module is None:
filename = None
else:
filename = getattr(module, '__file__', module.__name__)
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
return self._parser.get_doctest(docstring, globs, name,
filename, lineno)
def _find_lineno(self, obj, source_lines):
"""
Return a line number of the given object's docstring. Note:
this method assumes that the object has a docstring.
"""
lineno = None
# Find the line number for modules.
if inspect.ismodule(obj):
lineno = 0
# Find the line number for classes.
# Note: this could be fooled if a class is defined multiple
# times in a single file.
if inspect.isclass(obj):
if source_lines is None:
return None
pat = re.compile(r'^\s*class\s*%s\b' %
getattr(obj, '__name__', '-'))
for i, line in enumerate(source_lines):
if pat.match(line):
lineno = i
break
# Find the line number for functions & methods.
if inspect.ismethod(obj): obj = obj.im_func
if inspect.isfunction(obj): obj = obj.func_code
if inspect.istraceback(obj): obj = obj.tb_frame
if inspect.isframe(obj): obj = obj.f_code
if inspect.iscode(obj):
lineno = getattr(obj, 'co_firstlineno', None)-1
# Find the line number where the docstring starts. Assume
# that it's the first line that begins with a quote mark.
# Note: this could be fooled by a multiline function
# signature, where a continuation line begins with a quote
# mark.
if lineno is not None:
if source_lines is None:
return lineno+1
pat = re.compile('(^|.*:)\s*\w*("|\')')
for lineno in range(lineno, len(source_lines)):
if pat.match(source_lines[lineno]):
return lineno
# We couldn't find the line number.
return None
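# An illustrative sketch, never called (not part of the original module):
# DocTestFinder.find() returns one DocTest per docstring, with the
# examples already parsed; `module=False` skips module resolution, as
# described in find() above.
def _demo_doctest_finder():
    def sample():
        """
        >>> 2 * 3
        6
        """
    tests = DocTestFinder().find(sample, name='sample', module=False)
    assert len(tests) == 1
    assert len(tests[0].examples) == 1
    return tests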
######################################################################
## 5. DocTest Runner
######################################################################
class DocTestRunner:
"""
A class used to run DocTest test cases, and accumulate statistics.
The `run` method is used to process a single DocTest case. It
returns a tuple `(f, t)`, where `t` is the number of test cases
tried, and `f` is the number of test cases that failed.
>>> tests = DocTestFinder().find(_TestClass)
>>> runner = DocTestRunner(verbose=False)
>>> for test in tests:
... print runner.run(test)
(0, 2)
(0, 1)
(0, 2)
(0, 2)
The `summarize` method prints a summary of all the test cases that
have been run by the runner, and returns an aggregated `(f, t)`
tuple:
>>> runner.summarize(verbose=1)
4 items passed all tests:
2 tests in _TestClass
2 tests in _TestClass.__init__
2 tests in _TestClass.get
1 tests in _TestClass.square
7 tests in 4 items.
7 passed and 0 failed.
Test passed.
(0, 7)
The aggregated number of tried examples and failed examples is
also available via the `tries` and `failures` attributes:
>>> runner.tries
7
>>> runner.failures
0
The comparison between expected outputs and actual outputs is done
by an `OutputChecker`. This comparison may be customized with a
number of option flags; see the documentation for `testmod` for
more information. If the option flags are insufficient, then the
comparison may also be customized by passing a subclass of
`OutputChecker` to the constructor.
The test runner's display output can be controlled in two ways.
    First, an output function (`out`) can be passed to
    `DocTestRunner.run`; this function will be called with strings that
should be displayed. It defaults to `sys.stdout.write`. If
capturing the output is not sufficient, then the display output
    can also be customized by subclassing DocTestRunner, and
overriding the methods `report_start`, `report_success`,
`report_unexpected_exception`, and `report_failure`.
"""
# This divider string is used to separate failure messages, and to
# separate sections of the summary.
DIVIDER = "*" * 70
def __init__(self, checker=None, verbose=None, optionflags=0):
"""
Create a new test runner.
Optional keyword arg `checker` is the `OutputChecker` that
should be used to compare the expected outputs and actual
outputs of doctest examples.
Optional keyword arg 'verbose' prints lots of stuff if true,
only failures if false; by default, it's true iff '-v' is in
sys.argv.
Optional argument `optionflags` can be used to control how the
test runner compares expected output to actual output, and how
it displays failures. See the documentation for `testmod` for
more information.
"""
self._checker = checker or OutputChecker()
if verbose is None:
verbose = '-v' in sys.argv
self._verbose = verbose
self.optionflags = optionflags
self.original_optionflags = optionflags
# Keep track of the examples we've run.
self.tries = 0
self.failures = 0
self._name2ft = {}
# Create a fake output target for capturing doctest output.
self._fakeout = _SpoofOut()
#/////////////////////////////////////////////////////////////////
# Reporting methods
#/////////////////////////////////////////////////////////////////
def report_start(self, out, test, example):
"""
Report that the test runner is about to process the given
example. (Only displays a message if verbose=True)
"""
if self._verbose:
if example.want:
out('Trying:\n' + _indent(example.source) +
'Expecting:\n' + _indent(example.want))
else:
out('Trying:\n' + _indent(example.source) +
'Expecting nothing\n')
def report_success(self, out, test, example, got):
"""
Report that the given example ran successfully. (Only
displays a message if verbose=True)
"""
if self._verbose:
out("ok\n")
def report_failure(self, out, test, example, got):
"""
Report that the given example failed.
"""
out(self._failure_header(test, example) +
self._checker.output_difference(example, got, self.optionflags))
def report_unexpected_exception(self, out, test, example, exc_info):
"""
Report that the given example raised an unexpected exception.
"""
out(self._failure_header(test, example) +
'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
def _failure_header(self, test, example):
out = [self.DIVIDER]
if test.filename:
if test.lineno is not None and example.lineno is not None:
lineno = test.lineno + example.lineno + 1
else:
lineno = '?'
out.append('File "%s", line %s, in %s' %
(test.filename, lineno, test.name))
else:
out.append('Line %s, in %s' % (example.lineno+1, test.name))
out.append('Failed example:')
source = example.source
out.append(_indent(source))
return '\n'.join(out)
#/////////////////////////////////////////////////////////////////
# DocTest Running
#/////////////////////////////////////////////////////////////////
def __run(self, test, compileflags, out):
"""
Run the examples in `test`. Write the outcome of each example
with one of the `DocTestRunner.report_*` methods, using the
writer function `out`. `compileflags` is the set of compiler
flags that should be used to execute examples. Return a tuple
`(f, t)`, where `t` is the number of examples tried, and `f`
is the number of examples that failed. The examples are run
in the namespace `test.globs`.
"""
# Keep track of the number of failures and tries.
failures = tries = 0
# Save the option flags (since option directives can be used
# to modify them).
original_optionflags = self.optionflags
SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
check = self._checker.check_output
# Process each example.
for examplenum, example in enumerate(test.examples):
            # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
# reporting after the first failure.
quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
failures > 0)
# Merge in the example's options.
self.optionflags = original_optionflags
if example.options:
for (optionflag, val) in example.options.items():
if val:
self.optionflags |= optionflag
else:
self.optionflags &= ~optionflag
# Record that we started this example.
tries += 1
if not quiet:
self.report_start(out, test, example)
# Use a special filename for compile(), so we can retrieve
# the source code during interactive debugging (see
# __patched_linecache_getlines).
filename = '<doctest %s[%d]>' % (test.name, examplenum)
# Run the example in the given context (globs), and record
# any exception that gets raised. (But don't intercept
# keyboard interrupts.)
try:
# Don't blink! This is where the user's code gets run.
exec compile(example.source, filename, "single",
compileflags, 1) in test.globs
self.debugger.set_continue() # ==== Example Finished ====
exception = None
except KeyboardInterrupt:
raise
except:
exception = sys.exc_info()
self.debugger.set_continue() # ==== Example Finished ====
got = self._fakeout.getvalue() # the actual output
self._fakeout.truncate(0)
outcome = FAILURE # guilty until proved innocent or insane
# If the example executed without raising any exceptions,
# verify its output.
if exception is None:
if check(example.want, got, self.optionflags):
outcome = SUCCESS
# The example raised an exception: check if it was expected.
else:
exc_info = sys.exc_info()
exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
if not quiet:
got += _exception_traceback(exc_info)
# If `example.exc_msg` is None, then we weren't expecting
# an exception.
if example.exc_msg is None:
outcome = BOOM
# We expected an exception: see whether it matches.
elif check(example.exc_msg, exc_msg, self.optionflags):
outcome = SUCCESS
# Another chance if they didn't care about the detail.
elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
m1 = re.match(r'[^:]*:', example.exc_msg)
m2 = re.match(r'[^:]*:', exc_msg)
if m1 and m2 and check(m1.group(0), m2.group(0),
self.optionflags):
outcome = SUCCESS
# Report the outcome.
if outcome is SUCCESS:
if not quiet:
self.report_success(out, test, example, got)
elif outcome is FAILURE:
if not quiet:
self.report_failure(out, test, example, got)
failures += 1
elif outcome is BOOM:
if not quiet:
self.report_unexpected_exception(out, test, example,
exc_info)
failures += 1
else:
assert False, ("unknown outcome", outcome)
# Restore the option flags (in case they were modified)
self.optionflags = original_optionflags
# Record and return the number of failures and tries.
self.__record_outcome(test, failures, tries)
return failures, tries
def __record_outcome(self, test, f, t):
"""
Record the fact that the given DocTest (`test`) generated `f`
failures out of `t` tried examples.
"""
f2, t2 = self._name2ft.get(test.name, (0,0))
self._name2ft[test.name] = (f+f2, t+t2)
self.failures += f
self.tries += t
__LINECACHE_FILENAME_RE = re.compile(r'<doctest '
r'(?P<name>[\w\.]+)'
r'\[(?P<examplenum>\d+)\]>$')
def __patched_linecache_getlines(self, filename, module_globals=None):
m = self.__LINECACHE_FILENAME_RE.match(filename)
if m and m.group('name') == self.test.name:
example = self.test.examples[int(m.group('examplenum'))]
return example.source.splitlines(True)
else:
            # Note: the stdlib getlines() grew a `module_globals` argument
            # in Python 2.5; it is not forwarded to the saved function here.
            return self.save_linecache_getlines(filename)
def run(self, test, compileflags=None, out=None, clear_globs=True):
"""
Run the examples in `test`, and display the results using the
writer function `out`.
The examples are run in the namespace `test.globs`. If
`clear_globs` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
the test completes, then use `clear_globs=False`.
`compileflags` gives the set of flags that should be used by
the Python compiler when running the examples. If not
specified, then it will default to the set of future-import
flags that apply to `globs`.
        The output of each example is checked by the runner's
        `OutputChecker.check_output` method, and the results are
        formatted by the `DocTestRunner.report_*` methods.
"""
self.test = test
if compileflags is None:
compileflags = _extract_future_flags(test.globs)
save_stdout = sys.stdout
if out is None:
out = save_stdout.write
sys.stdout = self._fakeout
# Patch pdb.set_trace to restore sys.stdout during interactive
# debugging (so it's not still redirected to self._fakeout).
# Note that the interactive output will go to *our*
# save_stdout, even if that's not the real sys.stdout; this
# allows us to write test cases for the set_trace behavior.
save_set_trace = pdb.set_trace
self.debugger = _OutputRedirectingPdb(save_stdout)
self.debugger.reset()
pdb.set_trace = self.debugger.set_trace
# Patch linecache.getlines, so we can see the example's source
# when we're inside the debugger.
self.save_linecache_getlines = linecache.getlines
linecache.getlines = self.__patched_linecache_getlines
try:
return self.__run(test, compileflags, out)
finally:
sys.stdout = save_stdout
pdb.set_trace = save_set_trace
linecache.getlines = self.save_linecache_getlines
if clear_globs:
test.globs.clear()
#/////////////////////////////////////////////////////////////////
# Summarization
#/////////////////////////////////////////////////////////////////
def summarize(self, verbose=None):
"""
Print a summary of all the test cases that have been run by
this DocTestRunner, and return a tuple `(f, t)`, where `f` is
the total number of failed examples, and `t` is the total
number of tried examples.
The optional `verbose` argument controls how detailed the
summary is. If the verbosity is not specified, then the
DocTestRunner's verbosity is used.
"""
if verbose is None:
verbose = self._verbose
notests = []
passed = []
failed = []
totalt = totalf = 0
for x in self._name2ft.items():
name, (f, t) = x
assert f <= t
totalt += t
totalf += f
if t == 0:
notests.append(name)
elif f == 0:
passed.append( (name, t) )
else:
failed.append(x)
if verbose:
if notests:
print len(notests), "items had no tests:"
notests.sort()
for thing in notests:
print " ", thing
if passed:
print len(passed), "items passed all tests:"
passed.sort()
for thing, count in passed:
print " %3d tests in %s" % (count, thing)
if failed:
print self.DIVIDER
print len(failed), "items had failures:"
failed.sort()
for thing, (f, t) in failed:
print " %3d of %3d in %s" % (f, t, thing)
if verbose:
print totalt, "tests in", len(self._name2ft), "items."
print totalt - totalf, "passed and", totalf, "failed."
if totalf:
print "***Test Failed***", totalf, "failures."
elif verbose:
print "Test passed."
return totalf, totalt
#/////////////////////////////////////////////////////////////////
# Backward compatibility cruft to maintain doctest.master.
#/////////////////////////////////////////////////////////////////
def merge(self, other):
d = self._name2ft
for name, (f, t) in other._name2ft.items():
if name in d:
print "*** DocTestRunner.merge: '" + name + "' in both" \
" testers; summing outcomes."
f2, t2 = d[name]
f = f + f2
t = t + t2
d[name] = f, t
class OutputChecker:
"""
    A class used to check whether the actual output from a doctest
example matches the expected output. `OutputChecker` defines two
methods: `check_output`, which compares a given pair of outputs,
and returns true if they match; and `output_difference`, which
returns a string describing the differences between two outputs.
"""
def check_output(self, want, got, optionflags):
"""
Return True iff the actual output from an example (`got`)
matches the expected output (`want`). These strings are
always considered to match if they are identical; but
depending on what option flags the test runner is using,
several non-exact match types are also possible. See the
        documentation for `DocTestRunner` for more information about
option flags.
"""
# Handle the common case first, for efficiency:
# if they're string-identical, always return true.
if got == want:
return True
# The values True and False replaced 1 and 0 as the return
# value for boolean comparisons in Python 2.3.
if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
if (got,want) == ("True\n", "1\n"):
return True
if (got,want) == ("False\n", "0\n"):
return True
# <BLANKLINE> can be used as a special sequence to signify a
# blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
if not (optionflags & DONT_ACCEPT_BLANKLINE):
# Replace <BLANKLINE> in want with a blank line.
want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
'', want)
# If a line in got contains only spaces, then remove the
# spaces.
got = re.sub('(?m)^\s*?$', '', got)
if got == want:
return True
# This flag causes doctest to ignore any differences in the
# contents of whitespace strings. Note that this can be used
# in conjunction with the ELLIPSIS flag.
if optionflags & NORMALIZE_WHITESPACE:
got = ' '.join(got.split())
want = ' '.join(want.split())
if got == want:
return True
# The ELLIPSIS flag says to let the sequence "..." in `want`
# match any substring in `got`.
if optionflags & ELLIPSIS:
if _ellipsis_match(want, got):
return True
# We didn't find any match; return false.
return False
# Should we do a fancy diff?
def _do_a_fancy_diff(self, want, got, optionflags):
# Not unless they asked for a fancy diff.
if not optionflags & (REPORT_UDIFF |
REPORT_CDIFF |
REPORT_NDIFF):
return False
# If expected output uses ellipsis, a meaningful fancy diff is
# too hard ... or maybe not. In two real-life failures Tim saw,
# a diff was a major help anyway, so this is commented out.
# [todo] _ellipsis_match() knows which pieces do and don't match,
# and could be the basis for a kick-ass diff in this case.
##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
## return False
# ndiff does intraline difference marking, so can be useful even
# for 1-line differences.
if optionflags & REPORT_NDIFF:
return True
# The other diff types need at least a few lines to be helpful.
return want.count('\n') > 2 and got.count('\n') > 2
def output_difference(self, example, got, optionflags):
"""
Return a string describing the differences between the
expected output for a given example (`example`) and the actual
output (`got`). `optionflags` is the set of option flags used
to compare `want` and `got`.
"""
want = example.want
# If <BLANKLINE>s are being used, then replace blank lines
# with <BLANKLINE> in the actual output string.
if not (optionflags & DONT_ACCEPT_BLANKLINE):
got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
# Check if we should use diff.
if self._do_a_fancy_diff(want, got, optionflags):
# Split want & got into lines.
want_lines = want.splitlines(True) # True == keep line ends
got_lines = got.splitlines(True)
# Use difflib to find their differences.
if optionflags & REPORT_UDIFF:
diff = difflib.unified_diff(want_lines, got_lines, n=2)
diff = list(diff)[2:] # strip the diff header
kind = 'unified diff with -expected +actual'
elif optionflags & REPORT_CDIFF:
diff = difflib.context_diff(want_lines, got_lines, n=2)
diff = list(diff)[2:] # strip the diff header
kind = 'context diff with expected followed by actual'
elif optionflags & REPORT_NDIFF:
engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
diff = list(engine.compare(want_lines, got_lines))
kind = 'ndiff with -expected +actual'
else:
assert 0, 'Bad diff option'
# Remove trailing whitespace on diff output.
diff = [line.rstrip() + '\n' for line in diff]
return 'Differences (%s):\n' % kind + _indent(''.join(diff))
# If we're not using diff, then simply list the expected
# output followed by the actual output.
if want and got:
return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
elif want:
return 'Expected:\n%sGot nothing\n' % _indent(want)
elif got:
return 'Expected nothing\nGot:\n%s' % _indent(got)
else:
return 'Expected nothing\nGot nothing\n'
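# --- Illustrative sketch (editor's addition, not part of the original
# module): how check_output applies the option flags defined above.  The
# helper is never called; it only records the expected behaviour.
def _example_output_checker_flags():
    checker = OutputChecker()
    # An exact match always succeeds, regardless of flags.
    assert checker.check_output('42\n', '42\n', 0)
    # Bool/int leniency is on by default...
    assert checker.check_output('1\n', 'True\n', 0)
    # ...and can be switched off.
    assert not checker.check_output('1\n', 'True\n', DONT_ACCEPT_TRUE_FOR_1)
    # Runs of whitespace collapse under NORMALIZE_WHITESPACE.
    assert checker.check_output('1   2\n', '1 2\n', NORMALIZE_WHITESPACE)
    # '...' in the expected output matches any substring under ELLIPSIS.
    assert checker.check_output('[0, ..., 9]\n',
                                '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n',
                                ELLIPSIS)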
class DocTestFailure(Exception):
"""A DocTest example has failed in debugging mode.
The exception instance has variables:
- test: the DocTest object being run
    - example: the Example object that failed
- got: the actual output
"""
def __init__(self, test, example, got):
self.test = test
self.example = example
self.got = got
def __str__(self):
return str(self.test)
class UnexpectedException(Exception):
"""A DocTest example has encountered an unexpected exception
The exception instance has variables:
- test: the DocTest object being run
    - example: the Example object that failed
- exc_info: the exception info
"""
def __init__(self, test, example, exc_info):
self.test = test
self.example = example
self.exc_info = exc_info
def __str__(self):
return str(self.test)
class DebugRunner(DocTestRunner):
r"""Run doc tests but raise an exception as soon as there is a failure.
If an unexpected exception occurs, an UnexpectedException is raised.
It contains the test, the example, and the original exception:
>>> runner = DebugRunner(verbose=False)
>>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
... {}, 'foo', 'foo.py', 0)
>>> try:
... runner.run(test)
... except UnexpectedException, failure:
... pass
>>> failure.test is test
True
>>> failure.example.want
'42\n'
>>> exc_info = failure.exc_info
>>> raise exc_info[0], exc_info[1], exc_info[2]
Traceback (most recent call last):
...
KeyError
We wrap the original exception to give the calling application
access to the test and example information.
If the output doesn't match, then a DocTestFailure is raised:
>>> test = DocTestParser().get_doctest('''
... >>> x = 1
... >>> x
... 2
... ''', {}, 'foo', 'foo.py', 0)
>>> try:
... runner.run(test)
... except DocTestFailure, failure:
... pass
DocTestFailure objects provide access to the test:
>>> failure.test is test
True
As well as to the example:
>>> failure.example.want
'2\n'
and the actual output:
>>> failure.got
'1\n'
If a failure or error occurs, the globals are left intact:
>>> del test.globs['__builtins__']
>>> test.globs
{'x': 1}
>>> test = DocTestParser().get_doctest('''
... >>> x = 2
... >>> raise KeyError
... ''', {}, 'foo', 'foo.py', 0)
>>> runner.run(test)
Traceback (most recent call last):
...
UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>
>>> del test.globs['__builtins__']
>>> test.globs
{'x': 2}
But the globals are cleared if there is no error:
>>> test = DocTestParser().get_doctest('''
... >>> x = 2
... ''', {}, 'foo', 'foo.py', 0)
>>> runner.run(test)
(0, 1)
>>> test.globs
{}
"""
def run(self, test, compileflags=None, out=None, clear_globs=True):
r = DocTestRunner.run(self, test, compileflags, out, False)
if clear_globs:
test.globs.clear()
return r
def report_unexpected_exception(self, out, test, example, exc_info):
raise UnexpectedException(test, example, exc_info)
def report_failure(self, out, test, example, got):
raise DocTestFailure(test, example, got)
######################################################################
## 6. Test Functions
######################################################################
# These should be backwards compatible.
# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod.
master = None
def testmod(m=None, name=None, globs=None, verbose=None, isprivate=None,
report=True, optionflags=0, extraglobs=None,
raise_on_error=False, exclude_empty=False):
"""m=None, name=None, globs=None, verbose=None, isprivate=None,
report=True, optionflags=0, extraglobs=None, raise_on_error=False,
exclude_empty=False
Test examples in docstrings in functions and classes reachable
from module m (or the current module if m is not supplied), starting
with m.__doc__. Unless isprivate is specified, private names
are not skipped.
Also test examples reachable from dict m.__test__ if it exists and is
not None. m.__test__ maps names to functions, classes and strings;
function and class docstrings are tested even if the name is private;
strings are tested directly, as if they were docstrings.
Return (#failures, #tests).
See doctest.__doc__ for an overview.
Optional keyword arg "name" gives the name of the module; by default
use m.__name__.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use m.__dict__. A copy of this
dict is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "extraglobs" gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used. This is new in 2.4.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg "optionflags" or's together module constants,
and defaults to 0. This is new in 2.3. Possible values (see the
docs for details):
DONT_ACCEPT_TRUE_FOR_1
DONT_ACCEPT_BLANKLINE
NORMALIZE_WHITESPACE
ELLIPSIS
IGNORE_EXCEPTION_DETAIL
REPORT_UDIFF
REPORT_CDIFF
REPORT_NDIFF
REPORT_ONLY_FIRST_FAILURE
Optional keyword arg "raise_on_error" raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Deprecated in Python 2.4:
Optional keyword arg "isprivate" specifies a function used to
    determine whether a name is private. The default function
    treats all functions as public. Optionally, "isprivate" can be
set to doctest.is_private to skip over functions marked as private
using the underscore naming convention; see its docs for details.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
global master
if isprivate is not None:
warnings.warn("the isprivate argument is deprecated; "
"examine DocTestFinder.find() lists instead",
DeprecationWarning)
# If no module was given, then use __main__.
if m is None:
# DWA - m will still be None if this wasn't invoked from the command
# line, in which case the following TypeError is about as good an error
# as we should expect
m = sys.modules.get('__main__')
# Check that we were actually given a module.
if not inspect.ismodule(m):
raise TypeError("testmod: module required; %r" % (m,))
# If no name was given, then use the module's name.
if name is None:
name = m.__name__
# Find, parse, and run all tests in the given module.
finder = DocTestFinder(_namefilter=isprivate, exclude_empty=exclude_empty)
if raise_on_error:
runner = DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
runner.run(test)
if report:
runner.summarize()
if master is None:
master = runner
else:
master.merge(runner)
return runner.failures, runner.tries
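# --- Illustrative usage (editor's addition): the common ways testmod is
# invoked.  `mymodule` is a hypothetical module containing doctests; the
# helper is never called.
def _example_testmod_usage():
    import mymodule  # hypothetical
    # Test an explicit module, combining option flags with bitwise-or.
    failures, tries = testmod(mymodule,
                              optionflags=ELLIPSIS | NORMALIZE_WHITESPACE)
    return failures, tries
# Scripts usually self-test by calling testmod() with no arguments from an
# ``if __name__ == "__main__":`` block, which tests __main__ itself.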
def testfile(filename, module_relative=True, name=None, package=None,
globs=None, verbose=None, report=True, optionflags=0,
extraglobs=None, raise_on_error=False, parser=DocTestParser()):
"""
Test examples in the given file. Return (#failures, #tests).
Optional keyword arg "module_relative" specifies how filenames
should be interpreted:
- If "module_relative" is True (the default), then "filename"
specifies a module-relative path. By default, this path is
relative to the calling module's directory; but if the
"package" argument is specified, then it is relative to that
package. To ensure os-independence, "filename" should use
"/" characters to separate path segments, and should not
be an absolute path (i.e., it may not begin with "/").
- If "module_relative" is False, then "filename" specifies an
os-specific path. The path may be absolute or relative (to
the current working directory).
Optional keyword arg "name" gives the name of the test; by default
use the file's basename.
Optional keyword argument "package" is a Python package or the
name of a Python package whose directory should be used as the
base directory for a module relative filename. If no package is
specified, then the calling module's directory is used as the base
directory for module relative filenames. It is an error to
specify "package" if "module_relative" is False.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use {}. A copy of this dict
is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "extraglobs" gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg "optionflags" or's together module constants,
and defaults to 0. Possible values (see the docs for details):
DONT_ACCEPT_TRUE_FOR_1
DONT_ACCEPT_BLANKLINE
NORMALIZE_WHITESPACE
ELLIPSIS
IGNORE_EXCEPTION_DETAIL
REPORT_UDIFF
REPORT_CDIFF
REPORT_NDIFF
REPORT_ONLY_FIRST_FAILURE
Optional keyword arg "raise_on_error" raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Optional keyword arg "parser" specifies a DocTestParser (or
subclass) that should be used to extract tests from the files.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
global master
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path
if module_relative:
package = _normalize_module(package)
filename = _module_relative_path(package, filename)
# If no name was given, then use the file's name.
if name is None:
name = os.path.basename(filename)
# Assemble the globals.
if globs is None:
globs = {}
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
if raise_on_error:
runner = DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
# Read the file, convert it to a test, and run it.
s = open(filename).read()
test = parser.get_doctest(s, globs, name, filename, 0)
runner.run(test)
if report:
runner.summarize()
if master is None:
master = runner
else:
master.merge(runner)
return runner.failures, runner.tries
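# --- Illustrative usage (editor's addition): running the doctests embedded
# in a text file.  'example.txt' is a hypothetical module-relative path.
def _example_testfile_usage():
    failures, tries = testfile('example.txt',
                               optionflags=REPORT_NDIFF,
                               extraglobs={'width': 80})
    return failures, tries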
def run_docstring_examples(f, globs, verbose=False, name="NoName",
compileflags=None, optionflags=0):
"""
Test examples in the given object's docstring (`f`), using `globs`
as globals. Optional argument `name` is used in failure messages.
If the optional argument `verbose` is true, then generate output
even if there are no failures.
`compileflags` gives the set of flags that should be used by the
Python compiler when running the examples. If not specified, then
it will default to the set of future-import flags that apply to
`globs`.
Optional keyword arg `optionflags` specifies options for the
testing and output. See the documentation for `testmod` for more
information.
"""
# Find, parse, and run all tests in the given module.
finder = DocTestFinder(verbose=verbose, recurse=False)
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
for test in finder.find(f, name, globs=globs):
runner.run(test, compileflags=compileflags)
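# --- Illustrative usage (editor's addition): checking one object's
# docstring without scanning a whole module.  The function and its globs
# are made up for the sketch.
def _example_run_docstring_examples():
    def add(a, b):
        """
        >>> add(2, 2)
        4
        """
        return a + b
    # The globs dict must supply every name the examples reference.
    run_docstring_examples(add, {'add': add}, name='add')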
######################################################################
## 7. Tester
######################################################################
# This is provided only for backwards compatibility. It's not
# actually used in any way.
class Tester:
def __init__(self, mod=None, globs=None, verbose=None,
isprivate=None, optionflags=0):
warnings.warn("class Tester is deprecated; "
"use class doctest.DocTestRunner instead",
DeprecationWarning, stacklevel=2)
if mod is None and globs is None:
raise TypeError("Tester.__init__: must specify mod or globs")
if mod is not None and not inspect.ismodule(mod):
raise TypeError("Tester.__init__: mod must be a module; %r" %
(mod,))
if globs is None:
globs = mod.__dict__
self.globs = globs
self.verbose = verbose
self.isprivate = isprivate
self.optionflags = optionflags
self.testfinder = DocTestFinder(_namefilter=isprivate)
self.testrunner = DocTestRunner(verbose=verbose,
optionflags=optionflags)
def runstring(self, s, name):
test = DocTestParser().get_doctest(s, self.globs, name, None, None)
if self.verbose:
print "Running string", name
(f,t) = self.testrunner.run(test)
if self.verbose:
print f, "of", t, "examples failed in string", name
return (f,t)
def rundoc(self, object, name=None, module=None):
f = t = 0
tests = self.testfinder.find(object, name, module=module,
globs=self.globs)
for test in tests:
(f2, t2) = self.testrunner.run(test)
(f,t) = (f+f2, t+t2)
return (f,t)
def rundict(self, d, name, module=None):
import new
m = new.module(name)
m.__dict__.update(d)
if module is None:
module = False
return self.rundoc(m, name, module)
def run__test__(self, d, name):
import new
m = new.module(name)
m.__test__ = d
return self.rundoc(m, name)
def summarize(self, verbose=None):
return self.testrunner.summarize(verbose)
def merge(self, other):
self.testrunner.merge(other.testrunner)
######################################################################
## 8. Unittest Support
######################################################################
_unittest_reportflags = 0
def set_unittest_reportflags(flags):
"""Sets the unittest option flags.
The old flag is returned so that a runner could restore the old
value if it wished to:
>>> old = _unittest_reportflags
>>> set_unittest_reportflags(REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE) == old
True
>>> import doctest
>>> doctest._unittest_reportflags == (REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE)
True
Only reporting flags can be set:
>>> set_unittest_reportflags(ELLIPSIS)
Traceback (most recent call last):
...
ValueError: ('Only reporting flags allowed', 8)
>>> set_unittest_reportflags(old) == (REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE)
True
"""
global _unittest_reportflags
if (flags & REPORTING_FLAGS) != flags:
raise ValueError("Only reporting flags allowed", flags)
old = _unittest_reportflags
_unittest_reportflags = flags
return old
class DocTestCase(unittest.TestCase):
def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
checker=None):
unittest.TestCase.__init__(self)
self._dt_optionflags = optionflags
self._dt_checker = checker
self._dt_test = test
self._dt_setUp = setUp
self._dt_tearDown = tearDown
def setUp(self):
test = self._dt_test
if self._dt_setUp is not None:
self._dt_setUp(test)
def tearDown(self):
test = self._dt_test
if self._dt_tearDown is not None:
self._dt_tearDown(test)
test.globs.clear()
def runTest(self):
test = self._dt_test
old = sys.stdout
new = StringIO()
optionflags = self._dt_optionflags
if not (optionflags & REPORTING_FLAGS):
# The option flags don't include any reporting flags,
# so add the default reporting flags
optionflags |= _unittest_reportflags
runner = DocTestRunner(optionflags=optionflags,
checker=self._dt_checker, verbose=False)
try:
runner.DIVIDER = "-"*70
failures, tries = runner.run(
test, out=new.write, clear_globs=False)
finally:
sys.stdout = old
if failures:
raise self.failureException(self.format_failure(new.getvalue()))
def format_failure(self, err):
test = self._dt_test
if test.lineno is None:
lineno = 'unknown line number'
else:
lineno = '%s' % test.lineno
lname = '.'.join(test.name.split('.')[-1:])
return ('Failed doctest test for %s\n'
' File "%s", line %s, in %s\n\n%s'
% (test.name, test.filename, lineno, lname, err)
)
def debug(self):
r"""Run the test case without results and without catching exceptions
The unit test framework includes a debug method on test cases
and test suites to support post-mortem debugging. The test code
is run in such a way that errors are not caught. This way a
caller can catch the errors and initiate post-mortem debugging.
The DocTestCase provides a debug method that raises
        UnexpectedException errors if there is an unexpected
exception:
>>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
... {}, 'foo', 'foo.py', 0)
>>> case = DocTestCase(test)
>>> try:
... case.debug()
... except UnexpectedException, failure:
... pass
The UnexpectedException contains the test, the example, and
the original exception:
>>> failure.test is test
True
>>> failure.example.want
'42\n'
>>> exc_info = failure.exc_info
>>> raise exc_info[0], exc_info[1], exc_info[2]
Traceback (most recent call last):
...
KeyError
If the output doesn't match, then a DocTestFailure is raised:
>>> test = DocTestParser().get_doctest('''
... >>> x = 1
... >>> x
... 2
... ''', {}, 'foo', 'foo.py', 0)
>>> case = DocTestCase(test)
>>> try:
... case.debug()
... except DocTestFailure, failure:
... pass
DocTestFailure objects provide access to the test:
>>> failure.test is test
True
As well as to the example:
>>> failure.example.want
'2\n'
and the actual output:
>>> failure.got
'1\n'
"""
self.setUp()
runner = DebugRunner(optionflags=self._dt_optionflags,
checker=self._dt_checker, verbose=False)
runner.run(self._dt_test)
self.tearDown()
def id(self):
return self._dt_test.name
def __repr__(self):
name = self._dt_test.name.split('.')
return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
__str__ = __repr__
def shortDescription(self):
return "Doctest: " + self._dt_test.name
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
**options):
"""
Convert doctest tests for a module to a unittest test suite.
This converts each documentation string in a module that
contains doctest tests to a unittest test case. If any of the
tests in a doc string fail, then the test case fails. An exception
is raised showing the name of the file containing the test and a
(sometimes approximate) line number.
The `module` argument provides the module to be tested. The argument
can be either a module or a module name.
If no argument is given, the calling module is used.
A number of options may be provided as keyword arguments:
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
"""
if test_finder is None:
test_finder = DocTestFinder()
module = _normalize_module(module)
tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
if globs is None:
globs = module.__dict__
if not tests:
# Why do we want to do this? Because it reveals a bug that might
# otherwise be hidden.
raise ValueError(module, "has no tests")
tests.sort()
suite = unittest.TestSuite()
for test in tests:
if len(test.examples) == 0:
continue
if not test.filename:
filename = module.__file__
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
test.filename = filename
suite.addTest(DocTestCase(test, **options))
return suite
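# --- Illustrative usage (editor's addition): exposing a module's doctests
# to a unittest runner.  `mymodule` is a hypothetical module name.
def _example_doctest_suite():
    suite = DocTestSuite('mymodule', optionflags=ELLIPSIS)
    unittest.TextTestRunner(verbosity=2).run(suite)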
class DocFileCase(DocTestCase):
def id(self):
return '_'.join(self._dt_test.name.split('.'))
def __repr__(self):
return self._dt_test.filename
__str__ = __repr__
def format_failure(self, err):
return ('Failed doctest test for %s\n File "%s", line 0\n\n%s'
% (self._dt_test.name, self._dt_test.filename, err)
)
def DocFileTest(path, module_relative=True, package=None,
globs=None, parser=DocTestParser(), **options):
if globs is None:
globs = {}
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path.
if module_relative:
package = _normalize_module(package)
path = _module_relative_path(package, path)
# Find the file and read it.
name = os.path.basename(path)
doc = open(path).read()
# Convert it to a test, and wrap it in a DocFileCase.
test = parser.get_doctest(doc, globs, name, path, 0)
return DocFileCase(test, **options)
def DocFileSuite(*paths, **kw):
"""A unittest suite for one or more doctest files.
The path to each doctest file is given as a string; the
interpretation of that string depends on the keyword argument
"module_relative".
A number of options may be provided as keyword arguments:
module_relative
If "module_relative" is True, then the given file paths are
interpreted as os-independent module-relative paths. By
default, these paths are relative to the calling module's
directory; but if the "package" argument is specified, then
they are relative to that package. To ensure os-independence,
"filename" should use "/" characters to separate path
segments, and may not be an absolute path (i.e., it may not
begin with "/").
If "module_relative" is False, then the given file paths are
interpreted as os-specific paths. These paths may be absolute
or relative (to the current working directory).
package
A Python package or the name of a Python package whose directory
should be used as the base directory for module relative paths.
If "package" is not specified, then the calling module's
directory is used as the base directory for module relative
filenames. It is an error to specify "package" if
"module_relative" is False.
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
parser
A DocTestParser (or subclass) that should be used to extract
tests from the files.
"""
suite = unittest.TestSuite()
# We do this here so that _normalize_module is called at the right
# level. If it were called in DocFileTest, then this function
# would be the caller and we might guess the package incorrectly.
if kw.get('module_relative', True):
kw['package'] = _normalize_module(kw.get('package'))
for path in paths:
suite.addTest(DocFileTest(path, **kw))
return suite
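# --- Illustrative usage (editor's addition): collecting several doctest
# files into one suite.  The file names are hypothetical module-relative
# paths; a base directory could be set with the ``package`` keyword.
def _example_docfile_suite():
    suite = DocFileSuite('intro.txt', 'advanced.txt',
                         optionflags=NORMALIZE_WHITESPACE)
    unittest.TextTestRunner().run(suite)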
######################################################################
## 9. Debugging Support
######################################################################
def script_from_examples(s):
r"""Extract script from text with examples.
Converts text with examples to a Python script. Example input is
converted to regular code. Example output and all other words
are converted to comments:
>>> text = '''
... Here are examples of simple math.
...
... Python has super accurate integer addition
...
... >>> 2 + 2
... 5
...
... And very friendly error messages:
...
... >>> 1/0
... To Infinity
... And
... Beyond
...
... You can use logic if you want:
...
... >>> if 0:
... ... blah
... ... blah
... ...
...
... Ho hum
... '''
>>> print script_from_examples(text)
# Here are examples of simple math.
#
# Python has super accurate integer addition
#
2 + 2
# Expected:
## 5
#
# And very friendly error messages:
#
1/0
# Expected:
## To Infinity
## And
## Beyond
#
# You can use logic if you want:
#
if 0:
blah
blah
#
# Ho hum
"""
output = []
for piece in DocTestParser().parse(s):
if isinstance(piece, Example):
# Add the example's source code (strip trailing NL)
output.append(piece.source[:-1])
# Add the expected output:
want = piece.want
if want:
output.append('# Expected:')
output += ['## '+l for l in want.split('\n')[:-1]]
else:
# Add non-example text.
output += [_comment_line(l)
for l in piece.split('\n')[:-1]]
# Trim junk on both ends.
while output and output[-1] == '#':
output.pop()
while output and output[0] == '#':
output.pop(0)
# Combine the output, and return it.
return '\n'.join(output)
def testsource(module, name):
"""Extract the test sources from a doctest docstring as a script.
Provide the module (or dotted name of the module) containing the
test to be debugged and the name (within the module) of the object
with the doc string with tests to be debugged.
"""
module = _normalize_module(module)
tests = DocTestFinder().find(module)
test = [t for t in tests if t.name == name]
if not test:
raise ValueError(name, "not found in tests")
test = test[0]
testsrc = script_from_examples(test.docstring)
return testsrc
def debug_src(src, pm=False, globs=None):
"""Debug a single doctest docstring, in argument `src`'"""
testsrc = script_from_examples(src)
debug_script(testsrc, pm, globs)
def debug_script(src, pm=False, globs=None):
"Debug a test script. `src` is the script, as a string."
import pdb
    # Note that tempfile.NamedTemporaryFile() cannot be used.  As the
# docs say, a file so created cannot be opened by name a second time
# on modern Windows boxes, and execfile() needs to open it.
srcfilename = tempfile.mktemp(".py", "doctestdebug")
f = open(srcfilename, 'w')
f.write(src)
f.close()
try:
if globs:
globs = globs.copy()
else:
globs = {}
if pm:
try:
execfile(srcfilename, globs, globs)
except:
print sys.exc_info()[1]
pdb.post_mortem(sys.exc_info()[2])
else:
# Note that %r is vital here. '%s' instead can, e.g., cause
# backslashes to get treated as metacharacters on Windows.
pdb.run("execfile(%r)" % srcfilename, globs, globs)
finally:
os.remove(srcfilename)
def debug(module, name, pm=False):
"""Debug a single doctest docstring.
Provide the module (or dotted name of the module) containing the
test to be debugged and the name (within the module) of the object
with the docstring with tests to be debugged.
"""
module = _normalize_module(module)
testsrc = testsource(module, name)
debug_script(testsrc, pm, module.__dict__)
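# --- Illustrative usage (editor's addition): the debugging entry points
# above, shown with a hypothetical module and object name.
def _example_debug_session():
    # Re-run the named docstring's examples under pdb, stopping at the
    # first statement of the generated script.
    debug('mymodule', 'mymodule.MyClass.method')
    # Or post-mortem the first unexpected exception instead:
    debug('mymodule', 'mymodule.MyClass.method', pm=True)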
######################################################################
## 10. Example Usage
######################################################################
class _TestClass:
"""
A pointless class, for sanity-checking of docstring testing.
Methods:
square()
get()
>>> _TestClass(13).get() + _TestClass(-12).get()
1
>>> hex(_TestClass(13).square().get())
'0xa9'
"""
def __init__(self, val):
"""val -> _TestClass object with associated value val.
>>> t = _TestClass(123)
>>> print t.get()
123
"""
self.val = val
def square(self):
"""square() -> square TestClass's associated value
>>> _TestClass(13).square().get()
169
"""
self.val = self.val ** 2
return self
def get(self):
"""get() -> return TestClass's associated value.
>>> x = _TestClass(-42)
>>> print x.get()
-42
"""
return self.val
__test__ = {"_TestClass": _TestClass,
"string": r"""
Example of a string object, searched as-is.
>>> x = 1; y = 2
>>> x + y, x * y
(3, 2)
""",
"bool-int equivalence": r"""
In 2.2, boolean expressions displayed
0 or 1. By default, we still accept
them. This can be disabled by passing
DONT_ACCEPT_TRUE_FOR_1 to the new
optionflags argument.
>>> 4 == 4
1
>>> 4 == 4
True
>>> 4 > 4
0
>>> 4 > 4
False
""",
"blank lines": r"""
Blank lines can be marked with <BLANKLINE>:
>>> print 'foo\n\nbar\n'
foo
<BLANKLINE>
bar
<BLANKLINE>
""",
"ellipsis": r"""
If the ellipsis flag is used, then '...' can be used to
elide substrings in the desired output:
>>> print range(1000) #doctest: +ELLIPSIS
[0, 1, 2, ..., 999]
""",
"whitespace normalization": r"""
If the whitespace normalization flag is used, then
differences in whitespace are ignored.
>>> print range(30) #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29]
""",
}
def _test():
r = unittest.TextTestRunner()
r.run(DocTestSuite())
if __name__ == "__main__":
_test()
| mit | -7,089,454,714,428,406,000 | 36.305066 | 79 | 0.570209 | false |
pearlcoin-project/pearlcoin | qa/rpc-tests/keypool.py | 86 | 3165 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the wallet keypool and its interaction with wallet encryption/locking.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class KeyPoolTest(BitcoinTestFramework):
def run_test(self):
nodes = self.nodes
addr_before_encrypting = nodes[0].getnewaddress()
addr_before_encrypting_data = nodes[0].validateaddress(addr_before_encrypting)
wallet_info_old = nodes[0].getwalletinfo()
assert(addr_before_encrypting_data['hdmasterkeyid'] == wallet_info_old['hdmasterkeyid'])
# Encrypt wallet and wait to terminate
nodes[0].encryptwallet('test')
bitcoind_processes[0].wait()
# Restart node 0
nodes[0] = start_node(0, self.options.tmpdir)
# Keep creating keys
addr = nodes[0].getnewaddress()
addr_data = nodes[0].validateaddress(addr)
wallet_info = nodes[0].getwalletinfo()
assert(addr_before_encrypting_data['hdmasterkeyid'] != wallet_info['hdmasterkeyid'])
assert(addr_data['hdmasterkeyid'] == wallet_info['hdmasterkeyid'])
try:
addr = nodes[0].getnewaddress()
raise AssertionError('Keypool should be exhausted after one address')
except JSONRPCException as e:
assert(e.error['code']==-12)
# put three new keys in the keypool
nodes[0].walletpassphrase('test', 12000)
nodes[0].keypoolrefill(3)
nodes[0].walletlock()
# drain the keys
addr = set()
addr.add(nodes[0].getrawchangeaddress())
addr.add(nodes[0].getrawchangeaddress())
addr.add(nodes[0].getrawchangeaddress())
addr.add(nodes[0].getrawchangeaddress())
# assert that four unique addresses were returned
assert(len(addr) == 4)
# the next one should fail
try:
addr = nodes[0].getrawchangeaddress()
raise AssertionError('Keypool should be exhausted after three addresses')
except JSONRPCException as e:
assert(e.error['code']==-12)
# refill keypool with three new addresses
nodes[0].walletpassphrase('test', 1)
nodes[0].keypoolrefill(3)
# test walletpassphrase timeout
time.sleep(1.1)
assert_equal(nodes[0].getwalletinfo()["unlocked_until"], 0)
# drain them by mining
nodes[0].generate(1)
nodes[0].generate(1)
nodes[0].generate(1)
nodes[0].generate(1)
try:
nodes[0].generate(1)
            raise AssertionError('Keypool should be exhausted after three addresses')
except JSONRPCException as e:
assert(e.error['code']==-12)
def __init__(self):
super().__init__()
self.setup_clean_chain = False
self.num_nodes = 1
def setup_network(self):
self.nodes = self.setup_nodes()
if __name__ == '__main__':
KeyPoolTest().main()
| mit | -1,153,189,105,015,846,400 | 36.235294 | 96 | 0.62812 | false |
petemounce/ansible | lib/ansible/module_utils/rax.py | 27 | 12110 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by
# Ansible still belong to the author of the module, and may assign their own
# license to the complete work.
#
# Copyright (c), Michael DeHaan <[email protected]>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import re
from uuid import UUID
from ansible.module_utils.basic import BOOLEANS
from ansible.module_utils.six import text_type, binary_type
FINAL_STATUSES = ('ACTIVE', 'ERROR')
VOLUME_STATUS = ('available', 'attaching', 'creating', 'deleting', 'in-use',
'error', 'error_deleting')
CLB_ALGORITHMS = ['RANDOM', 'LEAST_CONNECTIONS', 'ROUND_ROBIN',
'WEIGHTED_LEAST_CONNECTIONS', 'WEIGHTED_ROUND_ROBIN']
CLB_PROTOCOLS = ['DNS_TCP', 'DNS_UDP', 'FTP', 'HTTP', 'HTTPS', 'IMAPS',
'IMAPv4', 'LDAP', 'LDAPS', 'MYSQL', 'POP3', 'POP3S', 'SMTP',
'TCP', 'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM', 'SFTP']
NON_CALLABLES = (text_type, binary_type, bool, dict, int, list, type(None))
PUBLIC_NET_ID = "00000000-0000-0000-0000-000000000000"
SERVICE_NET_ID = "11111111-1111-1111-1111-111111111111"
def rax_slugify(value):
"""Prepend a key with rax_ and normalize the key name"""
return 'rax_%s' % (re.sub('[^\w-]', '_', value).lower().lstrip('_'))
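# For example (editor's note; any pyrax attribute name behaves the same):
#   rax_slugify('OS-EXT-STS:vm_state') -> 'rax_os-ext-sts_vm_state'
#   rax_slugify('accessIPv4') -> 'rax_accessipv4'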
def rax_clb_node_to_dict(obj):
"""Function to convert a CLB Node object to a dict"""
if not obj:
return {}
node = obj.to_dict()
node['id'] = obj.id
node['weight'] = obj.weight
return node
def rax_to_dict(obj, obj_type='standard'):
"""Generic function to convert a pyrax object to a dict
obj_type values:
standard
clb
server
"""
instance = {}
for key in dir(obj):
value = getattr(obj, key)
if obj_type == 'clb' and key == 'nodes':
instance[key] = []
for node in value:
instance[key].append(rax_clb_node_to_dict(node))
elif (isinstance(value, list) and len(value) > 0 and
not isinstance(value[0], NON_CALLABLES)):
instance[key] = []
for item in value:
instance[key].append(rax_to_dict(item))
elif (isinstance(value, NON_CALLABLES) and not key.startswith('_')):
if obj_type == 'server':
if key == 'image':
if not value:
instance['rax_boot_source'] = 'volume'
else:
instance['rax_boot_source'] = 'local'
key = rax_slugify(key)
instance[key] = value
if obj_type == 'server':
for attr in ['id', 'accessIPv4', 'name', 'status']:
instance[attr] = instance.get(rax_slugify(attr))
return instance
def rax_find_bootable_volume(module, rax_module, server, exit=True):
"""Find a servers bootable volume"""
cs = rax_module.cloudservers
cbs = rax_module.cloud_blockstorage
server_id = rax_module.utils.get_id(server)
volumes = cs.volumes.get_server_volumes(server_id)
bootable_volumes = []
for volume in volumes:
vol = cbs.get(volume)
if module.boolean(vol.bootable):
bootable_volumes.append(vol)
if not bootable_volumes:
if exit:
module.fail_json(msg='No bootable volumes could be found for '
'server %s' % server_id)
else:
return False
elif len(bootable_volumes) > 1:
if exit:
module.fail_json(msg='Multiple bootable volumes found for server '
'%s' % server_id)
else:
return False
return bootable_volumes[0]
def rax_find_image(module, rax_module, image, exit=True):
"""Find a server image by ID or Name"""
cs = rax_module.cloudservers
try:
UUID(image)
except ValueError:
try:
image = cs.images.find(human_id=image)
        except (cs.exceptions.NotFound,
                cs.exceptions.NoUniqueMatch):
try:
image = cs.images.find(name=image)
except (cs.exceptions.NotFound,
cs.exceptions.NoUniqueMatch):
if exit:
module.fail_json(msg='No matching image found (%s)' %
image)
else:
return False
return rax_module.utils.get_id(image)
def rax_find_volume(module, rax_module, name):
"""Find a Block storage volume by ID or name"""
cbs = rax_module.cloud_blockstorage
try:
UUID(name)
volume = cbs.get(name)
except ValueError:
try:
volume = cbs.find(name=name)
except rax_module.exc.NotFound:
volume = None
except Exception as e:
module.fail_json(msg='%s' % e)
return volume
def rax_find_network(module, rax_module, network):
"""Find a cloud network by ID or name"""
cnw = rax_module.cloud_networks
try:
UUID(network)
except ValueError:
if network.lower() == 'public':
return cnw.get_server_networks(PUBLIC_NET_ID)
elif network.lower() == 'private':
return cnw.get_server_networks(SERVICE_NET_ID)
else:
try:
network_obj = cnw.find_network_by_label(network)
except (rax_module.exceptions.NetworkNotFound,
rax_module.exceptions.NetworkLabelNotUnique):
module.fail_json(msg='No matching network found (%s)' %
network)
else:
return cnw.get_server_networks(network_obj)
else:
return cnw.get_server_networks(network)
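# Editor's note: the special labels resolve to Rackspace's well-known
# networks: 'public' maps to PUBLIC_NET_ID (PublicNet) and 'private' to
# SERVICE_NET_ID (ServiceNet), before any label lookup is attempted.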
def rax_find_server(module, rax_module, server):
"""Find a Cloud Server by ID or name"""
cs = rax_module.cloudservers
try:
UUID(server)
server = cs.servers.get(server)
except ValueError:
servers = cs.servers.list(search_opts=dict(name='^%s$' % server))
if not servers:
module.fail_json(msg='No Server was matched by name, '
'try using the Server ID instead')
if len(servers) > 1:
module.fail_json(msg='Multiple servers matched by name, '
'try using the Server ID instead')
# We made it this far, grab the first and hopefully only server
# in the list
server = servers[0]
return server
def rax_find_loadbalancer(module, rax_module, loadbalancer):
"""Find a Cloud Load Balancer by ID or name"""
clb = rax_module.cloud_loadbalancers
try:
found = clb.get(loadbalancer)
    except Exception:
found = []
for lb in clb.list():
if loadbalancer == lb.name:
found.append(lb)
if not found:
module.fail_json(msg='No loadbalancer was matched')
if len(found) > 1:
module.fail_json(msg='Multiple loadbalancers matched')
# We made it this far, grab the first and hopefully only item
# in the list
found = found[0]
return found
def rax_argument_spec():
"""Return standard base dictionary used for the argument_spec
argument in AnsibleModule
"""
return dict(
api_key=dict(type='str', aliases=['password'], no_log=True),
auth_endpoint=dict(type='str'),
credentials=dict(type='path', aliases=['creds_file']),
env=dict(type='str'),
identity_type=dict(type='str', default='rackspace'),
region=dict(type='str'),
tenant_id=dict(type='str'),
tenant_name=dict(type='str'),
username=dict(type='str'),
verify_ssl=dict(type='bool'),
)
def rax_required_together():
"""Return the default list used for the required_together argument to
AnsibleModule"""
return [['api_key', 'username']]
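# --- Illustrative sketch (editor's addition): how a cloud module combines
# these helpers.  The ``server`` option is a hypothetical module-specific
# argument; the helper is never called.
def _example_module_boilerplate():
    from ansible.module_utils.basic import AnsibleModule
    argument_spec = rax_argument_spec()
    argument_spec.update(dict(server=dict(type='str')))
    return AnsibleModule(argument_spec=argument_spec,
                         required_together=rax_required_together())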
def setup_rax_module(module, rax_module, region_required=True):
"""Set up pyrax in a standard way for all modules"""
rax_module.USER_AGENT = 'ansible/%s %s' % (module.ansible_version,
rax_module.USER_AGENT)
api_key = module.params.get('api_key')
auth_endpoint = module.params.get('auth_endpoint')
credentials = module.params.get('credentials')
env = module.params.get('env')
identity_type = module.params.get('identity_type')
region = module.params.get('region')
tenant_id = module.params.get('tenant_id')
tenant_name = module.params.get('tenant_name')
username = module.params.get('username')
verify_ssl = module.params.get('verify_ssl')
if env is not None:
rax_module.set_environment(env)
rax_module.set_setting('identity_type', identity_type)
if verify_ssl is not None:
rax_module.set_setting('verify_ssl', verify_ssl)
if auth_endpoint is not None:
rax_module.set_setting('auth_endpoint', auth_endpoint)
if tenant_id is not None:
rax_module.set_setting('tenant_id', tenant_id)
if tenant_name is not None:
rax_module.set_setting('tenant_name', tenant_name)
try:
username = username or os.environ.get('RAX_USERNAME')
if not username:
username = rax_module.get_setting('keyring_username')
if username:
api_key = 'USE_KEYRING'
if not api_key:
api_key = os.environ.get('RAX_API_KEY')
credentials = (credentials or os.environ.get('RAX_CREDENTIALS') or
os.environ.get('RAX_CREDS_FILE'))
region = (region or os.environ.get('RAX_REGION') or
rax_module.get_setting('region'))
except KeyError as e:
module.fail_json(msg='Unable to load %s' % e.message)
try:
if api_key and username:
if api_key == 'USE_KEYRING':
rax_module.keyring_auth(username, region=region)
else:
rax_module.set_credentials(username, api_key=api_key,
region=region)
elif credentials:
credentials = os.path.expanduser(credentials)
rax_module.set_credential_file(credentials, region=region)
else:
raise Exception('No credentials supplied!')
except Exception as e:
if e.message:
msg = str(e.message)
else:
msg = repr(e)
module.fail_json(msg=msg)
if region_required and region not in rax_module.regions:
module.fail_json(msg='%s is not a valid region, must be one of: %s' %
(region, ','.join(rax_module.regions)))
return rax_module
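# --- Illustrative sketch (editor's addition): typical use inside a
# module's main(), assuming pyrax is installed.
def _example_setup(module):
    try:
        import pyrax
    except ImportError:
        module.fail_json(msg='pyrax is required for this module')
    return setup_rax_module(module, pyrax)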
| gpl-3.0 | -1,512,380,352,839,354,600 | 35.475904 | 78 | 0.601569 | false |
SciTools/cube_browser | lib/cube_browser/explorer.py | 1 | 15222 | from collections import OrderedDict
import glob
import os
try:
# Python 3
from urllib.parse import urlparse, parse_qs
except ImportError:
# Python 2
from urlparse import urlparse, parse_qs
import IPython.display
import cartopy.crs as ccrs
import ipywidgets
import iris
import iris.plot as iplt
import matplotlib.pyplot as plt
import traitlets
import cube_browser
# Clear output, such as autosave disable notification.
IPython.display.clear_output()
class FilePicker(object):
"""
File picker widgets.
"""
def __init__(self, initial_value='', default=''):
if initial_value == '':
try:
initial_value = iris.sample_data_path('')
except ValueError:
initial_value = ''
# Define the file system path for input files.
self._path = ipywidgets.Text(
description='Path:',
value=initial_value,
width="100%")
# Observe the path.
self._path.observe(self._handle_path, names='value')
# Use default path value to initialise file options.
options = []
if os.path.exists(self._path.value):
options = glob.glob('{}/*'.format(self._path.value))
options.sort()
default_list = []
for default_value in default.split(','):
if default_value in options:
default_list.append(default_value)
default_tuple = tuple(default_list)
# Defines the files selected to be loaded.
self._files = ipywidgets.SelectMultiple(
description='Files:',
options=OrderedDict([(os.path.basename(f), f)
for f in options]),
value=default_tuple,
width="100%"
)
self.deleter = ipywidgets.Button(description='delete tab',
height='32px', width='75px')
hbox = ipywidgets.HBox(children=[self._files, self.deleter])
self._box = ipywidgets.Box(children=[self._path, hbox], width="100%")
@property
def files(self):
"""The files from the FilePicker."""
return self._files.value
def _handle_path(self, sender):
"""Path box action."""
if os.path.exists(self._path.value):
options = glob.glob('{}/*'.format(self._path.value))
options.sort()
self._files.value = ()
self._files.options = OrderedDict([(os.path.basename(f), f)
for f in options])
else:
self._files.options = OrderedDict()
self._files.width = "100%"
@property
def box(self):
"""The IPywidgets box to display."""
return self._box
class PlotControl(object):
"""Control widgets for a plot."""
def __init__(self):
self.mpl_kwargs = {}
# Defines the cube which is to be plotted.
self.cube_picker = ipywidgets.Dropdown(description='Cubes:',
options=('None', None),
value=None,
width='50%')
# Define the type of cube browser plot required
self.plot_type = ipywidgets.Dropdown(
description='Plot type:',
options={'pcolormesh': cube_browser.Pcolormesh,
'contour': cube_browser.Contour,
'contourf': cube_browser.Contourf},
value=cube_browser.Pcolormesh)
self.x_coord = ipywidgets.Dropdown(
description='X Coord',
options=('None', None))
self.y_coord = ipywidgets.Dropdown(
description='Y Coord',
options=('None', None))
self.cmap = ipywidgets.Text(
description='colour map')
# Handle events:
self.cube_picker.observe(self._handle_cube_selection,
names='value')
self.cmap.observe(self._handle_cmap, names='value')
self.plot_type.observe(self._handle_plot_type, names='value')
self._box = ipywidgets.Box(children=[self.cube_picker,
self.plot_type,
self.x_coord,
self.y_coord,
self.cmap])
def _handle_cube_selection(self, sender):
"""Cube selector action."""
if self.cube_picker.value is not None:
cube = self.cube_picker.cubes[self.cube_picker.value]
options = [('None', None)]
options += [(coord.name(), coord.name()) for coord in
cube.coords(dim_coords=True)]
ndims = cube.ndim
for i in range(ndims):
options.append(('dim{}'.format(i), i))
self.x_coord.options = options
if (cube.coords(axis='X', dim_coords=True) and
cube.coord(axis='X', dim_coords=True).name() in
[o[1] for o in self.x_coord.options]):
default = cube.coord(axis='X', dim_coords=True).name()
self.x_coord.value = default
self.y_coord.options = options
if (cube.coords(axis='Y', dim_coords=True) and
cube.coord(axis='Y', dim_coords=True).name() in
[o[1] for o in self.y_coord.options]):
default = cube.coord(axis='Y', dim_coords=True).name()
self.y_coord.value = default
def _handle_cmap(self, sender):
# This tests that the colour map string is valid: else warns.
from matplotlib.cm import cmap_d
cmap_string = self.cmap.value
if cmap_string and cmap_string in cmap_d.keys():
self.mpl_kwargs['cmap'] = cmap_string
self.cmap.description = 'colour map'
else:
self.cmap.description = 'not a cmap'
def _handle_plot_type(self, sender):
cmap = self.cmap.value
self.mpl_kwargs = {}
if cmap:
self.mpl_kwargs['cmap'] = cmap
@property
def box(self):
"""The IPywidgets box to display."""
return self._box
class Explorer(traitlets.HasTraits):
"""
IPyWidgets and workflow for exploring collections of cubes.
"""
_cubes = traitlets.List()
def __init__(self, url=''):
self.file_pickers = []
if url:
o = urlparse(url)
query = parse_qs(o.query)
pwd, = query.get('pwd', [''])
for fname in query.get('files', []):
self.file_pickers.append(FilePicker(pwd, os.path.join(pwd, fname)))
for fpath in query.get('folders', []):
self.file_pickers.append(FilePicker(fpath))
if not self.file_pickers:
self.file_pickers.append(FilePicker())
# Define load action.
self._load_button = ipywidgets.Button(description="load these files")
self._load_button.on_click(self._handle_load)
self._file_tab_button = ipywidgets.Button(description="add tab")
self._file_tab_button.on_click(self._handle_new_tab)
self._subplots = ipywidgets.RadioButtons(description='subplots',
options=[1, 2])
self._subplots.observe(self._handle_nplots, names='value')
# Plot action button.
self._plot_button = ipywidgets.Button(description="Plot my cube")
self._plot_button.on_click(self._goplot)
# Configure layout of the Explorer.
self._plot_container = ipywidgets.Box()
# Define a Tab container for the main controls in the browse interface.
children = [fp.box for fp in self.file_pickers]
self.ftabs = ipywidgets.Tab(children=children)
children = [self._load_button, self._file_tab_button]
self.bbox = ipywidgets.HBox(children=children)
children = [self.ftabs, self.bbox]
self._file_picker_tabs = ipywidgets.Box(children=children)
# Define the plot controls, start with 1 (self._subplots default)
self.plot_controls = [PlotControl()]
pcc_children = [pc.box for pc in self.plot_controls]
self._plot_control_container = ipywidgets.Tab(children=pcc_children)
self._plot_control_container.set_title(0, 'Plot Axes 0')
# Define an Accordian for files, subplots and plots
acc_children = [self._file_picker_tabs, self._subplots,
self._plot_control_container]
self._accord = ipywidgets.Accordion(children=acc_children)
self._accord.set_title(0, 'Files')
self._accord.set_title(1, 'SubPlots')
self._accord.set_title(2, 'Plots')
# Initialise cubes container
self._cubes = []
# Display the browse interface.
IPython.display.display(self._accord)
IPython.display.display(self._plot_button)
IPython.display.display(self._plot_container)
@property
def mpl_kwargs(self):
"""
The list of dictionaries of matplotlib keyword arguements in use
the PlotControls.
"""
return [pc.mpl_kwargs for pc in self.plot_controls]
@property
def cubes(self):
"""The list of cubes the explorer is currently working with."""
return self._cubes
@cubes.setter
def cubes(self, new_cubes):
"""To update the list of cubes the explorer is working with."""
self._cubes = new_cubes
@traitlets.observe('_cubes')
def update_cubes_list(self, change=None):
"""
Update the list of cubes available in the Explorer.
Assigning an updated list into `cubes` automatically runs this.
"""
# Build options list, using index values into the cube list.
# This avoids the loading of cube's data payload when the
# widget tests equality on selection.
options = [('{}: {}'.format(i, cube.summary(shorten=True)), i)
for i, cube in enumerate(self._cubes)]
for pc in self.plot_controls:
# Provide the cubes list to the cube_picker, to index into.
pc.cube_picker.cubes = self._cubes
pc.cube_picker.options = [('None', None)] + pc.cube_picker.options
pc.cube_picker.value = None
pc.cube_picker.options = [('None', None)] + options
if options:
pc.cube_picker.value = options[0][1]
pc.cube_picker.options = options
def _handle_load(self, sender):
"""Load button action."""
IPython.display.clear_output()
sender.description = 'loading......'
        fpfs = [fp.files for fp in self.file_pickers]
        # Flatten the per-picker selections into one list of file paths
        # (avoids reduce, which moved to functools in Python 3).
        selected_files = [fname for files in fpfs for fname in files]
# Reassigning into self._cubes updates the cube_pickers.
self._cubes = iris.load(selected_files)
self._cubes = self._cubes.concatenate()
sender.description = 'files loaded, reload'
IPython.display.clear_output()
def _handle_new_tab(self, sender):
"""Add new file tab."""
self.file_pickers.append(FilePicker())
self._update_filepickers()
def _update_filepickers(self):
        children = [fp.box for fp in self.file_pickers]
        # Give each picker's delete button its own index so the handler
        # knows which tab to remove.
        for i, fp in enumerate(self.file_pickers):
            fp.deleter.index = i
            fp.deleter.on_click(self._handle_delete_tab)
self.ftabs = ipywidgets.Tab(children=children)
self._file_picker_tabs.children = [self.ftabs, self.bbox]
def _handle_delete_tab(self, sender):
"""remove a file tab"""
self.file_pickers.pop(sender.index)
self._update_filepickers()
def _handle_nplots(self, sender):
if self._subplots.value == 1:
self.plot_controls = [self.plot_controls[0]]
elif self._subplots.value == 2:
self.plot_controls = [self.plot_controls[0], PlotControl()]
pcc_children = [pc.box for pc in self.plot_controls]
self._plot_control_container.children = pcc_children
for i in range(self._subplots.value):
label = 'Plot Axes {}'.format(i)
self._plot_control_container.set_title(i, label)
self.update_cubes_list()
def _goplot(self, sender):
"""Create the cube_browser.Plot2D and cube_browser.Browser"""
IPython.display.clear_output()
fig = plt.figure(figsize=(16, 7))
        # Matplotlib subplot grid code (rows * 100 + cols * 10); the
        # per-plot index is added below.
        sub_plots = 110
        if self._subplots.value == 2:
            sub_plots = 120
confs = []
        for spl, pc in enumerate(self.plot_controls, start=1):
cube = None
if pc.cube_picker.value is not None:
cube = self.cubes[pc.cube_picker.value]
if cube and spl <= self._subplots.value:
pc_x_name = pc.x_coord.value
pc_y_name = pc.y_coord.value
x_coords = cube.coords(axis='X', dim_coords=True)
if len(x_coords) == 1:
x_name = x_coords[0].name()
else:
x_name = None
y_coords = cube.coords(axis='Y', dim_coords=True)
if len(y_coords) == 1:
y_name = y_coords[0].name()
else:
y_name = None
if x_name == pc_x_name and y_name == pc_y_name:
proj = iplt.default_projection(cube) or ccrs.PlateCarree()
ax = fig.add_subplot(sub_plots + spl, projection=proj)
# If the spatial extent is small, use high-res coastlines
extent = iplt.default_projection_extent(cube)
x0, y0 = ccrs.PlateCarree().transform_point(extent[0],
extent[2],
proj)
x1, y1 = ccrs.PlateCarree().transform_point(extent[1],
extent[3],
proj)
if x1-x0 < 20 and y1-y0 < 20:
ax.coastlines(resolution='10m')
elif x1-x0 < 180 and y1-y0 < 90:
ax.coastlines(resolution='50m')
else:
ax.coastlines()
else:
                    # Non-spatial axes: use a plain (non-projected) subplot.
                    ax = fig.add_subplot(sub_plots + spl)
plot_type = pc.plot_type
coords = [pc_x_name, pc_y_name]
confs.append(plot_type.value(cube, ax, coords=coords,
**pc.mpl_kwargs))
title = cube.name().replace('_', ' ').capitalize()
ax.set_title(title)
self.browser = cube_browser.Browser(confs)
self.browser.on_change(None)
# For each PlotControl, assign the plot's mpl_kwargs back to
# that PlotControl.
for pc, plot in zip(self.plot_controls, confs):
pc.mpl_kwargs = plot.kwargs
self._plot_container.children = [self.browser.form]
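# Illustrative notebook usage (not part of the original source): a
# minimal sketch assuming the enclosing class is named ``Explorer``
# and that iris-loadable files are picked through the UI.
#
#     explorer = Explorer()   # builds and displays the browse widgets
#     # ... select files, press the load button, choose cubes/axes ...
#     explorer.cubes          # the loaded (and concatenated) CubeList
#     explorer.mpl_kwargs     # per-PlotControl matplotlib keyword args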
| bsd-3-clause | -8,911,332,586,415,223,000 | 38.952756 | 83 | 0.543687 | false |
meduz/scikit-learn | benchmarks/bench_plot_lasso_path.py | 84 | 4005 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but has a fat infinite tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
                'n_informative': n_features // 10,
                'effective_rank': min(n_samples, n_features) // 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
    samples_range = np.linspace(10, 2000, 5).astype(int)
    features_range = np.linspace(10, 2000, 5).astype(int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
        # dummy point plot to stick the legend to, since surface plots do not
# support legends (yet?)
# ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
# ax.legend()
i += 1
plt.show()
| bsd-3-clause | -3,816,992,223,906,853,400 | 33.230769 | 76 | 0.546567 | false |
naturali/tensorflow | tensorflow/contrib/learn/python/learn/__init__.py | 4 | 2319 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""High level API for learning with TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn import estimators
from tensorflow.contrib.learn.python.learn import graph_actions
from tensorflow.contrib.learn.python.learn import learn_io as io
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn import monitors
from tensorflow.contrib.learn.python.learn import ops
from tensorflow.contrib.learn.python.learn import preprocessing
from tensorflow.contrib.learn.python.learn import utils
from tensorflow.contrib.learn.python.learn.dataframe import *
from tensorflow.contrib.learn.python.learn.estimators import *
from tensorflow.contrib.learn.python.learn.evaluable import Evaluable
from tensorflow.contrib.learn.python.learn.experiment import Experiment
from tensorflow.contrib.learn.python.learn.graph_actions import evaluate
from tensorflow.contrib.learn.python.learn.graph_actions import infer
from tensorflow.contrib.learn.python.learn.graph_actions import run_feeds
from tensorflow.contrib.learn.python.learn.graph_actions import run_n
from tensorflow.contrib.learn.python.learn.graph_actions import train
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.contrib.learn.python.learn.monitors import NanLossDuringTrainingError
from tensorflow.contrib.learn.python.learn.trainable import Trainable
# pylint: enable=wildcard-import
| apache-2.0 | 5,560,394,871,281,778,000 | 49.413043 | 85 | 0.793014 | false |
aaronsw/watchdog | vendor/pyExcelerator-0.6.3a/build/lib/pyExcelerator/Worksheet.py | 3 | 43893 | #!/usr/bin/env python
# -*- coding: windows-1251 -*-
# Copyright (C) 2005 Roman V. Kiseliov
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# 3. All advertising materials mentioning features or use of this
# software must display the following acknowledgment:
# "This product includes software developed by
# Roman V. Kiseliov <[email protected]>."
#
# 4. Redistributions of any form whatsoever must retain the following
# acknowledgment:
# "This product includes software developed by
# Roman V. Kiseliov <[email protected]>."
#
# THIS SOFTWARE IS PROVIDED BY Roman V. Kiseliov ``AS IS'' AND ANY
# EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Roman V. Kiseliov OR
# ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
'''
BOF
UNCALCED
INDEX
Calculation Settings Block
PRINTHEADERS
PRINTGRIDLINES
GRIDSET
GUTS
DEFAULTROWHEIGHT
WSBOOL
Page Settings Block
Worksheet Protection Block
DEFCOLWIDTH
COLINFO
SORT
DIMENSIONS
Row Blocks
WINDOW2
SCL
PANE
SELECTION
STANDARDWIDTH
MERGEDCELLS
LABELRANGES
PHONETIC
Conditional Formatting Table
Hyperlink Table
Data Validity Table
SHEETLAYOUT (BIFF8X only)
SHEETPROTECTION (BIFF8X only)
RANGEPROTECTION (BIFF8X only)
EOF
'''
__rev_id__ = """$Id: Worksheet.py,v 1.7 2005/08/11 08:53:48 rvk Exp $"""
import BIFFRecords
import Bitmap
import Formatting
import Style
from Deco import *
class Worksheet(object):
from Workbook import Workbook
#################################################################
## Constructor
#################################################################
@accepts(object, (str, unicode), Workbook)
def __init__(self, sheetname, parent_book):
import Row
self.Row = Row.Row
import Column
self.Column = Column.Column
self.__name = sheetname
self.__parent = parent_book
self.__rows = {}
self.__cols = {}
self.__merged_ranges = []
self.__bmp_rec = ''
self.__show_formulas = 0
self.__show_grid = 1
self.__show_headers = 1
self.__panes_frozen = 0
self.__show_empty_as_zero = 1
self.__auto_colour_grid = 1
self.__cols_right_to_left = 0
self.__show_outline = 1
self.__remove_splits = 0
self.__selected = 0
self.__hidden = 0
self.__page_preview = 0
self.__first_visible_row = 0
self.__first_visible_col = 0
self.__grid_colour = 0x40
self.__preview_magn = 0
self.__normal_magn = 0
self.__vert_split_pos = None
self.__horz_split_pos = None
self.__vert_split_first_visible = None
self.__horz_split_first_visible = None
self.__split_active_pane = None
self.__row_gut_width = 0
self.__col_gut_height = 0
self.__show_auto_page_breaks = 1
self.__dialogue_sheet = 0
self.__auto_style_outline = 0
self.__outline_below = 0
self.__outline_right = 0
self.__fit_num_pages = 0
self.__show_row_outline = 1
self.__show_col_outline = 1
self.__alt_expr_eval = 0
self.__alt_formula_entries = 0
self.__row_default_height = 0x00FF
self.__col_default_width = 0x0008
self.__calc_mode = 1
self.__calc_count = 0x0064
self.__RC_ref_mode = 1
self.__iterations_on = 0
self.__delta = 0.001
self.__save_recalc = 0
self.__print_headers = 0
self.__print_grid = 0
self.__grid_set = 1
self.__vert_page_breaks = []
self.__horz_page_breaks = []
self.__header_str = '&P'
self.__footer_str = '&F'
self.__print_centered_vert = 0
self.__print_centered_horz = 1
self.__left_margin = 0.3 #0.5
self.__right_margin = 0.3 #0.5
self.__top_margin = 0.61 #1.0
self.__bottom_margin = 0.37 #1.0
self.__paper_size_code = 9 # A4
self.__print_scaling = 100
self.__start_page_number = 1
self.__fit_width_to_pages = 1
self.__fit_height_to_pages = 1
self.__print_in_rows = 1
self.__portrait = 1
self.__print_not_colour = 0
self.__print_draft = 0
self.__print_notes = 0
self.__print_notes_at_end = 0
self.__print_omit_errors = 0
self.__print_hres = 0x012C # 300 dpi
self.__print_vres = 0x012C # 300 dpi
self.__header_margin = 0.1
self.__footer_margin = 0.1
self.__copies_num = 1
self.__wnd_protect = 0
self.__obj_protect = 0
self.__protect = 0
self.__scen_protect = 0
self.__password = ''
#################################################################
## Properties, "getters", "setters"
#################################################################
@accepts(object, (str, unicode))
def set_name(self, value):
self.__name = value
def get_name(self):
return self.__name
name = property(get_name, set_name)
#################################################################
def get_parent(self):
return self.__parent
parent = property(get_parent)
#################################################################
def get_rows(self):
return self.__rows
rows = property(get_rows)
#################################################################
def get_cols(self):
return self.__cols
cols = property(get_cols)
#################################################################
def get_merged_ranges(self):
return self.__merged_ranges
merged_ranges = property(get_merged_ranges)
#################################################################
def get_bmp_rec(self):
return self.__bmp_rec
bmp_rec = property(get_bmp_rec)
#################################################################
@accepts(object, bool)
def set_show_formulas(self, value):
self.__show_formulas = int(value)
def get_show_formulas(self):
return bool(self.__show_formulas)
show_formulas = property(get_show_formulas, set_show_formulas)
#################################################################
@accepts(object, bool)
def set_show_grid(self, value):
self.__show_grid = int(value)
def get_show_grid(self):
return bool(self.__show_grid)
show_grid = property(get_show_grid, set_show_grid)
#################################################################
@accepts(object, bool)
def set_show_headers(self, value):
self.__show_headers = int(value)
def get_show_headers(self):
return bool(self.__show_headers)
show_headers = property(get_show_headers, set_show_headers)
#################################################################
@accepts(object, bool)
def set_panes_frozen(self, value):
self.__panes_frozen = int(value)
def get_panes_frozen(self):
return bool(self.__panes_frozen)
panes_frozen = property(get_panes_frozen, set_panes_frozen)
#################################################################
@accepts(object, bool)
def set_show_empty_as_zero(self, value):
self.__show_empty_as_zero = int(value)
def get_show_empty_as_zero(self):
return bool(self.__show_empty_as_zero)
show_empty_as_zero = property(get_show_empty_as_zero, set_show_empty_as_zero)
#################################################################
@accepts(object, bool)
def set_auto_colour_grid(self, value):
self.__auto_colour_grid = int(value)
def get_auto_colour_grid(self):
return bool(self.__auto_colour_grid)
auto_colour_grid = property(get_auto_colour_grid, set_auto_colour_grid)
#################################################################
@accepts(object, bool)
def set_cols_right_to_left(self, value):
self.__cols_right_to_left = int(value)
def get_cols_right_to_left(self):
return bool(self.__cols_right_to_left)
cols_right_to_left = property(get_cols_right_to_left, set_cols_right_to_left)
#################################################################
@accepts(object, bool)
def set_show_outline(self, value):
self.__show_outline = int(value)
def get_show_outline(self):
return bool(self.__show_outline)
show_outline = property(get_show_outline, set_show_outline)
#################################################################
@accepts(object, bool)
def set_remove_splits(self, value):
self.__remove_splits = int(value)
def get_remove_splits(self):
return bool(self.__remove_splits)
remove_splits = property(get_remove_splits, set_remove_splits)
#################################################################
@accepts(object, bool)
def set_selected(self, value):
self.__selected = int(value)
def get_selected(self):
return bool(self.__selected)
selected = property(get_selected, set_selected)
#################################################################
@accepts(object, bool)
def set_hidden(self, value):
self.__hidden = int(value)
def get_hidden(self):
return bool(self.__hidden)
hidden = property(get_hidden, set_hidden)
#################################################################
@accepts(object, bool)
def set_page_preview(self, value):
self.__page_preview = int(value)
def get_page_preview(self):
return bool(self.__page_preview)
page_preview = property(get_page_preview, set_page_preview)
#################################################################
@accepts(object, int)
def set_first_visible_row(self, value):
self.__first_visible_row = value
def get_first_visible_row(self):
return self.__first_visible_row
first_visible_row = property(get_first_visible_row, set_first_visible_row)
#################################################################
@accepts(object, int)
def set_first_visible_col(self, value):
self.__first_visible_col = value
def get_first_visible_col(self):
return self.__first_visible_col
first_visible_col = property(get_first_visible_col, set_first_visible_col)
#################################################################
@accepts(object, int)
def set_grid_colour(self, value):
self.__grid_colour = value
def get_grid_colour(self):
return self.__grid_colour
grid_colour = property(get_grid_colour, set_grid_colour)
#################################################################
@accepts(object, int)
def set_preview_magn(self, value):
self.__preview_magn = value
def get_preview_magn(self):
return self.__preview_magn
preview_magn = property(get_preview_magn, set_preview_magn)
#################################################################
@accepts(object, int)
def set_normal_magn(self, value):
self.__normal_magn = value
def get_normal_magn(self):
return self.__normal_magn
normal_magn = property(get_normal_magn, set_normal_magn)
#################################################################
@accepts(object, int)
def set_vert_split_pos(self, value):
self.__vert_split_pos = abs(value)
def get_vert_split_pos(self):
return self.__vert_split_pos
vert_split_pos = property(get_vert_split_pos, set_vert_split_pos)
#################################################################
@accepts(object, int)
def set_horz_split_pos(self, value):
self.__horz_split_pos = abs(value)
def get_horz_split_pos(self):
return self.__horz_split_pos
horz_split_pos = property(get_horz_split_pos, set_horz_split_pos)
#################################################################
@accepts(object, int)
def set_vert_split_first_visible(self, value):
self.__vert_split_first_visible = abs(value)
def get_vert_split_first_visible(self):
return self.__vert_split_first_visible
vert_split_first_visible = property(get_vert_split_first_visible, set_vert_split_first_visible)
#################################################################
@accepts(object, int)
def set_horz_split_first_visible(self, value):
self.__horz_split_first_visible = abs(value)
def get_horz_split_first_visible(self):
return self.__horz_split_first_visible
horz_split_first_visible = property(get_horz_split_first_visible, set_horz_split_first_visible)
#################################################################
#@accepts(object, int)
#def set_split_active_pane(self, value):
# self.__split_active_pane = abs(value) & 0x03
#
#def get_split_active_pane(self):
# return self.__split_active_pane
#
#split_active_pane = property(get_split_active_pane, set_split_active_pane)
#################################################################
#@accepts(object, int)
#def set_row_gut_width(self, value):
# self.__row_gut_width = value
#
#def get_row_gut_width(self):
# return self.__row_gut_width
#
#row_gut_width = property(get_row_gut_width, set_row_gut_width)
#
#################################################################
#
#@accepts(object, int)
#def set_col_gut_height(self, value):
# self.__col_gut_height = value
#
#def get_col_gut_height(self):
# return self.__col_gut_height
#
#col_gut_height = property(get_col_gut_height, set_col_gut_height)
#
#################################################################
@accepts(object, bool)
def set_show_auto_page_breaks(self, value):
self.__show_auto_page_breaks = int(value)
def get_show_auto_page_breaks(self):
return bool(self.__show_auto_page_breaks)
show_auto_page_breaks = property(get_show_auto_page_breaks, set_show_auto_page_breaks)
#################################################################
@accepts(object, bool)
def set_dialogue_sheet(self, value):
self.__dialogue_sheet = int(value)
def get_dialogue_sheet(self):
return bool(self.__dialogue_sheet)
dialogue_sheet = property(get_dialogue_sheet, set_dialogue_sheet)
#################################################################
@accepts(object, bool)
def set_auto_style_outline(self, value):
self.__auto_style_outline = int(value)
def get_auto_style_outline(self):
return bool(self.__auto_style_outline)
auto_style_outline = property(get_auto_style_outline, set_auto_style_outline)
#################################################################
@accepts(object, bool)
def set_outline_below(self, value):
self.__outline_below = int(value)
def get_outline_below(self):
return bool(self.__outline_below)
outline_below = property(get_outline_below, set_outline_below)
#################################################################
@accepts(object, bool)
def set_outline_right(self, value):
self.__outline_right = int(value)
def get_outline_right(self):
return bool(self.__outline_right)
outline_right = property(get_outline_right, set_outline_right)
#################################################################
@accepts(object, int)
def set_fit_num_pages(self, value):
self.__fit_num_pages = value
def get_fit_num_pages(self):
return self.__fit_num_pages
fit_num_pages = property(get_fit_num_pages, set_fit_num_pages)
#################################################################
@accepts(object, bool)
def set_show_row_outline(self, value):
self.__show_row_outline = int(value)
def get_show_row_outline(self):
return bool(self.__show_row_outline)
show_row_outline = property(get_show_row_outline, set_show_row_outline)
#################################################################
@accepts(object, bool)
def set_show_col_outline(self, value):
self.__show_col_outline = int(value)
def get_show_col_outline(self):
return bool(self.__show_col_outline)
show_col_outline = property(get_show_col_outline, set_show_col_outline)
#################################################################
@accepts(object, bool)
def set_alt_expr_eval(self, value):
self.__alt_expr_eval = int(value)
def get_alt_expr_eval(self):
return bool(self.__alt_expr_eval)
alt_expr_eval = property(get_alt_expr_eval, set_alt_expr_eval)
#################################################################
@accepts(object, bool)
def set_alt_formula_entries(self, value):
self.__alt_formula_entries = int(value)
def get_alt_formula_entries(self):
return bool(self.__alt_formula_entries)
alt_formula_entries = property(get_alt_formula_entries, set_alt_formula_entries)
#################################################################
@accepts(object, int)
def set_row_default_height(self, value):
self.__row_default_height = value
def get_row_default_height(self):
return self.__row_default_height
row_default_height = property(get_row_default_height, set_row_default_height)
#################################################################
@accepts(object, int)
def set_col_default_width(self, value):
self.__col_default_width = value
def get_col_default_width(self):
return self.__col_default_width
col_default_width = property(get_col_default_width, set_col_default_width)
#################################################################
@accepts(object, int)
def set_calc_mode(self, value):
self.__calc_mode = value & 0x03
def get_calc_mode(self):
return self.__calc_mode
calc_mode = property(get_calc_mode, set_calc_mode)
#################################################################
@accepts(object, int)
def set_calc_count(self, value):
self.__calc_count = value
def get_calc_count(self):
return self.__calc_count
calc_count = property(get_calc_count, set_calc_count)
#################################################################
@accepts(object, bool)
def set_RC_ref_mode(self, value):
self.__RC_ref_mode = int(value)
def get_RC_ref_mode(self):
return bool(self.__RC_ref_mode)
RC_ref_mode = property(get_RC_ref_mode, set_RC_ref_mode)
#################################################################
@accepts(object, bool)
def set_iterations_on(self, value):
self.__iterations_on = int(value)
def get_iterations_on(self):
return bool(self.__iterations_on)
iterations_on = property(get_iterations_on, set_iterations_on)
#################################################################
@accepts(object, float)
def set_delta(self, value):
self.__delta = value
def get_delta(self):
return self.__delta
delta = property(get_delta, set_delta)
#################################################################
@accepts(object, bool)
def set_save_recalc(self, value):
self.__save_recalc = int(value)
def get_save_recalc(self):
return bool(self.__save_recalc)
save_recalc = property(get_save_recalc, set_save_recalc)
#################################################################
@accepts(object, bool)
def set_print_headers(self, value):
self.__print_headers = int(value)
def get_print_headers(self):
return bool(self.__print_headers)
print_headers = property(get_print_headers, set_print_headers)
#################################################################
@accepts(object, bool)
def set_print_grid(self, value):
self.__print_grid = int(value)
def get_print_grid(self):
return bool(self.__print_grid)
print_grid = property(get_print_grid, set_print_grid)
#################################################################
#
#@accepts(object, bool)
#def set_grid_set(self, value):
# self.__grid_set = int(value)
#
#def get_grid_set(self):
# return bool(self.__grid_set)
#
#grid_set = property(get_grid_set, set_grid_set)
#
#################################################################
@accepts(object, list)
def set_vert_page_breaks(self, value):
self.__vert_page_breaks = value
def get_vert_page_breaks(self):
return self.__vert_page_breaks
vert_page_breaks = property(get_vert_page_breaks, set_vert_page_breaks)
#################################################################
@accepts(object, list)
def set_horz_page_breaks(self, value):
self.__horz_page_breaks = value
def get_horz_page_breaks(self):
return self.__horz_page_breaks
horz_page_breaks = property(get_horz_page_breaks, set_horz_page_breaks)
#################################################################
@accepts(object, (str, unicode))
def set_header_str(self, value):
self.__header_str = value
def get_header_str(self):
return self.__header_str
header_str = property(get_header_str, set_header_str)
#################################################################
@accepts(object, (str, unicode))
def set_footer_str(self, value):
self.__footer_str = value
def get_footer_str(self):
return self.__footer_str
footer_str = property(get_footer_str, set_footer_str)
#################################################################
@accepts(object, bool)
def set_print_centered_vert(self, value):
self.__print_centered_vert = int(value)
def get_print_centered_vert(self):
return bool(self.__print_centered_vert)
print_centered_vert = property(get_print_centered_vert, set_print_centered_vert)
#################################################################
@accepts(object, bool)
def set_print_centered_horz(self, value):
self.__print_centered_horz = int(value)
def get_print_centered_horz(self):
return bool(self.__print_centered_horz)
print_centered_horz = property(get_print_centered_horz, set_print_centered_horz)
#################################################################
@accepts(object, float)
def set_left_margin(self, value):
self.__left_margin = value
def get_left_margin(self):
return self.__left_margin
left_margin = property(get_left_margin, set_left_margin)
#################################################################
@accepts(object, float)
def set_right_margin(self, value):
self.__right_margin = value
def get_right_margin(self):
return self.__right_margin
right_margin = property(get_right_margin, set_right_margin)
#################################################################
@accepts(object, float)
def set_top_margin(self, value):
self.__top_margin = value
def get_top_margin(self):
return self.__top_margin
top_margin = property(get_top_margin, set_top_margin)
#################################################################
@accepts(object, float)
def set_bottom_margin(self, value):
self.__bottom_margin = value
def get_bottom_margin(self):
return self.__bottom_margin
bottom_margin = property(get_bottom_margin, set_bottom_margin)
#################################################################
@accepts(object, int)
def set_paper_size_code(self, value):
self.__paper_size_code = value
def get_paper_size_code(self):
return self.__paper_size_code
paper_size_code = property(get_paper_size_code, set_paper_size_code)
#################################################################
@accepts(object, int)
def set_print_scaling(self, value):
self.__print_scaling = value
def get_print_scaling(self):
return self.__print_scaling
print_scaling = property(get_print_scaling, set_print_scaling)
#################################################################
@accepts(object, int)
def set_start_page_number(self, value):
self.__start_page_number = value
def get_start_page_number(self):
return self.__start_page_number
start_page_number = property(get_start_page_number, set_start_page_number)
#################################################################
@accepts(object, int)
def set_fit_width_to_pages(self, value):
self.__fit_width_to_pages = value
def get_fit_width_to_pages(self):
return self.__fit_width_to_pages
fit_width_to_pages = property(get_fit_width_to_pages, set_fit_width_to_pages)
#################################################################
@accepts(object, int)
def set_fit_height_to_pages(self, value):
self.__fit_height_to_pages = value
def get_fit_height_to_pages(self):
return self.__fit_height_to_pages
fit_height_to_pages = property(get_fit_height_to_pages, set_fit_height_to_pages)
#################################################################
@accepts(object, bool)
def set_print_in_rows(self, value):
self.__print_in_rows = int(value)
def get_print_in_rows(self):
return bool(self.__print_in_rows)
print_in_rows = property(get_print_in_rows, set_print_in_rows)
#################################################################
@accepts(object, bool)
def set_portrait(self, value):
self.__portrait = int(value)
def get_portrait(self):
return bool(self.__portrait)
portrait = property(get_portrait, set_portrait)
#################################################################
@accepts(object, bool)
def set_print_colour(self, value):
self.__print_not_colour = int(not value)
def get_print_colour(self):
return not bool(self.__print_not_colour)
print_colour = property(get_print_colour, set_print_colour)
#################################################################
@accepts(object, bool)
def set_print_draft(self, value):
self.__print_draft = int(value)
def get_print_draft(self):
return bool(self.__print_draft)
print_draft = property(get_print_draft, set_print_draft)
#################################################################
@accepts(object, bool)
def set_print_notes(self, value):
self.__print_notes = int(value)
def get_print_notes(self):
return bool(self.__print_notes)
print_notes = property(get_print_notes, set_print_notes)
#################################################################
@accepts(object, bool)
def set_print_notes_at_end(self, value):
self.__print_notes_at_end = int(value)
def get_print_notes_at_end(self):
return bool(self.__print_notes_at_end)
print_notes_at_end = property(get_print_notes_at_end, set_print_notes_at_end)
#################################################################
@accepts(object, bool)
def set_print_omit_errors(self, value):
self.__print_omit_errors = int(value)
def get_print_omit_errors(self):
return bool(self.__print_omit_errors)
print_omit_errors = property(get_print_omit_errors, set_print_omit_errors)
#################################################################
@accepts(object, int)
def set_print_hres(self, value):
self.__print_hres = value
def get_print_hres(self):
return self.__print_hres
print_hres = property(get_print_hres, set_print_hres)
#################################################################
@accepts(object, int)
def set_print_vres(self, value):
self.__print_vres = value
def get_print_vres(self):
return self.__print_vres
print_vres = property(get_print_vres, set_print_vres)
#################################################################
@accepts(object, float)
def set_header_margin(self, value):
self.__header_margin = value
def get_header_margin(self):
return self.__header_margin
header_margin = property(get_header_margin, set_header_margin)
#################################################################
@accepts(object, float)
def set_footer_margin(self, value):
self.__footer_margin = value
def get_footer_margin(self):
return self.__footer_margin
footer_margin = property(get_footer_margin, set_footer_margin)
#################################################################
@accepts(object, int)
def set_copies_num(self, value):
self.__copies_num = value
def get_copies_num(self):
return self.__copies_num
copies_num = property(get_copies_num, set_copies_num)
##################################################################
@accepts(object, bool)
def set_wnd_protect(self, value):
self.__wnd_protect = int(value)
def get_wnd_protect(self):
return bool(self.__wnd_protect)
wnd_protect = property(get_wnd_protect, set_wnd_protect)
#################################################################
@accepts(object, bool)
def set_obj_protect(self, value):
self.__obj_protect = int(value)
def get_obj_protect(self):
return bool(self.__obj_protect)
obj_protect = property(get_obj_protect, set_obj_protect)
#################################################################
@accepts(object, bool)
def set_protect(self, value):
self.__protect = int(value)
def get_protect(self):
return bool(self.__protect)
protect = property(get_protect, set_protect)
#################################################################
@accepts(object, bool)
def set_scen_protect(self, value):
self.__scen_protect = int(value)
def get_scen_protect(self):
return bool(self.__scen_protect)
scen_protect = property(get_scen_protect, set_scen_protect)
#################################################################
@accepts(object, str)
def set_password(self, value):
self.__password = value
def get_password(self):
return self.__password
password = property(get_password, set_password)
##################################################################
## Methods
##################################################################
def write(self, r, c, label="", style=Style.XFStyle()):
self.row(r).write(c, label, style)
def merge(self, r1, r2, c1, c2, style=Style.XFStyle()):
self.row(r1).write_blanks(c1, c2, style)
for r in range(r1+1, r2+1):
self.row(r).write_blanks(c1, c2, style)
self.__merged_ranges.append((r1, r2, c1, c2))
def write_merge(self, r1, r2, c1, c2, label="", style=Style.XFStyle()):
self.merge(r1, r2, c1, c2, style)
self.write(r1, c1, label, style)
def insert_bitmap(self, filename, row, col, x = 0, y = 0, scale_x = 1, scale_y = 1):
bmp = Bitmap.ImDataBmpRecord(filename)
obj = Bitmap.ObjBmpRecord(row, col, self, bmp, x, y, scale_x, scale_y)
self.__bmp_rec += obj.get() + bmp.get()
def col(self, indx):
if indx not in self.__cols:
self.__cols[indx] = self.Column(indx, self)
return self.__cols[indx]
def row(self, indx):
if indx not in self.__rows:
self.__rows[indx] = self.Row(indx, self)
return self.__rows[indx]
def row_height(self, row): # in pixels
if row in self.__rows:
return self.__rows[row].get_height_in_pixels()
else:
return 17
def col_width(self, col): # in pixels
#if col in self.__cols:
# return self.__cols[col].width_in_pixels()
#else:
return 64
def get_labels_count(self):
result = 0
for r in self.__rows:
result += self.__rows[r].get_str_count()
return result
##################################################################
## BIFF records generation
##################################################################
def __bof_rec(self):
return BIFFRecords.Biff8BOFRecord(BIFFRecords.Biff8BOFRecord.WORKSHEET).get()
def __guts_rec(self):
row_visible_levels = 0
if len(self.__rows) != 0:
row_visible_levels = max([self.__rows[r].level for r in self.__rows]) + 1
col_visible_levels = 0
if len(self.__cols) != 0:
col_visible_levels = max([self.__cols[c].level for c in self.__cols]) + 1
return BIFFRecords.GutsRecord(self.__row_gut_width, self.__col_gut_height, row_visible_levels, col_visible_levels).get()
def __wsbool_rec(self):
options = 0x00
options |= (self.__show_auto_page_breaks & 0x01) << 0
options |= (self.__dialogue_sheet & 0x01) << 4
options |= (self.__auto_style_outline & 0x01) << 5
options |= (self.__outline_below & 0x01) << 6
options |= (self.__outline_right & 0x01) << 7
options |= (self.__fit_num_pages & 0x01) << 8
options |= (self.__show_row_outline & 0x01) << 10
options |= (self.__show_col_outline & 0x01) << 11
options |= (self.__alt_expr_eval & 0x01) << 14
options |= (self.__alt_formula_entries & 0x01) << 15
return BIFFRecords.WSBoolRecord(options).get()
def __eof_rec(self):
return BIFFRecords.EOFRecord().get()
def __colinfo_rec(self):
result = ''
for col in self.__cols:
result += self.__cols[col].get_biff_record()
return result
def __dimensions_rec(self):
first_used_row = 0
last_used_row = 0
first_used_col = 0
last_used_col = 0
if len(self.__rows) > 0:
first_used_row = min(self.__rows)
last_used_row = max(self.__rows)
first_used_col = 0xFFFFFFFF
last_used_col = 0
for r in self.__rows:
_min = self.__rows[r].get_min_col()
_max = self.__rows[r].get_max_col()
if _min < first_used_col:
first_used_col = _min
if _max > last_used_col:
last_used_col = _max
return BIFFRecords.DimensionsRecord(first_used_row, last_used_row, first_used_col, last_used_col).get()
def __window2_rec(self):
options = 0
options |= (self.__show_formulas & 0x01) << 0
options |= (self.__show_grid & 0x01) << 1
options |= (self.__show_headers & 0x01) << 2
options |= (self.__panes_frozen & 0x01) << 3
options |= (self.__show_empty_as_zero & 0x01) << 4
options |= (self.__auto_colour_grid & 0x01) << 5
options |= (self.__cols_right_to_left & 0x01) << 6
options |= (self.__show_outline & 0x01) << 7
options |= (self.__remove_splits & 0x01) << 8
options |= (self.__selected & 0x01) << 9
options |= (self.__hidden & 0x01) << 10
options |= (self.__page_preview & 0x01) << 11
return BIFFRecords.Window2Record(options, self.__first_visible_row, self.__first_visible_col,
self.__grid_colour,
self.__preview_magn, self.__normal_magn).get()
def __panes_rec(self):
if self.__vert_split_pos is None and self.__horz_split_pos is None:
return ""
if self.__vert_split_pos is None:
self.__vert_split_pos = 0
if self.__horz_split_pos is None:
self.__horz_split_pos = 0
if self.__panes_frozen:
if self.__vert_split_first_visible is None:
self.__vert_split_first_visible = self.__vert_split_pos
if self.__horz_split_first_visible is None:
self.__horz_split_first_visible = self.__horz_split_pos
else:
if self.__vert_split_first_visible is None:
self.__vert_split_first_visible = 0
if self.__horz_split_first_visible is None:
self.__horz_split_first_visible = 0
            # Inspired by pyXLWriter: for unfrozen splits BIFF expects
            # window coordinate units rather than row/column counts, so
            # the positions are scaled here (constants as in pyXLWriter).
            self.__horz_split_pos = 20*self.__horz_split_pos + 255
            self.__vert_split_pos = int(113.879*self.__vert_split_pos + 390)
if self.__vert_split_pos > 0 and self.__horz_split_pos > 0:
self.__split_active_pane = 0
elif self.__vert_split_pos > 0 and self.__horz_split_pos == 0:
self.__split_active_pane = 1
elif self.__vert_split_pos == 0 and self.__horz_split_pos > 0:
self.__split_active_pane = 2
else:
self.__split_active_pane = 3
result = BIFFRecords.PanesRecord(self.__vert_split_pos,
self.__horz_split_pos,
self.__horz_split_first_visible,
self.__vert_split_first_visible,
self.__split_active_pane).get()
return result
def __row_blocks_rec(self):
        # This function takes almost 99% of the overall execution time
        # when a file is saved, hence the append-then-join pattern below
        # instead of repeated string concatenation.
        result = []
i = 0
used_rows = self.__rows.keys()
while i < len(used_rows):
j = 0
while i < len(used_rows) and (j < 32):
result.append(self.__rows[used_rows[i]].get_row_biff_data())
result.append(self.__rows[used_rows[i]].get_cells_biff_data())
j += 1
i += 1
return ''.join(result)
def __merged_rec(self):
return BIFFRecords.MergedCellsRecord(self.__merged_ranges).get()
def __bitmaps_rec(self):
return self.__bmp_rec
def __calc_settings_rec(self):
result = ''
result += BIFFRecords.CalcModeRecord(self.__calc_mode & 0x01).get()
result += BIFFRecords.CalcCountRecord(self.__calc_count & 0xFFFF).get()
result += BIFFRecords.RefModeRecord(self.__RC_ref_mode & 0x01).get()
result += BIFFRecords.IterationRecord(self.__iterations_on & 0x01).get()
result += BIFFRecords.DeltaRecord(self.__delta).get()
result += BIFFRecords.SaveRecalcRecord(self.__save_recalc & 0x01).get()
return result
def __print_settings_rec(self):
result = ''
result += BIFFRecords.PrintHeadersRecord(self.__print_headers).get()
result += BIFFRecords.PrintGridLinesRecord(self.__print_grid).get()
result += BIFFRecords.GridSetRecord(self.__grid_set).get()
result += BIFFRecords.HorizontalPageBreaksRecord(self.__horz_page_breaks).get()
result += BIFFRecords.VerticalPageBreaksRecord(self.__vert_page_breaks).get()
result += BIFFRecords.HeaderRecord(self.__header_str).get()
result += BIFFRecords.FooterRecord(self.__footer_str).get()
result += BIFFRecords.HCenterRecord(self.__print_centered_horz).get()
result += BIFFRecords.VCenterRecord(self.__print_centered_vert).get()
result += BIFFRecords.LeftMarginRecord(self.__left_margin).get()
result += BIFFRecords.RightMarginRecord(self.__right_margin).get()
result += BIFFRecords.TopMarginRecord(self.__top_margin).get()
result += BIFFRecords.BottomMarginRecord(self.__bottom_margin).get()
setup_page_options = (self.__print_in_rows & 0x01) << 0
setup_page_options |= (self.__portrait & 0x01) << 1
setup_page_options |= (0x00 & 0x01) << 2
setup_page_options |= (self.__print_not_colour & 0x01) << 3
setup_page_options |= (self.__print_draft & 0x01) << 4
setup_page_options |= (self.__print_notes & 0x01) << 5
setup_page_options |= (0x00 & 0x01) << 6
setup_page_options |= (0x01 & 0x01) << 7
setup_page_options |= (self.__print_notes_at_end & 0x01) << 9
setup_page_options |= (self.__print_omit_errors & 0x03) << 10
result += BIFFRecords.SetupPageRecord(self.__paper_size_code,
self.__print_scaling,
self.__start_page_number,
self.__fit_width_to_pages,
self.__fit_height_to_pages,
setup_page_options,
self.__print_hres,
self.__print_vres,
self.__header_margin,
self.__footer_margin,
self.__copies_num).get()
return result
def __protection_rec(self):
result = ''
result += BIFFRecords.ProtectRecord(self.__protect).get()
result += BIFFRecords.ScenProtectRecord(self.__scen_protect).get()
result += BIFFRecords.WindowProtectRecord(self.__wnd_protect).get()
result += BIFFRecords.ObjectProtectRecord(self.__obj_protect).get()
result += BIFFRecords.PasswordRecord(self.__password).get()
return result
def get_biff_data(self):
result = ''
result += self.__bof_rec()
result += self.__calc_settings_rec()
result += self.__guts_rec()
result += self.__wsbool_rec()
result += self.__colinfo_rec()
result += self.__dimensions_rec()
result += self.__print_settings_rec()
result += self.__protection_rec()
result += self.__row_blocks_rec()
result += self.__merged_rec()
result += self.__bitmaps_rec()
result += self.__window2_rec()
result += self.__panes_rec()
result += self.__eof_rec()
return result
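if __name__ == '__main__':
    # A minimal usage sketch (not part of the original module). It
    # assumes this file sits alongside Workbook.py, matching the
    # relative import used inside the class above; 'demo.xls' is an
    # arbitrary output name.
    from Workbook import Workbook

    wb = Workbook()
    ws = wb.add_sheet('Demo')
    ws.write(0, 0, 'Hello')                      # plain cell, default style
    ws.write_merge(1, 2, 0, 3, 'Merged A2:D3')   # blanks + label + MERGEDCELLS
    ws.panes_frozen = True                       # freeze one row at the top
    ws.horz_split_pos = 1
    wb.save('demo.xls')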
| agpl-3.0 | -1,635,084,034,952,873,500 | 31.108998 | 128 | 0.512337 | false |
renegelinas/mi-instrument | mi/instrument/ooici/mi/test_driver/driver.py | 7 | 21975 | # """
# @package mi.instrument.ooici.mi.test_driver.driver
# @file marine-integrations/mi/instrument/ooici/mi/test_driver/driver.py
# @author Bill French
# @brief Driver for the test_driver
# Release notes:
#
# # This driver is used for COI testing
#
# """
#
# __author__ = 'Bill French'
# __license__ = 'Apache 2.0'
#
# import string
# import time
# import random
# import struct
# import base64
#
# from mi.core.log import get_logger ; log = get_logger()
#
# from threading import Thread
#
# from mi.core.time_tools import time_to_ntp_date_time
#
# from mi.core.common import BaseEnum
# from mi.core.instrument.data_particle import RawDataParticle
# from mi.core.instrument.data_particle import RawDataParticleKey
# from mi.core.instrument.instrument_protocol import CommandResponseInstrumentProtocol
# from mi.core.instrument.instrument_fsm import InstrumentFSM
# from mi.core.instrument.instrument_driver import SingleConnectionInstrumentDriver
# from mi.core.instrument.instrument_driver import DriverEvent
# from mi.core.instrument.instrument_driver import DriverAsyncEvent
# from mi.core.instrument.instrument_driver import DriverProtocolState
# from mi.core.instrument.instrument_driver import DriverParameter
# from mi.core.instrument.instrument_driver import ResourceAgentState
# from mi.core.instrument.instrument_driver import DriverConnectionState
# from mi.core.instrument.data_particle import DataParticle
# from mi.core.instrument.data_particle import DataParticleKey
# from mi.core.instrument.data_particle import CommonDataParticleType
# from mi.core.instrument.chunker import StringChunker
# from mi.core.instrument.protocol_param_dict import ParameterDictType
# from mi.core.instrument.protocol_param_dict import Parameter
# from mi.core.exceptions import InstrumentParameterException
#
# # newline.
# NEWLINE = '\r\n'
#
# # default timeout.
# TIMEOUT = 10
#
# ###
# # Driver Constant Definitions
# ###
#
# class DataParticleType(BaseEnum):
# """
# Data particle types produced by this driver
# """
# RAW = CommonDataParticleType.RAW
#
# class ProtocolState(BaseEnum):
# """
# Instrument protocol states
# """
# UNKNOWN = DriverProtocolState.UNKNOWN
# COMMAND = DriverProtocolState.COMMAND
# AUTOSAMPLE = DriverProtocolState.AUTOSAMPLE
# DIRECT_ACCESS = DriverProtocolState.DIRECT_ACCESS
# TEST = DriverProtocolState.TEST
# CALIBRATE = DriverProtocolState.CALIBRATE
#
# class ProtocolEvent(BaseEnum):
# """
# Protocol events
# """
# ENTER = DriverEvent.ENTER
# EXIT = DriverEvent.EXIT
# GET = DriverEvent.GET
# SET = DriverEvent.SET
# DISCOVER = DriverEvent.DISCOVER
# START_DIRECT = DriverEvent.START_DIRECT
# STOP_DIRECT = DriverEvent.STOP_DIRECT
# ACQUIRE_SAMPLE = DriverEvent.ACQUIRE_SAMPLE
# START_AUTOSAMPLE = DriverEvent.START_AUTOSAMPLE
# STOP_AUTOSAMPLE = DriverEvent.STOP_AUTOSAMPLE
# EXECUTE_DIRECT = DriverEvent.EXECUTE_DIRECT
# INIT_PARAMS = DriverEvent.INIT_PARAMS
#
# class Capability(BaseEnum):
# """
# Protocol events that should be exposed to users (subset of above).
# """
# START_AUTOSAMPLE = ProtocolEvent.START_AUTOSAMPLE
# STOP_AUTOSAMPLE = ProtocolEvent.STOP_AUTOSAMPLE
#
# class ParameterName(DriverParameter):
# """
# Device specific parameters.
# """
# PAYLOAD_SIZE = 'PAYLOAD_SIZE'
# SAMPLE_INTERVAL = 'SAMPLE_INTERVAL'
#
# class Prompt(BaseEnum):
# """
# Device i/o prompts..
# """
#
# class InstrumentCommand(BaseEnum):
# """
# Instrument command strings
# """
#
#
# ###############################################################################
# # Data Particles
# ###############################################################################
# class TestDataParticle(RawDataParticle):
# def _build_parsed_values(self):
# """
# Build a particle out of a port agent packet.
# @returns A list that is ready to be added to the "values" tag before
# the structure is JSONified
# """
#
# payload = base64.b64encode(self.raw_data)
# length = len(self.raw_data)
# checksum = 1
# ptype = 1
#
# result = [{
# DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,
# DataParticleKey.VALUE: payload,
# DataParticleKey.BINARY: True},
# {
# DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,
# DataParticleKey.VALUE: length},
# {
# DataParticleKey.VALUE_ID: RawDataParticleKey.TYPE,
# DataParticleKey.VALUE: ptype},
# {
# DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,
# DataParticleKey.VALUE: checksum},
#
# ]
#
# return result
#
#
# ###############################################################################
# # Driver
# ###############################################################################
#
# class InstrumentDriver(SingleConnectionInstrumentDriver):
# """
# InstrumentDriver subclass
# Subclasses SingleConnectionInstrumentDriver with connection state
# machine.
# """
#
# ########################################################################
# # Protocol builder.
# ########################################################################
#
# def _build_protocol(self):
# """
# Construct the driver protocol state machine.
# """
# self._protocol = Protocol(Prompt, NEWLINE, self._driver_event)
#
# ########################################################################
# # Connected handlers.
# ########################################################################
#
# def _handler_connected_disconnect(self, *args, **kwargs):
# """
#         Disconnect from the device via port agent / logger and destroy the
# protocol FSM.
# @retval (next_state, result) tuple, (DriverConnectionState.DISCONNECTED,
# None) if successful.
# """
# next_state = None
# result = None
#
# log.info("_handler_connected_disconnect: invoking stop_comms().")
# self._protocol = None
# next_state = DriverConnectionState.DISCONNECTED
#
# return (next_state, result)
#
# ########################################################################
# # Disconnected handlers.
# ########################################################################
#
# def _handler_disconnected_configure(self, *args, **kwargs):
# """
# Configure driver for device comms.
#         @param args[0] Communications config dictionary.
# @retval (next_state, result) tuple, (None, None).
# @raises InstrumentParameterException if missing or invalid param dict.
# """
# next_state = None
# result = None
#
# return (next_state, result)
#
# def _handler_disconnected_connect(self, *args, **kwargs):
# """
# Establish communications with the device via port agent / logger and
#         construct and initialize a protocol FSM for device interaction.
# @retval (next_state, result) tuple, (DriverConnectionState.CONNECTED,
# None) if successful.
# @raises InstrumentConnectionException if the attempt to connect failed.
# """
# log.debug("_handler_disconnected_connect. Mocked")
# next_state = DriverConnectionState.CONNECTED
# result = None
#
# self._build_protocol()
#
# return (next_state, result)
#
#
# ###########################################################################
# # Protocol
# ###########################################################################
#
# class Protocol(CommandResponseInstrumentProtocol):
# """
# Instrument protocol class
# Subclasses CommandResponseInstrumentProtocol
# """
# def __init__(self, prompts, newline, driver_event):
# """
# Protocol constructor.
# @param prompts A BaseEnum class containing instrument prompts.
# @param newline The newline.
# @param driver_event Driver process event callback.
# """
# # Construct protocol superclass.
# CommandResponseInstrumentProtocol.__init__(self, prompts, newline, driver_event)
#
# # Build protocol state machine.
# self._protocol_fsm = InstrumentFSM(ProtocolState, ProtocolEvent,
# ProtocolEvent.ENTER, ProtocolEvent.EXIT)
#
# # Add event handlers for protocol state machine.
# self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.ENTER, self._handler_unknown_enter)
# self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.EXIT, self._handler_unknown_exit)
# self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.DISCOVER, self._handler_unknown_discover)
# self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.START_DIRECT, self._handler_command_start_direct)
#
# self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.ENTER, self._handler_command_enter)
# self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.EXIT, self._handler_command_exit)
# self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.GET, self._handler_get)
# self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.SET, self._handler_command_set)
# self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.START_AUTOSAMPLE, self._handler_command_autosample_start)
# self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.INIT_PARAMS, self._handler_command_init_params)
#
# self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.ENTER, self._handler_autosample_enter)
# self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.EXIT, self._handler_autosample_exit)
# self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.STOP_AUTOSAMPLE, self._handler_autosample_stop)
#
# self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.ENTER, self._handler_direct_access_enter)
# self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.EXIT, self._handler_direct_access_exit)
# self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.STOP_DIRECT, self._handler_direct_access_stop_direct)
# self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.EXECUTE_DIRECT, self._handler_direct_access_execute_direct)
#
# # Construct the parameter dictionary containing device parameters,
# # current parameter values, and set formatting functions.
# self._build_param_dict()
#
# # Add build handlers for device commands.
#
# # Add response handlers for device commands.
#
# # Add sample handlers.
#
#         # Start the state machine in the UNKNOWN state.
# self._protocol_fsm.start(ProtocolState.UNKNOWN)
#
#         # commands sent to the device, to be filtered in responses for telnet DA
# self._sent_cmds = []
#
# #
# self._chunker = StringChunker(Protocol.sieve_function)
#
# self._payload_cache = {}
#
#
# @staticmethod
# def sieve_function(raw_data):
# """
# The method that splits samples
# """
#
# return_list = []
#
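#         # In a real driver this would typically collect regex match
#         # spans over raw_data (illustrative only, not original code):
#         #
#         #     for matcher in [SAMPLE_REGEX]:        # hypothetical regex
#         #         return_list.extend((m.start(), m.end())
#         #                            for m in matcher.finditer(raw_data))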
# return return_list
#
# def _build_param_dict(self):
# """
# Populate the parameter dictionary with parameters.
#         For each parameter key, add match string, match lambda function,
# and value formatting function for set commands.
# """
# # Add parameter handlers to parameter dict.
# self._param_dict.add_parameter(
# Parameter(ParameterName.PAYLOAD_SIZE,
# int,
# type=ParameterDictType.INT,
# display_name="Payload Size",
# startup_param = True,
# direct_access = True,
# default_value = 1024)
# )
# self._param_dict.add_parameter(
# Parameter(ParameterName.SAMPLE_INTERVAL,
# int,
# type=ParameterDictType.INT,
# display_name="Sample Interval (sec)",
# startup_param = True,
# direct_access = True,
# default_value = 1)
# )
#
# def _got_chunk(self, chunk):
# """
# The base class got_data has gotten a chunk from the chunker. Pass it to extract_sample
# with the appropriate particle objects and REGEXes.
# """
#
# def _filter_capabilities(self, events):
# """
# Return a list of currently available capabilities.
# """
# return [x for x in events if Capability.has(x)]
#
# ########################################################################
# # Unknown handlers.
# ########################################################################
#
# def _handler_unknown_enter(self, *args, **kwargs):
# """
# Enter unknown state.
# """
# # Tell driver superclass to send a state change event.
# # Superclass will query the state.
# self._driver_event(DriverAsyncEvent.STATE_CHANGE)
#
# def _handler_unknown_exit(self, *args, **kwargs):
# """
# Exit unknown state.
# """
# pass
#
# def _handler_unknown_discover(self, *args, **kwargs):
# """
# Discover current state
# @retval (next_state, result)
# """
# return (ProtocolState.COMMAND, ResourceAgentState.IDLE)
#
# ########################################################################
# # Command handlers.
# ########################################################################
#
# def _handler_command_enter(self, *args, **kwargs):
# """
# Enter command state.
# @throws InstrumentTimeoutException if the device cannot be woken.
# @throws InstrumentProtocolException if the update commands and not recognized.
# """
# self._protocol_fsm.on_event(DriverEvent.INIT_PARAMS)
#
# # Tell driver superclass to send a state change event.
# # Superclass will query the state.
# self._driver_event(DriverAsyncEvent.STATE_CHANGE)
#
# def _handler_command_set(self, *args, **kwargs):
# """
# Set parameter
# """
# next_state = None
# result = None
#
# self._set_params(*args, **kwargs)
#
# log.debug("_handler_command_set: result: %s", result)
#
# return (next_state, result)
#
# def _handler_command_exit(self, *args, **kwargs):
# """
# Exit command state.
# """
# pass
#
# def _handler_command_start_direct(self):
# """
# Start direct access
# """
# next_state = ProtocolState.DIRECT_ACCESS
# next_agent_state = ResourceAgentState.DIRECT_ACCESS
# result = None
# log.debug("_handler_command_start_direct: entering DA mode")
# return (next_state, (next_agent_state, result))
#
# def _handler_command_autosample_start(self, *args, **kwargs):
# next_state = ProtocolState.AUTOSAMPLE
# next_agent_state = ResourceAgentState.STREAMING
# result = None
# return (next_state, (next_agent_state, result))
#
# def _handler_command_init_params(self, *args, **kwargs):
# """
# initialize parameters
# """
# next_state = None
# result = None
#
# self._init_params()
# return (next_state, result)
#
# ########################################################################
# # Autosample handlers.
# ########################################################################
#
# def _handler_autosample_enter(self, *args, **kwargs):
# """
# Enter autosample state.
# """
# # Tell driver superclass to send a state change event.
# # Superclass will query the state.
# self._driver_event(DriverAsyncEvent.STATE_CHANGE)
#
# self._start_packet_generator()
#
# def _handler_autosample_exit(self, *args, **kwargs):
# """
# Exit autosample state.
# """
# self._stop_packet_generator()
#
# def _handler_autosample_stop(self, *args, **kwargs):
# """
# Stop autosample and switch back to command mode.
# @retval (next_state, result) tuple, (ProtocolState.COMMAND,
# (next_agent_state, None) if successful.
# @throws InstrumentTimeoutException if device cannot be woken for command.
# @throws InstrumentProtocolException if command misunderstood or
# incorrect prompt received.
# """
# next_state = None
# result = None
#
# next_state = ProtocolState.COMMAND
# next_agent_state = ResourceAgentState.COMMAND
#
# return (next_state, (next_agent_state, result))
#
# ########################################################################
# # Direct access handlers.
# ########################################################################
#
# def _handler_direct_access_enter(self, *args, **kwargs):
# """
# Enter direct access state.
# """
# # Tell driver superclass to send a state change event.
# # Superclass will query the state.
# self._driver_event(DriverAsyncEvent.STATE_CHANGE)
#
# self._sent_cmds = []
#
# def _handler_direct_access_exit(self, *args, **kwargs):
# """
# Exit direct access state.
# """
# pass
#
# def _handler_direct_access_execute_direct(self, data):
# """
# """
# next_state = None
# result = None
# next_agent_state = None
#
# self._do_cmd_direct(data)
#
# # add sent command to list for 'echo' filtering in callback
# self._sent_cmds.append(data)
#
# return (next_state, (next_agent_state, result))
#
# def _handler_direct_access_stop_direct(self):
# """
# @throw InstrumentProtocolException on invalid command
# """
# next_state = None
# result = None
#
# next_state = ProtocolState.COMMAND
# next_agent_state = ResourceAgentState.COMMAND
#
# return (next_state, (next_agent_state, result))
#
#
# ########################################################################
# # Helpers
# ########################################################################
# def _start_packet_generator(self):
# packet_size = self._param_dict.get(ParameterName.PAYLOAD_SIZE)
# sample_interval = self._param_dict.get(ParameterName.SAMPLE_INTERVAL)
#
# self._generate_payload_value(packet_size)
#
# self._stop_generator_thread = False
# self._generator_thread = Thread(
# target=self._generate_packets,
# args=(packet_size, sample_interval, self._publish_packet ))
# self._generator_thread.start()
#
# def _generate_packets(self, *args, **kwargs):
# packet_size = args[0]
# sample_interval = args[1]
# publish_callback = args[2]
#
# log.debug("_generate_packets, starting packet generator. packet_size: %s, sample_interval: %s", packet_size, sample_interval)
#
# while(self._stop_generator_thread != True):
# publish_callback(packet_size)
# time.sleep(sample_interval)
#
# log.debug("_generate_packets, stopping packet generator")
#
# def _publish_packet(self, packet_size):
# buf = self._get_payload_value(packet_size)
# particle = TestDataParticle(buf, port_timestamp=time_to_ntp_date_time())
#
# log.debug("_publish_packet, packet size: %d", len(buf))
# self._driver_event(DriverAsyncEvent.SAMPLE, particle.generate())
#
# def _get_payload_value(self, packet_size):
# if self._payload_cache.get(packet_size):
# return self._payload_cache[packet_size]
#
# return self._generate_payload_value(packet_size)
#
# def _generate_payload_value(self, packet_size):
# log.debug("generating new value, packet size: %s", packet_size)
# charlist = [random.choice(string.letters) for _ in range(packet_size)]
# buf = struct.pack('%sc' % len(charlist), *charlist)
# self._payload_cache[packet_size] = buf
# return buf
#
# def _stop_packet_generator(self):
# log.debug("_stop_packet_generator: Signal the packet generator to stop")
# self._stop_generator_thread = True
#
# self._generator_thread.join(60)
#
# def _set_params(self, *args, **kwargs):
# """
# Issue commands to the instrument to set various parameters
# """
# startup = False
# config_change = False
#
# result = {}
#
# try:
# params = args[0]
# except IndexError:
# raise InstrumentParameterException('Set command requires a parameter dict.')
#
# try:
# startup = args[1]
# except IndexError:
# pass
#
# for (key, val) in params.iteritems():
# log.debug("KEY = " + str(key) + " VALUE = " + str(val))
# if self._param_dict.get(key) != val:
# config_change = True
# self._param_dict.set_value(key, val)
# result[key] = val
#
# if config_change:
# self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
#
# return result
| bsd-2-clause | 7,561,492,230,685,583,000 | 36.057336 | 143 | 0.56678 | false |
sputnick-dev/weboob | modules/attilasub/test.py | 7 | 1310 | # -*- coding: utf-8 -*-
# Copyright(C) 2013 Julien Veyssier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.test import BackendTest
import urllib
from random import choice
class AttilasubTest(BackendTest):
MODULE = 'attilasub'
def test_subtitle(self):
subtitles = list(self.backend.iter_subtitles('fr', 'spiderman'))
assert (len(subtitles) > 0)
for subtitle in subtitles:
path, qs = urllib.splitquery(subtitle.url)
assert path.endswith('.rar')
# get the file of a random sub
if len(subtitles):
subtitle = choice(subtitles)
self.backend.get_subtitle_file(subtitle.id)
| agpl-3.0 | -1,430,543,221,737,440,300 | 32.589744 | 77 | 0.70229 | false |
chwiede/pyads | pyads/adsclient.py | 1 | 6764 | import time
import select
import socket
import struct
import threading
import errno
from .amspacket import AmsPacket
from .adsconnection import AdsConnection
from .adsexception import AdsException
from .commands import *
class InvalidPacket(AdsException):
pass
class AdsClient:
def __init__(self, adsConnection = None, amsTarget = None, amsSource = None, targetIP = None):
if adsConnection != None and amsTarget == None and amsSource == None:
self.AdsConnection = adsConnection
elif amsTarget != None and adsConnection == None:
self.AdsConnection = AdsConnection(amsTarget, amsSource, targetIP)
else:
raise Exception('You must specify either connection or adsTarget, not both.')
self.response = b''
MAX_RETRY_ON_FAIL = 3
Debug = False
RetryOnFail = 0
AdsConnection = None
AdsPortDefault = 0xBF02
AdsIndexGroupIn = 0xF020
AdsIndexGroupOut = 0xF030
AdsChunkSizeDefault = 1024
Socket = None
_CurrentInvokeID = 0x8000
_CurrentPacket = None
_CurrentError = None
@property
def IsConnected(self):
return self.Socket != None and self.Socket.fileno() >= 0
def Close(self):
if (self.Socket != None):
self.Socket.close()
self.Socket = None
def Connect(self):
self.Close()
self.Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.Socket.settimeout(2)
try:
self.Socket.connect((self.AdsConnection.TargetIP, self.AdsPortDefault))
self._BeginAsyncRead()
except socket.error:
raise AdsException(0x274c)
def _BeginAsyncRead(self):
self._AsyncReadThread = threading.Thread(target=self._AsyncRead)
self._AsyncReadThread.start()
def _AsyncRead(self):
while self.IsConnected:
try:
ready = select.select([self.Socket], [], [], 0.1)
if ready[0] and self.IsConnected:
newPacket = self.ReadAmsPacketFromSocket()
if newPacket.InvokeID == self._CurrentInvokeID:
self._CurrentPacket = newPacket
else:
print("Packet dropped:")
print(newPacket)
except (socket.error, select.error, InvalidPacket) as e:
self.Close()
self._CurrentError = e
break
def ReadAmsPacketFromSocket(self):
# generate packet from cache, or read more data from recv buffer.
if len(self.response) == 0:
response = self.Socket.recv(self.AdsChunkSizeDefault)
else:
response = self.response
# ensure correct beckhoff tcp header
if(len(response) < 6):
raise InvalidPacket('Invalid packet received')
# first two bits must be 0
if (response[0:2] != b'\x00\x00'):
raise InvalidPacket('Invalid packet received')
# read whole data length
dataLen = struct.unpack('I', response[2:6])[0] + 6
# read rest of data, if any
while (len(response) < dataLen):
nextReadLen = min(self.AdsChunkSizeDefault, dataLen - len(response))
response += self.Socket.recv(nextReadLen)
# cut off tcp-header and return response amspacket
packet = AmsPacket.FromBinaryData(response[6:dataLen])
self.response = response[dataLen:]
return packet
def GetTcpHeader(self, amsData):
# pack 2 bytes (reserved) and 4 bytes (length)
# format _must_ be little endian!
return struct.pack('<HI', 0, len(amsData))
def SendAndRecv(self, amspacket):
if not self.IsConnected:
self.Connect()
# prepare packet with invoke id
self.PrepareCommandInvoke(amspacket)
# send tcp-header and ams-data
try:
self.Socket.send(self.GetTCPPacket(amspacket))
except socket.error as e:
# if i fail Socket.send i try again for 3 times
if self.RetryOnFail < self.MAX_RETRY_ON_FAIL:
self.RetryOnFail += 1
# if i have a BROKEN PIPE error i reconnect
# the socket before try again
if e.errno == errno.EPIPE:
self.Connect()
return self.SendAndRecv(amspacket)
else:
self.RetryOnFail = 0
raise AdsException(0x274c)
# here's your packet
return self.AwaitCommandInvoke()
def GetTCPPacket(self, amspacket):
# get ams-data and generate tcp-header
amsData = amspacket.GetBinaryData()
tcpHeader = self.GetTcpHeader(amsData)
return tcpHeader + amsData
def PrepareCommandInvoke(self, amspacket):
if(self._CurrentInvokeID < 0xFFFF):
self._CurrentInvokeID += 1
else:
self._CurrentInvokeID = 0x8000
self._CurrentPacket = None
self._CurrentError = None
amspacket.InvokeID = self._CurrentInvokeID
if self.Debug:
print(">>> sending ams-packet:")
print(amspacket)
def AwaitCommandInvoke(self):
# unfortunately threading.event is slower than this oldschool poll :-(
timeout = 0
while (self._CurrentPacket == None):
if self._CurrentError:
raise self._CurrentError
timeout += 0.001
time.sleep(0.001)
if (timeout > 10):
raise AdsException(0x745)
if self.Debug:
print("<<< received ams-packet:")
print(self._CurrentPacket)
return self._CurrentPacket
def ReadDeviceInfo(self):
return DeviceInfoCommand().Execute(self)
def Read(self, indexGroup, indexOffset, length):
return ReadCommand(indexGroup, indexOffset, length).Execute(self)
def Write(self, indexGroup, indexOffset, data):
return WriteCommand(indexGroup, indexOffset, data).Execute(self)
def ReadState(self):
return ReadStateCommand().Execute(self)
def WriteControl(self, adsState, deviceState, data = b''):
return WriteControlCommand(adsState, deviceState, data).Execute(self)
def AddDeviceNotification(self):
raise NotImplementedError()
def DeleteDeviceNotification(self):
raise NotImplementedError()
def ReadWrite(self, indexGroup, indexOffset, readLen, dataToWrite = b''):
return ReadWriteCommand(indexGroup, indexOffset, readLen, dataToWrite).Execute(self)
def __enter__(self):
return self
def __exit__(self, vtype, value, traceback):
self.Close()
| mit | -6,852,509,406,677,497,000 | 25.735178 | 98 | 0.603933 | false |
SirPigles/rsf | forums/test/python/test.py | 7 | 1204 | import sys
import os
import re
def print_environ(environ=os.environ):
"""Dump the shell environment as HTML."""
keys = environ.keys()
keys.sort()
i = 0
for key in keys:
if not re.search("^HTTP_|^REQUEST_", key):
continue
if i == 0:
print """<tr class="normal"><td>""", escape(key), "</td><td>", escape(environ[key]), "</td></tr>"
i = 1
else:
print """<tr class="alt"><td>""", escape(key), "</td><td>", escape(environ[key]), "</td></tr>"
i = 0
def escape(s, quote=None):
"""Replace special characters '&', '<' and '>' by SGML entities."""
s = s.replace("&", "&") # Must be done first!
s = s.replace("<", "<")
s = s.replace(">", ">")
if quote:
s = s.replace('"', """)
return s
print """Content-type: text/html
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<title></title>
<link rel="stylesheet" type="text/css" href="../../css/style.css">
</head>
<body>
<table cellspacing="0" cellpadding="0" border="0">
<tr class="subhead" align="Left"><th>Name</th><th>Value</th></tr>"""
print_environ()
print """</table>
</body>
</html>"""
| apache-2.0 | -1,709,632,533,602,733,600 | 26.363636 | 109 | 0.542359 | false |
OMNILab/CCFBDC2014 | data_process/feature_spatio-temporal/feature_spatio/feature_spatio.py | 1 | 2551 | # -*- coding: utf-8 -*-
import sys
import os
import math
#读写文件接口函数文件函数
def read_write_file(rfname,wfname,resource_files):
readf = open(rfname,'r')
writef = open(wfname,'w')
for line in readf:
#add other functions to deal with each line
result_line = location_extr(line.strip(),resource_files)
writef.write(result_line + '\n')
writef.close()
readf.close()
#由值查主键,输入一个单词,如果查到返回主键名称,没有返回0
def value_to_key(value,resourcef_name):
readf = open(resourcef_name)
result = 0
for line in readf:
line_split = line.split('\t')
key = line_split[0]
values = line_split[1]
if values.find(value)==-1:
continue
else:
result = key
break
readf.close()
return result
#地点提取函数,先在标题里面找,如果没有再在内容里面找,从频次高的到频次低的找,找到即标注地点,输出“地区、省、市”这样格式的信息
def location_extr(record,resource_files):
record_split = record.split('\t')
event_id = record_split[0]
title_loc = record_split[1].strip('"')
content_loc = record_split[2].strip('"')
basic_info = event_id + '\t'
line_rslt = event_id + '' + '' + ''
province = ''
area = ''
title_loc_dealed = content_loc_match(title_loc,resource_files)
if title_loc_dealed!=0:
return basic_info + title_loc_dealed
content_loc_dealed = content_loc_match(content_loc,resource_files)
if content_loc_dealed!=0:
return basic_info + content_loc_dealed
return line_rslt
#内容与地点库匹配函数
def content_loc_match(content_loc,resource_files):
resource_files_split = resource_files.split('\t')
prov_city_map = resource_files_split[0]
area_prov_map = resource_files_split[1]
if content_loc!='':
content_loc_split = content_loc.split(' ')
#map the cities
for num in range(len(content_loc_split)):
content_city_name = content_loc_split[num].split(':')[0]
province = value_to_key(content_city_name,prov_city_map)
if(province!=0):
area = value_to_key(province,area_prov_map)
if content_city_name.find("市")==-1:
content_city_name = content_city_name + "市"
line_rslt = area + '\t' + province + '\t' + content_city_name
return line_rslt
#if there arenn't cities, map provinces
for num in range(len(content_loc_split)):
content_province_name = content_loc_split[num].split(':')[0]
area = value_to_key(content_province_name,area_prov_map)
if(area!=0):
line_rslt = area + '\t' + content_province_name
return line_rslt
return 0
| mit | 5,812,511,136,698,952,000 | 27.9875 | 67 | 0.684778 | false |
Axelio/pruebas_camelot | videostore/model.py | 1 | 1389 |
from sqlalchemy import Unicode, Date, Integer
from sqlalchemy.schema import Column, ForeignKey
from sqlalchemy.orm import relationship
import sqlalchemy.types
from camelot.admin.entity_admin import EntityAdmin
from camelot.core.orm import Entity
import camelot.types
class Movie( Entity ):
__tablename__ = 'movie'
title = Column( Unicode(60), nullable = False )
short_description = Column( Unicode(512) )
release_date = Column( Date() )
genre = Column( Unicode(15) )
director_id = Column( Integer, ForeignKey('director.id') )
director = relationship( 'Director',
backref = 'movies' )
def __unicode__( self ):
return self.title or 'Untitled movie'
class Admin( EntityAdmin ):
verbose_name = 'Movie'
list_display = ['title', 'short_description', 'release_date', 'genre']
list_display = [ 'title',
'short_description',
'release_date',
'genre',
'director' ]
class Director( Entity ):
__tablename__ = 'director'
name = Column( Unicode( 60 ) )
class Admin( EntityAdmin ):
verbose_name = 'Director'
list_display = [ 'name' ]
form_display = list_display + ['movies']
def __unicode__(self):
return self.name or 'unknown director'
| gpl-3.0 | 5,921,483,481,911,714,000 | 26.78 | 78 | 0.587473 | false |
h-mayorquin/competitive_and_selective_learning | play.py | 1 | 1250 | """
This is the play
"""
import numpy as np
import matplotlib.pyplot as plt
import math
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from functions import selection_algorithm, scl
from csl import CSL
plot = True
verbose = False
tracking = True
selection = False
# Generate the data
n_samples = 1500
random_state = 20 # Does not converge
random_state = 41
random_state = 105 # Does not converge
random_state = 325325
random_state = 1111
n_features = 2
centers = 7
X, y = make_blobs(n_samples, n_features, centers, random_state=random_state)
# The algorithm
N = centers
s = 2 # Number of neurons to change per round
eta = 0.1
T = 100
csl = CSL(n_clusters=N, n_iter=T, tol=0.001, eta=eta, s0=s, random_state=np.random)
csl.fit(X)
neurons = csl.centers_
if False:
kmeans = KMeans(n_clusters=N)
kmeans.fit(X)
neurons = kmeans.cluster_centers_
if plot:
# Visualize X
fig = plt.figure(figsize=(16, 12))
ax = fig.add_subplot(111)
ax.plot(X[:, 0], X[:, 1], 'x', markersize=6)
ax.hold(True)
if True:
for n in range(N):
ax.plot(neurons[n, 0], neurons[n, 1], 'o', markersize=12, label='neuron ' + str(n))
ax.legend()
# fig.show()
plt.show()
| mit | -4,808,629,288,575,466,000 | 20.186441 | 95 | 0.66 | false |
repotvsupertuga/tvsupertuga.repository | plugin.video.TVsupertuga/resources/lib/plugins/unidecode/x1d6.py | 248 | 3974 | data = (
's', # 0x00
't', # 0x01
'u', # 0x02
'v', # 0x03
'w', # 0x04
'x', # 0x05
'y', # 0x06
'z', # 0x07
'A', # 0x08
'B', # 0x09
'C', # 0x0a
'D', # 0x0b
'E', # 0x0c
'F', # 0x0d
'G', # 0x0e
'H', # 0x0f
'I', # 0x10
'J', # 0x11
'K', # 0x12
'L', # 0x13
'M', # 0x14
'N', # 0x15
'O', # 0x16
'P', # 0x17
'Q', # 0x18
'R', # 0x19
'S', # 0x1a
'T', # 0x1b
'U', # 0x1c
'V', # 0x1d
'W', # 0x1e
'X', # 0x1f
'Y', # 0x20
'Z', # 0x21
'a', # 0x22
'b', # 0x23
'c', # 0x24
'd', # 0x25
'e', # 0x26
'f', # 0x27
'g', # 0x28
'h', # 0x29
'i', # 0x2a
'j', # 0x2b
'k', # 0x2c
'l', # 0x2d
'm', # 0x2e
'n', # 0x2f
'o', # 0x30
'p', # 0x31
'q', # 0x32
'r', # 0x33
's', # 0x34
't', # 0x35
'u', # 0x36
'v', # 0x37
'w', # 0x38
'x', # 0x39
'y', # 0x3a
'z', # 0x3b
'A', # 0x3c
'B', # 0x3d
'C', # 0x3e
'D', # 0x3f
'E', # 0x40
'F', # 0x41
'G', # 0x42
'H', # 0x43
'I', # 0x44
'J', # 0x45
'K', # 0x46
'L', # 0x47
'M', # 0x48
'N', # 0x49
'O', # 0x4a
'P', # 0x4b
'Q', # 0x4c
'R', # 0x4d
'S', # 0x4e
'T', # 0x4f
'U', # 0x50
'V', # 0x51
'W', # 0x52
'X', # 0x53
'Y', # 0x54
'Z', # 0x55
'a', # 0x56
'b', # 0x57
'c', # 0x58
'd', # 0x59
'e', # 0x5a
'f', # 0x5b
'g', # 0x5c
'h', # 0x5d
'i', # 0x5e
'j', # 0x5f
'k', # 0x60
'l', # 0x61
'm', # 0x62
'n', # 0x63
'o', # 0x64
'p', # 0x65
'q', # 0x66
'r', # 0x67
's', # 0x68
't', # 0x69
'u', # 0x6a
'v', # 0x6b
'w', # 0x6c
'x', # 0x6d
'y', # 0x6e
'z', # 0x6f
'A', # 0x70
'B', # 0x71
'C', # 0x72
'D', # 0x73
'E', # 0x74
'F', # 0x75
'G', # 0x76
'H', # 0x77
'I', # 0x78
'J', # 0x79
'K', # 0x7a
'L', # 0x7b
'M', # 0x7c
'N', # 0x7d
'O', # 0x7e
'P', # 0x7f
'Q', # 0x80
'R', # 0x81
'S', # 0x82
'T', # 0x83
'U', # 0x84
'V', # 0x85
'W', # 0x86
'X', # 0x87
'Y', # 0x88
'Z', # 0x89
'a', # 0x8a
'b', # 0x8b
'c', # 0x8c
'd', # 0x8d
'e', # 0x8e
'f', # 0x8f
'g', # 0x90
'h', # 0x91
'i', # 0x92
'j', # 0x93
'k', # 0x94
'l', # 0x95
'm', # 0x96
'n', # 0x97
'o', # 0x98
'p', # 0x99
'q', # 0x9a
'r', # 0x9b
's', # 0x9c
't', # 0x9d
'u', # 0x9e
'v', # 0x9f
'w', # 0xa0
'x', # 0xa1
'y', # 0xa2
'z', # 0xa3
'i', # 0xa4
'j', # 0xa5
'', # 0xa6
'', # 0xa7
'Alpha', # 0xa8
'Beta', # 0xa9
'Gamma', # 0xaa
'Delta', # 0xab
'Epsilon', # 0xac
'Zeta', # 0xad
'Eta', # 0xae
'Theta', # 0xaf
'Iota', # 0xb0
'Kappa', # 0xb1
'Lamda', # 0xb2
'Mu', # 0xb3
'Nu', # 0xb4
'Xi', # 0xb5
'Omicron', # 0xb6
'Pi', # 0xb7
'Rho', # 0xb8
'Theta', # 0xb9
'Sigma', # 0xba
'Tau', # 0xbb
'Upsilon', # 0xbc
'Phi', # 0xbd
'Chi', # 0xbe
'Psi', # 0xbf
'Omega', # 0xc0
'nabla', # 0xc1
'alpha', # 0xc2
'beta', # 0xc3
'gamma', # 0xc4
'delta', # 0xc5
'epsilon', # 0xc6
'zeta', # 0xc7
'eta', # 0xc8
'theta', # 0xc9
'iota', # 0xca
'kappa', # 0xcb
'lamda', # 0xcc
'mu', # 0xcd
'nu', # 0xce
'xi', # 0xcf
'omicron', # 0xd0
'pi', # 0xd1
'rho', # 0xd2
'sigma', # 0xd3
'sigma', # 0xd4
'tai', # 0xd5
'upsilon', # 0xd6
'phi', # 0xd7
'chi', # 0xd8
'psi', # 0xd9
'omega', # 0xda
'', # 0xdb
'', # 0xdc
'', # 0xdd
'', # 0xde
'', # 0xdf
'', # 0xe0
'', # 0xe1
'', # 0xe2
'', # 0xe3
'', # 0xe4
'', # 0xe5
'', # 0xe6
'', # 0xe7
'', # 0xe8
'', # 0xe9
'', # 0xea
'', # 0xeb
'', # 0xec
'', # 0xed
'', # 0xee
'', # 0xef
'', # 0xf0
'', # 0xf1
'', # 0xf2
'', # 0xf3
'', # 0xf4
'', # 0xf5
'', # 0xf6
'', # 0xf7
'', # 0xf8
'', # 0xf9
'', # 0xfa
'', # 0xfb
'', # 0xfc
'', # 0xfd
'', # 0xfe
'', # 0xff
)
| gpl-2.0 | -1,488,419,387,175,585,300 | 14.403101 | 20 | 0.354051 | false |
GeyerA/android_external_chromium_org | chrome/common/extensions/docs/server2/api_list_data_source_test.py | 24 | 2930 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from api_list_data_source import APIListDataSource
from compiled_file_system import CompiledFileSystem
from copy import deepcopy
from object_store_creator import ObjectStoreCreator
from test_file_system import TestFileSystem
def _ToTestData(obj):
'''Transforms |obj| into test data by turning a list of files into an object
mapping that file to its contents (derived from its name).
'''
return (dict((name, name) for name in obj) if isinstance(obj, list) else
dict((key, _ToTestData(value)) for key, value in obj.items()))
_TEST_DATA = _ToTestData({
'api': [
'alarms.idl',
'app_window.idl',
'browser_action.json',
'experimental_bluetooth.idl',
'experimental_history.idl',
'experimental_power.idl',
'infobars.idl',
'something_internal.idl',
'something_else_internal.json',
'storage.json',
],
'public': {
'apps': [
'alarms.html',
'app_window.html',
'experimental_bluetooth.html',
'experimental_power.html',
'storage.html',
],
'extensions': [
'alarms.html',
'browserAction.html',
'experimental_history.html',
'experimental_power.html',
'infobars.html',
'storage.html',
],
},
})
class APIListDataSourceTest(unittest.TestCase):
def setUp(self):
file_system = TestFileSystem(deepcopy(_TEST_DATA))
self._factory = APIListDataSource.Factory(
CompiledFileSystem.Factory(
file_system, ObjectStoreCreator.ForTest()),
file_system,
'api',
'public')
def testApps(self):
api_list = self._factory.Create()
self.assertEqual([{'name': 'alarms'},
{'name': 'app.window'},
{'name': 'storage', 'last': True}],
api_list.get('apps').get('chrome'))
def testExperimentalApps(self):
api_list = self._factory.Create()
self.assertEqual([{'name': 'experimental.bluetooth'},
{'name': 'experimental.power', 'last': True}],
sorted(api_list.get('apps').get('experimental')))
def testExtensions(self):
api_list = self._factory.Create()
self.assertEqual([{'name': 'alarms'},
{'name': 'browserAction'},
{'name': 'infobars'},
{'name': 'storage', 'last': True}],
sorted(api_list.get('extensions').get('chrome')))
def testExperimentalApps(self):
api_list = self._factory.Create()
self.assertEqual([{'name': 'experimental.history'},
{'name': 'experimental.power', 'last': True}],
sorted(api_list.get('extensions').get('experimental')))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 339,240,571,077,742,850 | 31.555556 | 78 | 0.603413 | false |
arrabito/DIRAC | DataManagementSystem/Client/DataManager.py | 3 | 74453 | """
:mod: DataManager
.. module: DataManager
:synopsis: DataManager links the functionalities of StorageElement and FileCatalog.
This module consists of DataManager and related classes.
"""
# # imports
from datetime import datetime, timedelta
import fnmatch
import os
import time
import errno
# # from DIRAC
import DIRAC
from DIRAC import S_OK, S_ERROR, gLogger, gConfig
from DIRAC.Core.Utilities import DErrno
from DIRAC.Core.Utilities.Adler import fileAdler, compareAdler
from DIRAC.Core.Utilities.File import makeGuid, getSize
from DIRAC.Core.Utilities.List import randomize, breakListIntoChunks
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.AccountingSystem.Client.DataStoreClient import gDataStoreClient
from DIRAC.AccountingSystem.Client.Types.DataOperation import DataOperation
from DIRAC.DataManagementSystem.Utilities.DMSHelpers import DMSHelpers
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
# # RCSID
__RCSID__ = "$Id$"
def _isOlderThan(stringTime, days):
  """ Check if a given datetime is older than a given number of days """
  timeDelta = timedelta(days=days)
  maxCTime = datetime.utcnow() - timeDelta
  # Despite its historical name, 'stringTime' is expected to be a datetime object
  return stringTime < maxCTime
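# Minimal illustration of _isOlderThan (hypothetical values, not executed here):
#   _isOlderThan(datetime.utcnow() - timedelta(days=10), 7)  # -> True
#   _isOlderThan(datetime.utcnow(), 7)                       # -> False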
def _initialiseAccountingObject(operation, se, files):
""" create accouting record """
accountingDict = {}
accountingDict['OperationType'] = operation
result = getProxyInfo()
if not result['OK']:
userName = 'system'
else:
userName = result['Value'].get('username', 'unknown')
accountingDict['User'] = userName
accountingDict['Protocol'] = 'DataManager'
accountingDict['RegistrationTime'] = 0.0
accountingDict['RegistrationOK'] = 0
accountingDict['RegistrationTotal'] = 0
accountingDict['Destination'] = se
accountingDict['TransferTotal'] = files
accountingDict['TransferOK'] = files
accountingDict['TransferSize'] = files
accountingDict['TransferTime'] = 0.0
accountingDict['FinalStatus'] = 'Successful'
accountingDict['Source'] = DIRAC.siteName()
oDataOperation = DataOperation()
oDataOperation.setValuesFromDict(accountingDict)
return oDataOperation
class DataManager(object):
"""
.. class:: DataManager
  A DataManager performs all the actions that involve both the FileCatalog and the StorageElement
"""
def __init__(self, catalogs=None, masterCatalogOnly=False, vo=False):
""" c'tor
:param self: self reference
    :param catalogs: the list of catalogs in which to perform the operations. This
list will be ignored if masterCatalogOnly is set to True
:param masterCatalogOnly: if set to True, the operations will be performed only on the master catalog.
The catalogs parameter will be ignored.
:param vo: the VO for which the DataManager is created, get VO from the current proxy if not specified
"""
self.log = gLogger.getSubLogger(self.__class__.__name__, True)
self.voName = vo
if catalogs is None:
catalogs = []
catalogsToUse = FileCatalog(vo=self.voName).getMasterCatalogNames()[
'Value'] if masterCatalogOnly else catalogs
self.fileCatalog = FileCatalog(catalogs=catalogsToUse, vo=self.voName)
self.accountingClient = None
self.resourceStatus = ResourceStatus()
self.ignoreMissingInFC = Operations(vo=self.voName).getValue(
'DataManagement/IgnoreMissingInFC', False)
self.useCatalogPFN = Operations(vo=self.voName).getValue(
'DataManagement/UseCatalogPFN', True)
self.dmsHelper = DMSHelpers(vo=vo)
self.registrationProtocol = self.dmsHelper.getRegistrationProtocols()
self.thirdPartyProtocols = self.dmsHelper.getThirdPartyProtocols()
def setAccountingClient(self, client):
""" Set Accounting Client instance
"""
self.accountingClient = client
def __hasAccess(self, opType, path):
""" Check if we have permission to execute given operation on the given file (if exists) or its directory
"""
if isinstance(path, basestring):
paths = [path]
else:
paths = list(path)
res = self.fileCatalog.hasAccess(paths, opType)
if not res['OK']:
return res
result = {'Successful': list(), 'Failed': list()}
for path in paths:
isAllowed = res['Value']['Successful'].get(path, False)
if isAllowed:
result['Successful'].append(path)
else:
result['Failed'].append(path)
return S_OK(result)
##########################################################################
#
# These are the bulk removal methods
#
def cleanLogicalDirectory(self, lfnDir):
""" Clean the logical directory from the catalog and storage
"""
log = self.log.getSubLogger('cleanLogicalDirectory')
if isinstance(lfnDir, basestring):
lfnDir = [lfnDir]
retDict = {"Successful": {}, "Failed": {}}
for folder in lfnDir:
res = self.__cleanDirectory(folder)
if not res['OK']:
log.debug("Failed to clean directory.", "%s %s" %
(folder, res['Message']))
retDict["Failed"][folder] = res['Message']
else:
log.debug("Successfully removed directory.", folder)
retDict["Successful"][folder] = res['Value']
return S_OK(retDict)
def __cleanDirectory(self, folder):
""" delete all files from directory :folder: in FileCatalog and StorageElement
:param self: self reference
:param str folder: directory name
"""
log = self.log.getSubLogger('__cleanDirectory')
res = self.__hasAccess('removeDirectory', folder)
if not res['OK']:
return res
if folder not in res['Value']['Successful']:
errStr = "Write access not permitted for this credential."
log.debug(errStr, folder)
return S_ERROR(errStr)
res = self.__getCatalogDirectoryContents([folder])
if not res['OK']:
return res
res = self.removeFile(res['Value'])
if not res['OK']:
return res
for lfn, reason in res['Value']['Failed'].iteritems():
log.error("Failed to remove file found in the catalog",
"%s %s" % (lfn, reason))
res = returnSingleResult(self.removeFile(['%s/dirac_directory' % folder]))
if not res['OK']:
if not DErrno.cmpError(res, errno.ENOENT):
log.warn('Failed to delete dirac_directory placeholder file')
storageElements = gConfig.getValue(
'Resources/StorageElementGroups/SE_Cleaning_List', [])
failed = False
for storageElement in sorted(storageElements):
res = self.__removeStorageDirectory(folder, storageElement)
if not res['OK']:
failed = True
if failed:
return S_ERROR("Failed to clean storage directory at all SEs")
res = returnSingleResult(
self.fileCatalog.removeDirectory(folder, recursive=True))
if not res['OK']:
return res
return S_OK()
def __removeStorageDirectory(self, directory, storageElement):
""" delete SE directory
:param self: self reference
:param str directory: folder to be removed
:param str storageElement: DIRAC SE name
"""
se = StorageElement(storageElement, vo=self.voName)
res = returnSingleResult(se.exists(directory))
log = self.log.getSubLogger('__removeStorageDirectory')
if not res['OK']:
log.debug("Failed to obtain existance of directory", res['Message'])
return res
exists = res['Value']
if not exists:
log.debug("The directory %s does not exist at %s " %
(directory, storageElement))
return S_OK()
res = returnSingleResult(se.removeDirectory(directory, recursive=True))
if not res['OK']:
log.debug("Failed to remove storage directory", res['Message'])
return res
log.debug("Successfully removed %d files from %s at %s" % (res['Value']['FilesRemoved'],
directory,
storageElement))
return S_OK()
def __getCatalogDirectoryContents(self, directories):
""" ls recursively all files in directories
:param self: self reference
:param list directories: folder names
"""
log = self.log.getSubLogger('__getCatalogDirectoryContents')
log.debug('Obtaining the catalog contents for %d directories:' %
len(directories))
activeDirs = directories
allFiles = {}
while len(activeDirs) > 0:
currentDir = activeDirs[0]
res = returnSingleResult(
self.fileCatalog.listDirectory(currentDir, verbose=True))
activeDirs.remove(currentDir)
if not res['OK']:
log.debug("Problem getting the %s directory content" %
currentDir, res['Message'])
else:
dirContents = res['Value']
activeDirs.extend(dirContents['SubDirs'])
allFiles.update(dirContents['Files'])
log.debug("Found %d files" % len(allFiles))
return S_OK(allFiles)
def getReplicasFromDirectory(self, directory):
""" get all replicas from a given directory
:param self: self reference
:param mixed directory: list of directories or one directory
"""
if isinstance(directory, basestring):
directories = [directory]
else:
directories = directory
res = self.__getCatalogDirectoryContents(directories)
if not res['OK']:
return res
allReplicas = dict((lfn, metadata['Replicas'])
for lfn, metadata in res['Value'].iteritems())
return S_OK(allReplicas)
def getFilesFromDirectory(self, directory, days=0, wildcard='*'):
""" get all files from :directory: older than :days: days matching to :wildcard:
:param self: self reference
:param mixed directory: list of directories or directory name
:param int days: ctime days
:param str wildcard: pattern to match
"""
if isinstance(directory, basestring):
directories = [directory]
else:
directories = directory
log = self.log.getSubLogger('getFilesFromDirectory')
log.debug("Obtaining the files older than %d days in %d directories:" %
(days, len(directories)))
for folder in directories:
log.debug(folder)
activeDirs = directories
allFiles = []
while len(activeDirs) > 0:
currentDir = activeDirs[0]
# We only need the metadata (verbose) if a limit date is given
res = returnSingleResult(
self.fileCatalog.listDirectory(currentDir, verbose=(days != 0)))
activeDirs.remove(currentDir)
if not res['OK']:
log.debug("Error retrieving directory contents", "%s %s" %
(currentDir, res['Message']))
else:
dirContents = res['Value']
subdirs = dirContents['SubDirs']
files = dirContents['Files']
log.debug("%s: %d files, %d sub-directories" %
(currentDir, len(files), len(subdirs)))
for subdir in subdirs:
if (not days) or _isOlderThan(subdirs[subdir]['CreationDate'], days):
if subdir[0] != '/':
subdir = currentDir + '/' + subdir
activeDirs.append(subdir)
for fileName in files:
fileInfo = files[fileName]
fileInfo = fileInfo.get('Metadata', fileInfo)
if (not days) or not fileInfo.get('CreationDate') or _isOlderThan(fileInfo['CreationDate'], days):
if wildcard == '*' or fnmatch.fnmatch(fileName, wildcard):
fileName = fileInfo.get('LFN', fileName)
allFiles.append(fileName)
return S_OK(allFiles)
##########################################################################
#
# These are the data transfer methods
#
def getFile(self, lfn, destinationDir='', sourceSE=None):
""" Get a local copy of a LFN from Storage Elements.
'lfn' is the logical file name for the desired file
"""
log = self.log.getSubLogger('getFile')
if isinstance(lfn, list):
lfns = lfn
elif isinstance(lfn, basestring):
lfns = [lfn]
else:
errStr = "Supplied lfn must be string or list of strings."
log.debug(errStr)
return S_ERROR(errStr)
log.debug("Attempting to get %s files." % len(lfns))
res = self.getActiveReplicas(lfns, getUrl=False)
if not res['OK']:
return res
failed = res['Value']['Failed']
lfnReplicas = res['Value']['Successful']
res = self.fileCatalog.getFileMetadata(lfnReplicas.keys())
if not res['OK']:
return res
failed.update(res['Value']['Failed'])
fileMetadata = res['Value']['Successful']
successful = {}
for lfn in fileMetadata:
res = self.__getFile(
lfn, lfnReplicas[lfn], fileMetadata[lfn], destinationDir, sourceSE=sourceSE)
if not res['OK']:
failed[lfn] = res['Message']
else:
successful[lfn] = res['Value']
return S_OK({'Successful': successful, 'Failed': failed})
def __getFile(self, lfn, replicas, metadata, destinationDir, sourceSE=None):
"""
Method actually doing the job to get a file from storage
"""
log = self.log.getSubLogger('__getFile')
if not replicas:
errStr = "No accessible replicas found"
log.debug(errStr)
return S_ERROR(errStr)
# Determine the best replicas
errTuple = ("No SE", "found")
if sourceSE is None:
sortedSEs = self._getSEProximity(replicas)
else:
if sourceSE not in replicas:
return S_ERROR('No replica at %s' % sourceSE)
else:
sortedSEs = [sourceSE]
for storageElementName in sortedSEs:
se = StorageElement(storageElementName, vo=self.voName)
res = returnSingleResult(se.getFile(
lfn, localPath=os.path.realpath(destinationDir)))
if not res['OK']:
errTuple = ("Error getting file from storage:", "%s from %s, %s" %
(lfn, storageElementName, res['Message']))
errToReturn = res
else:
localFile = os.path.realpath(os.path.join(
destinationDir, os.path.basename(lfn)))
localAdler = fileAdler(localFile)
if metadata['Size'] != res['Value']:
errTuple = ("Mismatch of sizes:", "downloaded = %d, catalog = %d" %
(res['Value'], metadata['Size']))
errToReturn = S_ERROR(DErrno.EFILESIZE, errTuple[1])
elif (metadata['Checksum']) and (not compareAdler(metadata['Checksum'], localAdler)):
errTuple = ("Mismatch of checksums:", "downloaded = %s, catalog = %s" %
(localAdler, metadata['Checksum']))
errToReturn = S_ERROR(DErrno.EBADCKS, errTuple[1])
else:
return S_OK(localFile)
# If we are here, there was an error, log it debug level
log.debug(errTuple[0], errTuple[1])
log.verbose("Failed to get local copy from any replicas:",
"\n%s %s" % errTuple)
return errToReturn
def _getSEProximity(self, replicas):
""" get SE proximity """
siteName = DIRAC.siteName()
self.__filterTapeSEs(replicas)
localSEs = [se for se in self.dmsHelper.getSEsAtSite(
siteName).get('Value', []) if se in replicas]
countrySEs = []
countryCode = str(siteName).split('.')[-1]
res = self.dmsHelper.getSEsAtCountry(countryCode)
if res['OK']:
countrySEs = [se for se in res['Value']
if se in replicas and se not in localSEs]
sortedSEs = randomize(localSEs) + randomize(countrySEs)
sortedSEs += randomize(se for se in replicas if se not in sortedSEs)
return sortedSEs
def putAndRegister(self, lfn, fileName, diracSE, guid=None, path=None,
checksum=None, overwrite=False):
""" Put a local file to a Storage Element and register in the File Catalogues
'lfn' is the file LFN
'file' is the full path to the local file
'diracSE' is the Storage Element to which to put the file
'guid' is the guid with which the file is to be registered (if not provided will be generated)
'path' is the path on the storage where the file will be put (if not provided the LFN will be used)
'overwrite' removes file from the file catalogue and SE before attempting upload
"""
res = self.__hasAccess('addFile', lfn)
if not res['OK']:
return res
log = self.log.getSubLogger('putAndRegister')
if lfn not in res['Value']['Successful']:
errStr = "Write access not permitted for this credential."
log.debug(errStr, lfn)
return S_ERROR(errStr)
# Check that the local file exists
if not os.path.exists(fileName):
errStr = "Supplied file does not exist."
log.debug(errStr, fileName)
return S_ERROR(errStr)
# If the path is not provided then use the LFN path
if not path:
path = os.path.dirname(lfn)
# Obtain the size of the local file
size = getSize(fileName)
if size == 0:
errStr = "Supplied file is zero size."
log.debug(errStr, fileName)
return S_ERROR(errStr)
# If the GUID is not given, generate it here
if not guid:
guid = makeGuid(fileName)
if not checksum:
log.debug("Checksum information not provided. Calculating adler32.")
checksum = fileAdler(fileName)
# Make another try
if not checksum:
log.debug("Checksum calculation failed, try again")
checksum = fileAdler(fileName)
if checksum:
log.debug("Checksum calculated to be %s." % checksum)
else:
return S_ERROR(DErrno.EBADCKS, "Unable to calculate checksum")
res = self.fileCatalog.exists({lfn: guid})
if not res['OK']:
errStr = "Completely failed to determine existence of destination LFN."
log.debug(errStr, lfn)
return res
if lfn not in res['Value']['Successful']:
errStr = "Failed to determine existence of destination LFN."
log.debug(errStr, lfn)
return S_ERROR(errStr)
if res['Value']['Successful'][lfn]:
if res['Value']['Successful'][lfn] == lfn:
if overwrite:
resRm = self.removeFile(lfn, force=True)
if not resRm['OK']:
errStr = "Failed to prepare file for overwrite"
log.debug(errStr, lfn)
return resRm
if lfn not in resRm['Value']['Successful']:
errStr = "Failed to either delete file or LFN"
log.debug(errStr, lfn)
return S_ERROR("%s %s" % (errStr, lfn))
else:
errStr = "The supplied LFN already exists in the File Catalog."
log.debug(errStr, lfn)
return S_ERROR("%s %s" % (errStr, res['Value']['Successful'][lfn]))
else:
# If the returned LFN is different, this is the name of a file
# with the same GUID
errStr = "This file GUID already exists for another file"
log.debug(errStr, res['Value']['Successful'][lfn])
return S_ERROR("%s %s" % (errStr, res['Value']['Successful'][lfn]))
##########################################################
# Instantiate the destination storage element here.
storageElement = StorageElement(diracSE, vo=self.voName)
res = storageElement.isValid()
if not res['OK']:
errStr = "The storage element is not currently valid."
log.verbose(errStr, "%s %s" % (diracSE, res['Message']))
return S_ERROR("%s %s" % (errStr, res['Message']))
fileDict = {lfn: fileName}
successful = {}
failed = {}
##########################################################
# Perform the put here.
oDataOperation = _initialiseAccountingObject('putAndRegister', diracSE, 1)
oDataOperation.setStartTime()
oDataOperation.setValueByKey('TransferSize', size)
startTime = time.time()
res = returnSingleResult(storageElement.putFile(fileDict))
putTime = time.time() - startTime
oDataOperation.setValueByKey('TransferTime', putTime)
if not res['OK']:
# We don't consider it a failure if the SE is not valid
if not DErrno.cmpError(res, errno.EACCES):
oDataOperation.setValueByKey('TransferOK', 0)
oDataOperation.setValueByKey('FinalStatus', 'Failed')
oDataOperation.setEndTime()
gDataStoreClient.addRegister(oDataOperation)
gDataStoreClient.commit()
startTime = time.time()
log.debug('putAndRegister: Sending accounting took %.1f seconds' %
(time.time() - startTime))
errStr = "Failed to put file to Storage Element."
log.debug(errStr, "%s: %s" % (fileName, res['Message']))
return S_ERROR("%s %s" % (errStr, res['Message']))
successful[lfn] = {'put': putTime}
###########################################################
# Perform the registration here
destinationSE = storageElement.storageElementName()
res = returnSingleResult(storageElement.getURL(
lfn, protocol=self.registrationProtocol))
if not res['OK']:
errStr = "Failed to generate destination PFN."
log.debug(errStr, res['Message'])
return S_ERROR("%s %s" % (errStr, res['Message']))
destUrl = res['Value']
oDataOperation.setValueByKey('RegistrationTotal', 1)
fileTuple = (lfn, destUrl, size, destinationSE, guid, checksum)
registerDict = {'LFN': lfn, 'PFN': destUrl, 'Size': size,
'TargetSE': destinationSE, 'GUID': guid, 'Addler': checksum}
startTime = time.time()
res = self.registerFile(fileTuple)
registerTime = time.time() - startTime
oDataOperation.setValueByKey('RegistrationTime', registerTime)
if not res['OK']:
errStr = "Completely failed to register file."
log.debug(errStr, res['Message'])
failed[lfn] = {'register': registerDict}
oDataOperation.setValueByKey('FinalStatus', 'Failed')
elif lfn in res['Value']['Failed']:
errStr = "Failed to register file."
log.debug(errStr, "%s %s" % (lfn, res['Value']['Failed'][lfn]))
oDataOperation.setValueByKey('FinalStatus', 'Failed')
failed[lfn] = {'register': registerDict}
else:
successful[lfn]['register'] = registerTime
oDataOperation.setValueByKey('RegistrationOK', 1)
oDataOperation.setEndTime()
gDataStoreClient.addRegister(oDataOperation)
startTime = time.time()
gDataStoreClient.commit()
log.debug('Sending accounting took %.1f seconds' %
(time.time() - startTime))
return S_OK({'Successful': successful, 'Failed': failed})
def replicateAndRegister(self, lfn, destSE, sourceSE='', destPath='', localCache='', catalog=''):
""" Replicate a LFN to a destination SE and register the replica.
'lfn' is the LFN to be replicated
'destSE' is the Storage Element the file should be replicated to
'sourceSE' is the source for the file replication (where not specified all replicas will be attempted)
        'destPath' is the path on the destination storage element, if it is to differ from the LHCb convention
'localCache' is the local file system location to be used as a temporary cache
"""
log = self.log.getSubLogger('replicateAndRegister')
successful = {}
failed = {}
log.debug("Attempting to replicate %s to %s." % (lfn, destSE))
startReplication = time.time()
res = self.__replicate(lfn, destSE, sourceSE, destPath, localCache)
replicationTime = time.time() - startReplication
if not res['OK']:
errStr = "Completely failed to replicate file."
log.debug(errStr, res['Message'])
return S_ERROR("%s %s" % (errStr, res['Message']))
if not res['Value']:
# The file was already present at the destination SE
log.debug("%s already present at %s." % (lfn, destSE))
successful[lfn] = {'replicate': 0, 'register': 0}
resDict = {'Successful': successful, 'Failed': failed}
return S_OK(resDict)
successful[lfn] = {'replicate': replicationTime}
destPfn = res['Value']['DestPfn']
destSE = res['Value']['DestSE']
log.debug("Attempting to register %s at %s." % (destPfn, destSE))
replicaTuple = (lfn, destPfn, destSE)
startRegistration = time.time()
res = self.registerReplica(replicaTuple, catalog=catalog)
registrationTime = time.time() - startRegistration
if not res['OK']:
# Need to return to the client that the file was replicated but not
# registered
errStr = "Completely failed to register replica."
log.debug(errStr, res['Message'])
failed[lfn] = {'Registration': {
'LFN': lfn, 'TargetSE': destSE, 'PFN': destPfn}}
else:
if lfn in res['Value']['Successful']:
log.debug("Successfully registered replica.")
successful[lfn]['register'] = registrationTime
else:
errStr = "Failed to register replica."
log.debug(errStr, res['Value']['Failed'][lfn])
failed[lfn] = {'Registration': {
'LFN': lfn, 'TargetSE': destSE, 'PFN': destPfn}}
return S_OK({'Successful': successful, 'Failed': failed})
def replicate(self, lfn, destSE, sourceSE='', destPath='', localCache=''):
""" Replicate a LFN to a destination SE and register the replica.
'lfn' is the LFN to be replicated
'destSE' is the Storage Element the file should be replicated to
'sourceSE' is the source for the file replication (where not specified all replicas will be attempted)
        'destPath' is the path on the destination storage element, if it is to differ from the LHCb convention
'localCache' is the local file system location to be used as a temporary cache
"""
log = self.log.getSubLogger('replicate')
log.debug("Attempting to replicate %s to %s." % (lfn, destSE))
res = self.__replicate(lfn, destSE, sourceSE, destPath, localCache)
if not res['OK']:
errStr = "Replication failed."
log.debug(errStr, "%s %s" % (lfn, destSE))
return res
if not res['Value']:
# The file was already present at the destination SE
log.debug("%s already present at %s." % (lfn, destSE))
return res
return S_OK(lfn)
def __getSERealName(self, storageName):
""" get the base name of an SE possibly defined as an alias"""
rootConfigPath = '/Resources/StorageElements'
configPath = '%s/%s' % (rootConfigPath, storageName)
res = gConfig.getOptions(configPath)
if not res['OK']:
errStr = "Failed to get storage options"
return S_ERROR(errStr)
if not res['Value']:
errStr = "Supplied storage doesn't exist."
return S_ERROR(errStr)
if 'Alias' in res['Value']:
configPath += '/Alias'
aliasName = gConfig.getValue(configPath)
result = self.__getSERealName(aliasName)
if not result['OK']:
return result
resolvedName = result['Value']
else:
resolvedName = storageName
return S_OK(resolvedName)
def __isSEInList(self, seName, seList):
""" Check whether an SE is in a list of SEs... All could be aliases """
seSet = set()
for se in seList:
res = self.__getSERealName(se)
if res['OK']:
seSet.add(res['Value'])
return self.__getSERealName(seName).get('Value') in seSet
def __replicate(self, lfn, destSEName, sourceSEName='', destPath='', localCache=''):
""" Replicate a LFN to a destination SE.
'lfn' is the LFN to be replicated
'destSE' is the Storage Element the file should be replicated to
'sourceSE' is the source for the file replication (where not specified all replicas will be attempted)
        'destPath' is the path on the destination storage element, if it is to differ from the LHCb convention
        'localCache' is a local directory used to get and put the file when a third party transfer cannot be done
"""
log = self.log.getSubLogger('__replicate', True)
###########################################################
# Check that we have write permissions to this directory.
res = self.__hasAccess('addReplica', lfn)
if not res['OK']:
return res
if lfn not in res['Value']['Successful']:
errStr = "__replicate: Write access not permitted for this credential."
log.debug(errStr, lfn)
return S_ERROR(errStr)
# Check that the destination storage element is sane and resolve its name
log.debug("Verifying destination StorageElement validity (%s)." %
(destSEName))
destStorageElement = StorageElement(destSEName, vo=self.voName)
res = destStorageElement.isValid()
if not res['OK']:
errStr = "The storage element is not currently valid."
log.verbose(errStr, "%s %s" % (destSEName, res['Message']))
return S_ERROR("%s %s" % (errStr, res['Message']))
# Get the real name of the SE
destSEName = destStorageElement.storageElementName()
###########################################################
# Check whether the destination storage element is banned
    log.verbose(
        "Determining whether %s (destination) is Write-banned." % destSEName)
if not destStorageElement.status()['Write']:
infoStr = "Supplied destination Storage Element is not currently allowed for Write."
log.debug(infoStr, destSEName)
return S_ERROR(infoStr)
# Get the LFN replicas from the file catalog
log.debug("Attempting to obtain replicas for %s." % (lfn))
res = returnSingleResult(self.getReplicas(lfn, getUrl=False))
if not res['OK']:
errStr = "Failed to get replicas for LFN."
log.debug(errStr, "%s %s" % (lfn, res['Message']))
return S_ERROR("%s %s" % (errStr, res['Message']))
log.debug("Successfully obtained replicas for LFN.")
lfnReplicas = res['Value']
###########################################################
# If the file catalog size is zero fail the transfer
log.debug("Attempting to obtain size for %s." % lfn)
res = returnSingleResult(self.fileCatalog.getFileSize(lfn))
if not res['OK']:
errStr = "Failed to get size for LFN."
log.debug(errStr, "%s %s" % (lfn, res['Message']))
return S_ERROR("%s %s" % (errStr, res['Message']))
catalogSize = res['Value']
if catalogSize == 0:
errStr = "Registered file size is 0."
log.debug(errStr, lfn)
return S_ERROR(errStr)
log.debug("File size determined to be %s." % catalogSize)
###########################################################
# If the LFN already exists at the destination we have nothing to do
if self.__isSEInList(destSEName, lfnReplicas):
log.debug("__replicate: LFN is already registered at %s." % destSEName)
return S_OK()
###########################################################
# If the source is specified, check that it is in the replicas
if sourceSEName:
log.debug("Determining whether source Storage Element specified is sane.")
if sourceSEName not in lfnReplicas:
errStr = "LFN does not exist at supplied source SE."
log.error(errStr, "%s %s" % (lfn, sourceSEName))
return S_ERROR(errStr)
# If sourceSE is specified, then we consider this one only, otherwise
# we consider them all
possibleSourceSEs = [sourceSEName] if sourceSEName else lfnReplicas
    # We sort the possibleSourceSEs with the SEs that are on the same site as the destination first
# reverse = True because True > False
possibleSourceSEs = sorted(possibleSourceSEs,
key=lambda x: self.dmsHelper.isSameSiteSE(
x, destSEName).get('Value', False),
reverse=True)
# In case we manage to find SEs that would work as a source, but we can't negotiate a protocol
# we will do a get and put using one of this sane SE
possibleIntermediateSEs = []
# Take into account the destination path
if destPath:
destPath = '%s/%s' % (destPath, os.path.basename(lfn))
else:
destPath = lfn
for candidateSEName in possibleSourceSEs:
log.debug("Consider %s as a source" % candidateSEName)
# Check that the candidate is active
if not self.__checkSEStatus(candidateSEName, status='Read'):
log.debug("%s is currently not allowed as a source." % candidateSEName)
continue
else:
log.debug("%s is available for use." % candidateSEName)
candidateSE = StorageElement(candidateSEName, vo=self.voName)
# Check that the SE is valid
res = candidateSE.isValid()
if not res['OK']:
log.verbose("The storage element is not currently valid.",
"%s %s" % (candidateSEName, res['Message']))
continue
else:
log.debug("The storage is currently valid", candidateSEName)
# Check that the file size corresponds to the one in the FC
res = returnSingleResult(candidateSE.getFileSize(lfn))
if not res['OK']:
log.debug("could not get fileSize on %s" %
candidateSEName, res['Message'])
continue
seFileSize = res['Value']
if seFileSize != catalogSize:
log.debug("Catalog size and physical file size mismatch.",
"%s %s" % (catalogSize, seFileSize))
continue
else:
log.debug("Catalog size and physical size match")
res = destStorageElement.negociateProtocolWithOtherSE(
candidateSE, protocols=self.thirdPartyProtocols)
if not res['OK']:
log.debug("Error negotiating replication protocol", res['Message'])
continue
replicationProtocols = res['Value']
if not replicationProtocols:
possibleIntermediateSEs.append(candidateSE)
log.debug("No protocol suitable for replication found")
continue
log.debug('Found common protocols', replicationProtocols)
      # NOTE: this would not work if the protocol is 'file'.
      # We try the protocols one by one.
      # That obviously assumes that there is an overlap, and not only
      # a compatibility, between the output protocols of the source
      # and the input protocols of the destination.
      # But that is the only way to make sure we are not replicating
      # over ourselves.
for compatibleProtocol in replicationProtocols:
# Compare the urls to make sure we are not overwriting
res = returnSingleResult(candidateSE.getURL(
lfn, protocol=compatibleProtocol))
if not res['OK']:
log.debug("Cannot get sourceURL", res['Message'])
continue
sourceURL = res['Value']
destURL = ''
res = returnSingleResult(destStorageElement.getURL(
destPath, protocol=compatibleProtocol))
if not res['OK']:
# for some protocols, in particular srm
# you might get an error because the file does not exist
# which is exactly what we want
# in that case, we just keep going with the comparison
# since destURL will be an empty string
if not DErrno.cmpError(res, errno.ENOENT):
log.debug("Cannot get destURL", res['Message'])
continue
else:
log.debug("File does not exist: Expected error for TargetSE !!")
destURL = res['Value']
if sourceURL == destURL:
log.debug("Same source and destination, give up")
continue
# Attempt the transfer
res = returnSingleResult(destStorageElement.replicateFile({destPath: sourceURL},
sourceSize=catalogSize,
inputProtocol=compatibleProtocol))
if not res['OK']:
log.debug("Replication failed", "%s from %s to %s." %
(lfn, candidateSEName, destSEName))
continue
log.debug("Replication successful.", res['Value'])
res = returnSingleResult(destStorageElement.getURL(
destPath, protocol=self.registrationProtocol))
if not res['OK']:
log.debug('Error getting the registration URL', res['Message'])
# it's maybe pointless to try the other candidateSEs...
continue
registrationURL = res['Value']
return S_OK({'DestSE': destSEName, 'DestPfn': registrationURL})
# If we are here, that means that we could not make a third party transfer.
# Check if we have some sane SEs from which we could do a get/put
localDir = os.path.realpath(localCache if localCache else '.')
localFile = os.path.join(localDir, os.path.basename(lfn))
log.debug("Will try intermediate transfer from %s sources" %
len(possibleIntermediateSEs))
for candidateSE in possibleIntermediateSEs:
res = returnSingleResult(candidateSE.getFile(lfn, localPath=localDir))
if not res['OK']:
log.debug('Error getting the file from %s' %
candidateSE.name, res['Message'])
continue
res = returnSingleResult(
destStorageElement.putFile({destPath: localFile}))
# Remove the local file whatever happened
try:
os.remove(localFile)
except OSError as e:
log.error('Error removing local file', '%s %s' % (localFile, e))
if not res['OK']:
log.debug('Error putting file coming from %s' %
candidateSE.name, res['Message'])
# if the put is the problem, it's maybe pointless to try the other
# candidateSEs...
continue
# get URL with default protocol to return it
res = returnSingleResult(destStorageElement.getURL(
destPath, protocol=self.registrationProtocol))
if not res['OK']:
log.debug('Error getting the registration URL', res['Message'])
# it's maybe pointless to try the other candidateSEs...
continue
registrationURL = res['Value']
return S_OK({'DestSE': destSEName, 'DestPfn': registrationURL})
# If here, we are really doomed
errStr = "Failed to replicate with all sources."
log.debug(errStr, lfn)
return S_ERROR(errStr)
###################################################################
#
# These are the file catalog write methods
#
def registerFile(self, fileTuple, catalog=''):
""" Register a file or a list of files
:param self: self reference
:param tuple fileTuple: (lfn, physicalFile, fileSize, storageElementName, fileGuid, checksum )
:param str catalog: catalog name
"""
log = self.log.getSubLogger('registerFile')
if isinstance(fileTuple, (list, set)):
fileTuples = fileTuple
elif isinstance(fileTuple, tuple):
fileTuples = [fileTuple]
for fileTuple in fileTuples:
if not isinstance(fileTuple, tuple):
errStr = "Supplied file info must be tuple or list of tuples."
log.debug(errStr)
return S_ERROR(errStr)
if not fileTuples:
return S_OK({'Successful': [], 'Failed': {}})
log.debug("Attempting to register %s files." % len(fileTuples))
res = self.__registerFile(fileTuples, catalog)
if not res['OK']:
errStr = "Completely failed to register files."
log.debug(errStr, res['Message'])
return res
return res
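  # Illustrative call (a sketch, not part of the original class; the SE name,
  # URL and checksum below are hypothetical):
  #
  #   dm = DataManager()
  #   res = dm.registerFile(('/vo/user/data.txt',       # lfn
  #                          'root://se.example/data',  # physicalFile
  #                          1024,                      # fileSize
  #                          'EXAMPLE-SE',              # storageElementName
  #                          '0000-AAAA',               # fileGuid
  #                          'ad0234829'))              # checksum
  #   # res['Value'] carries the catalog's {'Successful': ..., 'Failed': ...}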
def __registerFile(self, fileTuples, catalog):
""" register file to catalog """
fileDict = {}
for lfn, physicalFile, fileSize, storageElementName, fileGuid, checksum in fileTuples:
fileDict[lfn] = {'PFN': physicalFile,
'Size': fileSize,
'SE': storageElementName,
'GUID': fileGuid,
'Checksum': checksum}
if catalog:
fileCatalog = FileCatalog(catalog, vo=self.voName)
if not fileCatalog.isOK():
return S_ERROR("Can't get FileCatalog %s" % catalog)
else:
fileCatalog = self.fileCatalog
res = fileCatalog.addFile(fileDict)
if not res['OK']:
errStr = "Completely failed to register files."
self.log.getSubLogger('__registerFile').debug(errStr, res['Message'])
return res
def registerReplica(self, replicaTuple, catalog=''):
""" Register a replica (or list of) supplied in the replicaTuples.
'replicaTuple' is a tuple or list of tuples of the form (lfn,pfn,se)
"""
log = self.log.getSubLogger('registerReplica')
if isinstance(replicaTuple, (list, set)):
replicaTuples = replicaTuple
elif isinstance(replicaTuple, tuple):
replicaTuples = [replicaTuple]
for replicaTuple in replicaTuples:
if not isinstance(replicaTuple, tuple):
errStr = "Supplied file info must be tuple or list of tuples."
log.debug(errStr)
return S_ERROR(errStr)
if not replicaTuples:
return S_OK({'Successful': [], 'Failed': {}})
log.debug("Attempting to register %s replicas." % len(replicaTuples))
res = self.__registerReplica(replicaTuples, catalog)
if not res['OK']:
errStr = "Completely failed to register replicas."
log.debug(errStr, res['Message'])
return res
return res
def __registerReplica(self, replicaTuples, catalog):
""" register replica to catalogue """
log = self.log.getSubLogger('__registerReplica')
seDict = {}
for lfn, url, storageElementName in replicaTuples:
seDict.setdefault(storageElementName, []).append((lfn, url))
failed = {}
replicaTuples = []
for storageElementName, replicaTuple in seDict.iteritems():
destStorageElement = StorageElement(storageElementName, vo=self.voName)
res = destStorageElement.isValid()
if not res['OK']:
errStr = "The storage element is not currently valid."
log.verbose(errStr, "%s %s" % (storageElementName, res['Message']))
for lfn, url in replicaTuple:
failed[lfn] = errStr
else:
storageElementName = destStorageElement.storageElementName()
for lfn, url in replicaTuple:
res = returnSingleResult(destStorageElement.getURL(
lfn, protocol=self.registrationProtocol))
if not res['OK']:
failed[lfn] = res['Message']
else:
replicaTuple = (lfn, res['Value'], storageElementName, False)
replicaTuples.append(replicaTuple)
log.debug("Successfully resolved %s replicas for registration." %
len(replicaTuples))
# HACK!
replicaDict = {}
for lfn, url, se, _master in replicaTuples:
replicaDict[lfn] = {'SE': se, 'PFN': url}
if catalog:
fileCatalog = FileCatalog(catalog, vo=self.voName)
res = fileCatalog.addReplica(replicaDict)
else:
res = self.fileCatalog.addReplica(replicaDict)
if not res['OK']:
errStr = "Completely failed to register replicas."
log.debug(errStr, res['Message'])
return S_ERROR("%s %s" % (errStr, res['Message']))
failed.update(res['Value']['Failed'])
successful = res['Value']['Successful']
resDict = {'Successful': successful, 'Failed': failed}
return S_OK(resDict)
###################################################################
#
# These are the removal methods for physical and catalogue removal
#
def removeFile(self, lfn, force=None):
""" Remove the file (all replicas) from Storage Elements and file catalogue
'lfn' is the file to be removed
"""
log = self.log.getSubLogger('removeFile')
if not lfn:
return S_OK({'Successful': {}, 'Failed': {}})
if force is None:
force = self.ignoreMissingInFC
if isinstance(lfn, (list, dict, set, tuple)):
lfns = list(lfn)
else:
lfns = [lfn]
for lfn in lfns:
if not isinstance(lfn, basestring):
errStr = "Supplied lfns must be string or list of strings."
log.debug(errStr)
return S_ERROR(errStr)
successful = {}
failed = {}
if not lfns:
return S_OK({'Successful': successful, 'Failed': failed})
# First check if the file exists in the FC
res = self.fileCatalog.exists(lfns)
if not res['OK']:
return res
success = res['Value']['Successful']
lfns = [lfn for lfn in success if success[lfn]]
if force:
# Files that don't exist are removed successfully
successful = dict.fromkeys(
(lfn for lfn in success if not success[lfn]), True)
else:
failed = dict.fromkeys(
(lfn for lfn in success if not success[lfn]), 'No such file or directory')
# Check that we have write permissions to this directory and to the file.
if lfns:
res = self.__hasAccess('removeFile', lfns)
if not res['OK']:
return res
if res['Value']['Failed']:
errStr = "Write access not permitted for this credential."
log.debug(errStr, 'for %d files' % len(res['Value']['Failed']))
failed.update(dict.fromkeys(res['Value']['Failed'], errStr))
lfns = res['Value']['Successful']
if lfns:
log.debug(
"Attempting to remove %d files from Storage and Catalogue. Get replicas first" % len(lfns))
res = self.fileCatalog.getReplicas(lfns, allStatus=True)
if not res['OK']:
errStr = "DataManager.removeFile: Completely failed to get replicas for lfns."
log.debug(errStr, res['Message'])
return res
lfnDict = res['Value']['Successful']
for lfn, reason in res['Value']['Failed'].iteritems():
# Ignore files missing in FC if force is set
if reason == 'No such file or directory' and force:
successful[lfn] = True
elif reason == 'File has zero replicas':
lfnDict[lfn] = {}
else:
failed[lfn] = reason
res = self.__removeFile(lfnDict)
if not res['OK']:
# This can never happen
return res
failed.update(res['Value']['Failed'])
successful.update(res['Value']['Successful'])
gDataStoreClient.commit()
return S_OK({'Successful': successful, 'Failed': failed})
def __removeFile(self, lfnDict):
""" remove file """
storageElementDict = {}
# # sorted and reversed
for lfn, repDict in sorted(lfnDict.items(), reverse=True):
for se in repDict:
storageElementDict.setdefault(se, []).append(lfn)
failed = {}
successful = {}
for storageElementName in sorted(storageElementDict):
lfns = storageElementDict[storageElementName]
res = self.__removeReplica(storageElementName, lfns, replicaDict=lfnDict)
if not res['OK']:
errStr = res['Message']
for lfn in lfns:
failed[lfn] = failed.setdefault(lfn, '') + " %s" % errStr
else:
for lfn, errStr in res['Value']['Failed'].iteritems():
failed[lfn] = failed.setdefault(lfn, '') + " %s" % errStr
completelyRemovedFiles = set(lfnDict) - set(failed)
if completelyRemovedFiles:
res = self.fileCatalog.removeFile(list(completelyRemovedFiles))
if not res['OK']:
failed.update(dict.fromkeys(completelyRemovedFiles,
"Failed to remove file from the catalog: %s" % res['Message']))
else:
failed.update(res['Value']['Failed'])
successful = res['Value']['Successful']
return S_OK({'Successful': successful, 'Failed': failed})
def removeReplica(self, storageElementName, lfn):
""" Remove replica at the supplied Storage Element from Storage Element then file catalogue
'storageElementName' is the storage where the file is to be removed
'lfn' is the file to be removed
"""
log = self.log.getSubLogger('removeReplica')
if isinstance(lfn, (list, dict, set, tuple)):
lfns = set(lfn)
else:
lfns = set([lfn])
for lfn in lfns:
if not isinstance(lfn, basestring):
errStr = "Supplied lfns must be string or list of strings."
log.debug(errStr)
return S_ERROR(errStr)
successful = {}
failed = {}
if not lfns:
return S_OK({'Successful': successful, 'Failed': failed})
# Check that we have write permissions to this file.
res = self.__hasAccess('removeReplica', lfns)
if not res['OK']:
      log.debug('Error in __verifyWritePermission', res['Message'])
return res
if res['Value']['Failed']:
errStr = "Write access not permitted for this credential."
log.debug(errStr, 'for %d files' % len(res['Value']['Failed']))
failed.update(dict.fromkeys(res['Value']['Failed'], errStr))
lfns -= set(res['Value']['Failed'])
if not lfns:
log.debug('Permission denied for all files')
else:
log.debug("Will remove %s lfns at %s." % (len(lfns), storageElementName))
res = self.fileCatalog.getReplicas(list(lfns), allStatus=True)
if not res['OK']:
errStr = "Completely failed to get replicas for lfns."
log.debug(errStr, res['Message'])
return res
failed.update(res['Value']['Failed'])
replicaDict = res['Value']['Successful']
lfnsToRemove = set()
for lfn, repDict in replicaDict.iteritems():
if storageElementName not in repDict:
# The file doesn't exist at the storage element so don't have to
# remove it
successful[lfn] = True
elif len(repDict) == 1:
# The file has only a single replica so don't remove
log.debug("The replica you are trying to remove is the only one.",
"%s @ %s" % (lfn, storageElementName))
failed[lfn] = "Failed to remove sole replica"
else:
lfnsToRemove.add(lfn)
if lfnsToRemove:
res = self.__removeReplica(
storageElementName, lfnsToRemove, replicaDict=replicaDict)
if not res['OK']:
log.debug("Failed in __removeReplica", res['Message'])
return res
failed.update(res['Value']['Failed'])
successful.update(res['Value']['Successful'])
gDataStoreClient.commit()
return S_OK({'Successful': successful, 'Failed': failed})
def __removeReplica(self, storageElementName, lfns, replicaDict=None):
""" remove replica
Remove the replica from the storageElement, and then from the catalog
:param storageElementName : The name of the storage Element
:param lfns : list of lfn we want to remove
:param replicaDict : cache of fc.getReplicas(lfns) : { lfn { se : catalog url } }
"""
log = self.log.getSubLogger('__removeReplica')
failed = {}
successful = {}
replicaDict = replicaDict if replicaDict else {}
lfnsToRemove = set()
for lfn in lfns:
res = self.__hasAccess('removeReplica', lfn)
if not res['OK']:
log.debug('Error in __verifyWritePermission', res['Message'])
return res
if lfn not in res['Value']['Successful']:
errStr = "Write access not permitted for this credential."
log.debug(errStr, lfn)
failed[lfn] = errStr
else:
lfnsToRemove.add(lfn)
# Remove physical replicas first
res = self.__removePhysicalReplica(
storageElementName, lfnsToRemove, replicaDict=replicaDict)
if not res['OK']:
errStr = "Failed to remove physical replicas."
log.debug(errStr, res['Message'])
return res
failed.update(res['Value']['Failed'])
# Here we use the FC PFN...
replicaTuples = [(lfn, replicaDict[lfn][storageElementName], storageElementName)
for lfn in res['Value']['Successful']]
if replicaTuples:
res = self.__removeCatalogReplica(replicaTuples)
if not res['OK']:
errStr = "Completely failed to remove physical files."
log.debug(errStr, res['Message'])
failed.update(dict.fromkeys(
(lfn for lfn, _pfn, _se in replicaTuples), res['Message']))
successful = {}
else:
failed.update(res['Value']['Failed'])
successful = res['Value']['Successful']
return S_OK({'Successful': successful, 'Failed': failed})
def removeReplicaFromCatalog(self, storageElementName, lfn):
""" remove :lfn: replica from :storageElementName: SE
:param self: self reference
:param str storageElementName: SE name
:param mixed lfn: a single LFN or list of LFNs
"""
    # FIXME: this method is dangerous and should eventually be removed as well
    # as the script dirac-dms-remove-catalog-replicas
log = self.log.getSubLogger('removeReplicaFromCatalog')
# Remove replica from the file catalog 'lfn' are the file
# to be removed 'storageElementName' is the storage where the file is to
# be removed
if isinstance(lfn, (list, dict, set, tuple)):
lfns = list(lfn)
else:
lfns = [lfn]
for lfn in lfns:
if not isinstance(lfn, basestring):
errStr = "Supplied lfns must be string or list of strings."
log.debug(errStr)
return S_ERROR(errStr)
successful = {}
failed = {}
if not lfns:
return S_OK({'Successful': successful, 'Failed': failed})
log.debug("Will remove catalogue entry for %s lfns at %s." %
(len(lfns), storageElementName))
res = self.fileCatalog.getReplicas(lfns, allStatus=True)
if not res['OK']:
errStr = "Completely failed to get replicas for lfns."
log.debug(errStr, res['Message'])
return res
failed = {}
successful = {}
for lfn, reason in res['Value']['Failed'].iteritems():
if reason in ('No such file or directory', 'File has zero replicas'):
successful[lfn] = True
else:
failed[lfn] = reason
replicaTuples = []
for lfn, repDict in res['Value']['Successful'].iteritems():
if storageElementName not in repDict:
# The file doesn't exist at the storage element so don't have to remove
# it
successful[lfn] = True
else:
replicaTuples.append(
(lfn, repDict[storageElementName], storageElementName))
log.debug("Resolved %s pfns for catalog removal at %s." % (len(replicaTuples),
storageElementName))
res = self.__removeCatalogReplica(replicaTuples)
failed.update(res['Value']['Failed'])
successful.update(res['Value']['Successful'])
resDict = {'Successful': successful, 'Failed': failed}
return S_OK(resDict)
def __removeCatalogReplica(self, replicaTuples):
""" remove replica form catalogue
:param replicaTuples : list of (lfn, catalogPFN, se)
"""
log = self.log.getSubLogger('__removeCatalogReplica')
oDataOperation = _initialiseAccountingObject(
'removeCatalogReplica', '', len(replicaTuples))
oDataOperation.setStartTime()
start = time.time()
# HACK!
replicaDict = {}
for lfn, pfn, se in replicaTuples:
replicaDict[lfn] = {'SE': se, 'PFN': pfn}
res = self.fileCatalog.removeReplica(replicaDict)
oDataOperation.setEndTime()
oDataOperation.setValueByKey('RegistrationTime', time.time() - start)
if not res['OK']:
oDataOperation.setValueByKey('RegistrationOK', 0)
oDataOperation.setValueByKey('FinalStatus', 'Failed')
gDataStoreClient.addRegister(oDataOperation)
errStr = "Completely failed to remove replica: "
log.debug(errStr, res['Message'])
return S_ERROR("%s %s" % (errStr, res['Message']))
success = res['Value']['Successful']
failed = res['Value']['Failed']
for lfn, error in failed.items():
# Ignore error if file doesn't exist
# This assumes all catalogs return an error as { catalog : error }
for catalog, err in error.items():
if 'no such file' in err.lower():
success.setdefault(lfn, {}).update({catalog: True})
error.pop(catalog)
if not failed[lfn]:
failed.pop(lfn)
else:
log.error("Failed to remove replica.", "%s %s" % (lfn, error))
# Only for logging information
if success:
log.debug("Removed %d replicas" % len(success))
for lfn in success:
log.debug("Successfully removed replica.", lfn)
oDataOperation.setValueByKey('RegistrationOK', len(success))
gDataStoreClient.addRegister(oDataOperation)
return res
def __removePhysicalReplica(self, storageElementName, lfnsToRemove, replicaDict=None):
""" remove replica from storage element
:param storageElementName : name of the storage Element
:param lfnsToRemove : set of lfn to removes
:param replicaDict : cache of fc.getReplicas, to be passed to the SE
"""
log = self.log.getSubLogger('__removePhysicalReplica')
log.debug("Attempting to remove %s pfns at %s." %
(len(lfnsToRemove), storageElementName))
storageElement = StorageElement(storageElementName, vo=self.voName)
res = storageElement.isValid()
if not res['OK']:
errStr = "The storage element is not currently valid."
log.verbose(errStr, "%s %s" % (storageElementName, res['Message']))
return S_ERROR("%s %s" % (errStr, res['Message']))
oDataOperation = _initialiseAccountingObject('removePhysicalReplica',
storageElementName,
len(lfnsToRemove))
oDataOperation.setStartTime()
start = time.time()
lfnsToRemove = list(lfnsToRemove)
ret = storageElement.getFileSize(lfnsToRemove, replicaDict=replicaDict)
deletedSizes = ret.get('Value', {}).get('Successful', {})
res = storageElement.removeFile(lfnsToRemove, replicaDict=replicaDict)
oDataOperation.setEndTime()
oDataOperation.setValueByKey('TransferTime', time.time() - start)
if not res['OK']:
oDataOperation.setValueByKey('TransferOK', 0)
oDataOperation.setValueByKey('FinalStatus', 'Failed')
gDataStoreClient.addRegister(oDataOperation)
log.debug("Failed to remove replicas.", res['Message'])
else:
for lfn, value in res['Value']['Failed'].items():
if 'No such file or directory' in value:
res['Value']['Successful'][lfn] = lfn
res['Value']['Failed'].pop(lfn)
for lfn in res['Value']['Successful']:
res['Value']['Successful'][lfn] = True
deletedSize = sum(deletedSizes.get(lfn, 0)
for lfn in res['Value']['Successful'])
oDataOperation.setValueByKey('TransferSize', deletedSize)
oDataOperation.setValueByKey(
'TransferOK', len(res['Value']['Successful']))
gDataStoreClient.addRegister(oDataOperation)
infoStr = "Successfully issued accounting removal request."
log.debug(infoStr)
return res
#########################################################################
#
# File transfer methods
#
def put(self, lfn, fileName, diracSE, path=None):
""" Put a local file to a Storage Element
:param self: self reference
:param str lfn: LFN
:param str fileName: the full path to the local file
:param str diracSE: the Storage Element to which to put the file
:param str path: the path on the storage where the file will be put (if not provided the LFN will be used)
"""
log = self.log.getSubLogger('put')
# Check that the local file exists
if not os.path.exists(fileName):
errStr = "Supplied file does not exist."
log.debug(errStr, fileName)
return S_ERROR(errStr)
# If the path is not provided then use the LFN path
if not path:
path = os.path.dirname(lfn)
# Obtain the size of the local file
size = getSize(fileName)
if size == 0:
errStr = "Supplied file is zero size."
log.debug(errStr, fileName)
return S_ERROR(errStr)
##########################################################
# Instantiate the destination storage element here.
storageElement = StorageElement(diracSE, vo=self.voName)
res = storageElement.isValid()
if not res['OK']:
errStr = "The storage element is not currently valid."
log.verbose(errStr, "%s %s" % (diracSE, res['Message']))
return S_ERROR("%s %s" % (errStr, res['Message']))
fileDict = {lfn: fileName}
successful = {}
failed = {}
##########################################################
# Perform the put here.
startTime = time.time()
res = returnSingleResult(storageElement.putFile(fileDict))
putTime = time.time() - startTime
if not res['OK']:
errStr = "Failed to put file to Storage Element."
failed[lfn] = res['Message']
log.debug(errStr, "%s: %s" % (fileName, res['Message']))
else:
log.debug("Put file to storage in %s seconds." % putTime)
successful[lfn] = res['Value']
resDict = {'Successful': successful, 'Failed': failed}
return S_OK(resDict)
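  # Example (a sketch, not from the original file; the SE name is hypothetical):
  #
  #   res = dm.put('/vo/user/data.txt', '/tmp/data.txt', 'EXAMPLE-SE')
  #   # -> S_OK({'Successful': {lfn: url}, 'Failed': {}}) on success; a missing
  #   #    or zero-size local file short-circuits with S_ERROR before the put.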
#########################################################################
#
# File catalog methods
#
def getActiveReplicas(self, lfns, getUrl=True, diskOnly=False, preferDisk=False):
""" Get all the replicas for the SEs which are in Active status for reading.
"""
return self.getReplicas(lfns, allStatus=False, getUrl=getUrl, diskOnly=diskOnly,
preferDisk=preferDisk, active=True)
def __filterTapeReplicas(self, replicaDict, diskOnly=False):
"""
Check a replica dictionary for disk replicas:
        If there is a disk replica, remove tape replicas, else keep all
The input argument is modified
"""
seList = set(
se for ses in replicaDict['Successful'].itervalues() for se in ses)
# Get a cache of SE statuses for long list of replicas
seStatus = dict((se,
(self.__checkSEStatus(se, status='DiskSE'),
self.__checkSEStatus(se, status='TapeSE'))) for se in seList)
# Beware, there is a del below
for lfn, replicas in replicaDict['Successful'].items():
self.__filterTapeSEs(replicas, diskOnly=diskOnly, seStatus=seStatus)
# If diskOnly, one may not have any replica in the end, set Failed
if diskOnly and not replicas:
del replicaDict['Successful'][lfn]
replicaDict['Failed'][lfn] = 'No disk replicas'
return
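  # Shape of the mutation above (illustrative values, not from the source):
  #
  #   replicaDict = {'Successful': {'/lfn/a': {'DISK-SE': url1, 'TAPE-SE': url2}},
  #                  'Failed': {}}
  #   self.__filterTapeReplicas(replicaDict)
  #   # '/lfn/a' keeps only 'DISK-SE'; with diskOnly=True and no disk copy the
  #   # LFN is moved to 'Failed' with reason 'No disk replicas'.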
def __filterReplicasForJobs(self, replicaDict):
""" Remove the SEs that are not to be used for jobs, and archive SEs if there are others
The input argument is modified
"""
seList = set(
se for ses in replicaDict['Successful'].itervalues() for se in ses)
# Get a cache of SE statuses for long list of replicas
seStatus = dict((se, (self.dmsHelper.isSEForJobs(
se), self.dmsHelper.isSEArchive(se))) for se in seList)
# Beware, there is a del below
for lfn, replicas in replicaDict['Successful'].items():
otherThanArchive = set(se for se in replicas if not seStatus[se][1])
for se in replicas.keys():
# Remove the SE if it should not be used for jobs or if it is an
# archive and there are other SEs
if not seStatus[se][0] or (otherThanArchive and seStatus[se][1]):
replicas.pop(se)
# If in the end there is no replica, set Failed
if not replicas:
del replicaDict['Successful'][lfn]
replicaDict['Failed'][lfn] = 'No replicas for jobs'
return
def __filterTapeSEs(self, replicas, diskOnly=False, seStatus=None):
""" Remove the tape SEs as soon as there is one disk SE or diskOnly is requested
The input argument is modified
"""
# Build the SE status cache if not existing
if seStatus is None:
seStatus = dict((se,
(self.__checkSEStatus(se, status='DiskSE'),
self.__checkSEStatus(se, status='TapeSE'))) for se in replicas)
for se in replicas: # There is a del below but we then return!
# First find a disk replica, otherwise do nothing unless diskOnly is set
if diskOnly or seStatus[se][0]:
# There is one disk replica, remove tape replicas and exit loop
for se in replicas.keys(): # Beware: there is a pop below
if seStatus[se][1]:
replicas.pop(se)
return
return
def checkActiveReplicas(self, replicaDict):
"""
Check a replica dictionary for active replicas, and verify input structure first
"""
if not isinstance(replicaDict, dict):
return S_ERROR('Wrong argument type %s, expected a dictionary' % type(replicaDict))
for key in ['Successful', 'Failed']:
if key not in replicaDict:
return S_ERROR('Missing key "%s" in replica dictionary' % key)
if not isinstance(replicaDict[key], dict):
return S_ERROR('Wrong argument type %s, expected a dictionary' % type(replicaDict[key]))
activeDict = {'Successful': {}, 'Failed': replicaDict['Failed'].copy()}
for lfn, replicas in replicaDict['Successful'].iteritems():
if not isinstance(replicas, dict):
activeDict['Failed'][lfn] = 'Wrong replica info'
else:
activeDict['Successful'][lfn] = replicas.copy()
self.__filterActiveReplicas(activeDict)
return S_OK(activeDict)
def __filterActiveReplicas(self, replicaDict):
"""
Check a replica dictionary for active replicas
The input dict is modified, no returned value
"""
seList = set(
se for ses in replicaDict['Successful'].itervalues() for se in ses)
# Get a cache of SE statuses for long list of replicas
seStatus = dict((se, self.__checkSEStatus(se, status='Read'))
for se in seList)
for replicas in replicaDict['Successful'].itervalues():
for se in replicas.keys(): # Beware: there is a pop below
if not seStatus[se]:
replicas.pop(se)
return
def __checkSEStatus(self, se, status='Read'):
""" returns the value of a certain SE status flag (access or other) """
return StorageElement(se, vo=self.voName).status().get(status, False)
def getReplicas(self, lfns, allStatus=True, getUrl=True, diskOnly=False, preferDisk=False, active=False):
""" get replicas from catalogue and filter if requested
        Warning: all filters are independent, hence active and preferDisk should both be set to mimic getReplicasForJobs
"""
catalogReplicas = {}
failed = {}
for lfnChunk in breakListIntoChunks(lfns, 1000):
res = self.fileCatalog.getReplicas(lfnChunk, allStatus=allStatus)
if res['OK']:
catalogReplicas.update(res['Value']['Successful'])
failed.update(res['Value']['Failed'])
else:
return res
if not getUrl:
for lfn in catalogReplicas:
catalogReplicas[lfn] = dict.fromkeys(catalogReplicas[lfn], True)
elif not self.useCatalogPFN:
if res['OK']:
se_lfn = {}
# We group the query to getURL by storage element to gain in speed
for lfn in catalogReplicas:
for se in catalogReplicas[lfn]:
se_lfn.setdefault(se, []).append(lfn)
for se in se_lfn:
seObj = StorageElement(se, vo=self.voName)
succPfn = seObj.getURL(se_lfn[se],
protocol=self.registrationProtocol).get('Value', {}).get('Successful', {})
for lfn in succPfn:
# catalogReplicas still points res["value"]["Successful"] so res
# will be updated
catalogReplicas[lfn][se] = succPfn[lfn]
result = {'Successful': catalogReplicas, 'Failed': failed}
if active:
self.__filterActiveReplicas(result)
if diskOnly or preferDisk:
self.__filterTapeReplicas(result, diskOnly=diskOnly)
return S_OK(result)
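  # Typical call (a sketch; per the docstring the filters are independent, so
  # both flags are set here to approximate job-style filtering):
  #
  #   res = dm.getReplicas(lfns, active=True, preferDisk=True)
  #   # res['Value'] == {'Successful': {lfn: {se: url}}, 'Failed': {lfn: reason}}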
def getReplicasForJobs(self, lfns, allStatus=False, getUrl=True, diskOnly=False):
""" get replicas useful for jobs
"""
# Call getReplicas with no filter and enforce filters in this method
result = self.getReplicas(lfns, allStatus=allStatus, getUrl=getUrl)
if not result['OK']:
return result
replicaDict = result['Value']
# For jobs replicas must be active
self.__filterActiveReplicas(replicaDict)
# For jobs, give preference to disk replicas but not only
self.__filterTapeReplicas(replicaDict, diskOnly=diskOnly)
# don't use SEs excluded for jobs (e.g. Failover)
self.__filterReplicasForJobs(replicaDict)
return S_OK(replicaDict)
  ###################################################################
  #
  # Methods from the catalogToStorage. It would all work with the direct call to the SE, but this checks
  # first if the replica is known to the catalog
  #
def __executeIfReplicaExists(self, storageElementName, lfn, method, **kwargs):
""" a simple wrapper that allows replica querying then perform the StorageElement operation
:param self: self reference
:param str storageElementName: DIRAC SE name
:param mixed lfn: a LFN str, list of LFNs or dict with LFNs as keys
"""
log = self.log.getSubLogger('__executeIfReplicaExists')
# # default value
kwargs = kwargs if kwargs else {}
# # get replicas for lfn
res = FileCatalog(vo=self.voName).getReplicas(lfn)
if not res["OK"]:
errStr = "Completely failed to get replicas for LFNs."
log.debug(errStr, res["Message"])
return res
    # # returned dict, get failed replicas
retDict = {"Failed": res["Value"]["Failed"],
"Successful": {}}
# # print errors
for lfn, reason in retDict["Failed"].iteritems():
log.error("_callReplicaSEFcn: Failed to get replicas for file.",
"%s %s" % (lfn, reason))
# # good replicas
lfnReplicas = res["Value"]["Successful"]
# # store PFN to LFN mapping
lfnList = []
for lfn, replicas in lfnReplicas.iteritems():
if storageElementName in replicas:
lfnList.append(lfn)
else:
errStr = "File hasn't got replica at supplied Storage Element."
log.error(errStr, "%s %s" % (lfn, storageElementName))
retDict["Failed"][lfn] = errStr
if 'replicaDict' not in kwargs:
kwargs['replicaDict'] = lfnReplicas
    # # finally, call the StorageElement function
se = StorageElement(storageElementName, vo=self.voName)
fcn = getattr(se, method)
res = fcn(lfnList, **kwargs)
# # check result
if not res["OK"]:
errStr = "Failed to execute %s StorageElement method." % method
log.error(errStr, res["Message"])
return res
# # filter out failed and successful
retDict["Successful"].update(res["Value"]["Successful"])
retDict["Failed"].update(res["Value"]["Failed"])
return S_OK(retDict)
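  # All thin wrappers below funnel through __executeIfReplicaExists: the
  # replica must be known to the catalog for the given SE before the named
  # StorageElement method is forwarded to. Sketch (hypothetical SE name):
  #
  #   dm.getReplicaSize('/lfn/a', 'EXAMPLE-SE')
  #   # == self.__executeIfReplicaExists('EXAMPLE-SE', '/lfn/a', 'getFileSize')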
def getReplicaIsFile(self, lfn, storageElementName):
""" determine whether the supplied lfns are files at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first LFN only
"""
return self.__executeIfReplicaExists(storageElementName, lfn, "isFile")
def getReplicaSize(self, lfn, storageElementName):
""" get the size of files for the lfns at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first LFN only
"""
return self.__executeIfReplicaExists(storageElementName, lfn, "getFileSize")
def getReplicaAccessUrl(self, lfn, storageElementName, protocol=False):
""" get the access url for lfns at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first LFN only
"""
return self.__executeIfReplicaExists(storageElementName, lfn, "getURL", protocol=protocol)
def getReplicaMetadata(self, lfn, storageElementName):
""" get the file metadata for lfns at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first LFN only
"""
return self.__executeIfReplicaExists(storageElementName, lfn, "getFileMetadata")
def prestageReplica(self, lfn, storageElementName, lifetime=86400):
""" issue a prestage requests for the lfns at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param int lifetime: 24h in seconds
:param bool singleFile: execute for the first LFN only
"""
return self.__executeIfReplicaExists(storageElementName, lfn,
"prestageFile", lifetime=lifetime)
def pinReplica(self, lfn, storageElementName, lifetime=86400):
""" pin the lfns at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param int lifetime: 24h in seconds
:param bool singleFile: execute for the first LFN only
"""
return self.__executeIfReplicaExists(storageElementName, lfn,
"pinFile", lifetime=lifetime)
def releaseReplica(self, lfn, storageElementName):
""" release pins for the lfns at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first LFN only
"""
return self.__executeIfReplicaExists(storageElementName, lfn, "releaseFile")
def getReplica(self, lfn, storageElementName, localPath=False):
""" copy replicas from DIRAC SE to local directory
:param self: self reference
:param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param mixed localPath: path in the local file system, if False, os.getcwd() will be used
:param bool singleFile: execute for the first LFN only
"""
return self.__executeIfReplicaExists(storageElementName, lfn,
"getFile", localPath=localPath)
| gpl-3.0 | -2,352,551,176,563,042,300 | 38.539565 | 112 | 0.635152 | false |
Sebubu/mushroom_crawler | mushroom/GoogleInceptionV3.py | 1 | 6668 | from keras.models import Graph
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.layers.advanced_activations import PReLU
import datetime
'''
Inception v3 paper
http://arxiv.org/pdf/1512.00567v1.pdf
Old inception paper
http://arxiv.org/pdf/1409.4842.pdf
'''
def activation_function():
return "relu"
def cinput_shape(graph):
    """Return the output shape of `graph` without the leading batch dimension."""
    shape = list(graph.output_shape)
    shape.pop(0)
    return shape
def conv(input_shape):
graph = Graph()
graph.add_input("input", input_shape)
graph.add_node(Convolution2D(32, 3, 3, subsample=(2,2), activation=activation_function()), name="conv1", input="input")
graph.add_node(Convolution2D(32, 3,3, activation=activation_function()), name="conv2", input="conv1")
graph.add_node(Convolution2D(64, 3,3, activation=activation_function()), name="conv3", input="conv2")
    graph.add_node(MaxPooling2D((3, 3), strides=(2, 2)), name="pool4", input="conv3")  # Keras expects 'strides', not 'stride'
graph.add_node(Convolution2D(80, 3,3, activation=activation_function()), name="conv5", input="pool4")
graph.add_node(Convolution2D(192, 3,3, subsample=(2,2), activation=activation_function()), name="conv6", input="conv5")
graph.add_node(Convolution2D(288, 3,3, activation=activation_function()), name="conv7", input="conv6")
graph.add_output("output", input="conv7")
return graph
def inception4(input_shape):
graph = Graph()
graph.add_input("input", input_shape)
graph.add_node(Convolution2D(16, 1, 1, activation=activation_function()), "conv1_1", "input")
graph.add_node(ZeroPadding2D(padding=(2, 2)), "zero1_2", "conv1_1")
graph.add_node(Convolution2D(32, 5, 5, activation=activation_function()), "conv1_3", "zero1_2")
graph.add_node(Convolution2D(96, 1, 1, activation=activation_function()), "conv2_1", "input")
graph.add_node(ZeroPadding2D(padding=(1, 1)), "zero2_2", "conv2_1")
graph.add_node(Convolution2D(128, 3, 3, activation=activation_function()), "conv2_3", "zero2_2")
graph.add_node(ZeroPadding2D(padding=(1, 1)), "zero3_1", "input")
    graph.add_node(MaxPooling2D((3, 3), strides=(1, 1)), "pool3_2", "zero3_1")
graph.add_node(Convolution2D(32, 1, 1, activation=activation_function()), "conv3_3","pool3_2")
graph.add_node(Convolution2D(64, 1, 1, activation=activation_function()), "conv4_1", "input")
graph.add_output("output", inputs=["conv1_3", "conv2_3", "conv3_3", "conv4_1"], merge_mode="concat", concat_axis=1)
return graph
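# Channel bookkeeping for inception4 (a sketch derived from the filter counts
# above, not part of the original file): merging with "concat" on axis 1
# stacks the four towers' feature maps, so the block's output depth is the sum
# of the towers' final filter counts, while the zero-padding keeps their
# spatial sizes aligned.
INCEPTION4_OUTPUT_CHANNELS = 32 + 128 + 32 + 64  # == 256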
def inception5(input_shape):
graph = Graph()
graph.add_input("input", input_shape)
graph.add_node(Convolution2D(16, 1, 1, activation=activation_function()), "conv1_1", "input")
graph.add_node(ZeroPadding2D(padding=(2, 2)), "zero1_2", "conv1_1")
graph.add_node(Convolution2D(32, 3, 3, activation=activation_function()), "conv1_3", "zero1_2")
graph.add_node(Convolution2D(32, 3, 3, activation=activation_function()), "conv1_4", "conv1_3")
graph.add_node(Convolution2D(96, 1, 1, activation=activation_function()), "conv2_1", "input")
graph.add_node(ZeroPadding2D(padding=(1, 1)), "zero2_2", "conv2_1")
graph.add_node(Convolution2D(128, 3, 3, activation=activation_function()), "conv2_3", "zero2_2")
graph.add_node(ZeroPadding2D(padding=(1, 1)), "zero3_1", "input")
    graph.add_node(MaxPooling2D((3, 3), strides=(1, 1)), "pool3_2", "zero3_1")
graph.add_node(Convolution2D(32, 1, 1, activation=activation_function()), "conv3_3", "pool3_2")
graph.add_node(Convolution2D(64, 1, 1, activation=activation_function()), "conv4_1", "input")
graph.add_output("output",inputs=["conv1_4", "conv2_3", "conv3_3", "conv4_1"], merge_mode="concat", concat_axis=1)
return graph
def inception6(input_shape, n):
graph = Graph()
graph.add_input("input", input_shape)
graph.add_node(Convolution2D(16, 1, 1, activation=activation_function()), "conv1_1", "input")
graph.add_node(ZeroPadding2D(padding=(2, 2)), "zero1_2", "conv1_1")
graph.add_node(Convolution2D(32, 1, n, activation=activation_function()), "conv1_3", "zero1_2")
graph.add_node(Convolution2D(32, n, 1, activation=activation_function()), "conv1_4", "conv1_3")
graph.add_node(Convolution2D(32, 1, n, activation=activation_function()), "conv1_5", "conv1_4")
graph.add_node(Convolution2D(32, n, 1, activation=activation_function()), "conv1_6", "conv1_5")
graph.add_node(Convolution2D(96, 1, 1, activation=activation_function()), "conv2_1", "input")
graph.add_node(ZeroPadding2D(padding=(1, 1)), "zero2_2", "conv2_1")
graph.add_node(Convolution2D(128, 1, n, activation=activation_function()), "conv2_3", "zero2_2")
graph.add_node(Convolution2D(128, n, 1, activation=activation_function()), "conv2_4", "conv2_3")
graph.add_node(ZeroPadding2D(padding=(1, 1)), "zero3_1", "input")
    graph.add_node(MaxPooling2D((3, 3), strides=(1, 1)), "pool3_2", "zero3_1")
graph.add_node(Convolution2D(32, 1, 1, activation=activation_function()), "conv3_3", "pool3_2")
graph.add_node(Convolution2D(64, 1, 1, activation=activation_function()), "conv4_1", "input")
graph.add_output("output", inputs=["conv1_6", "conv2_4", "conv3_3", "conv4_1"], merge_mode="concat", concat_axis=1)
return graph
def printl(name):
    print str(datetime.datetime.now()) + " " + name
def create_model():
input_shape = (3,244,244)
n = 3
conv1 = conv(input_shape)
inc41 = inception4(cinput_shape(conv1))
inc42 = inception4(cinput_shape(inc41))
inc43 = inception4(cinput_shape(inc42))
inc51 = inception5(cinput_shape(inc43))
inc52 = inception5(cinput_shape(inc51))
inc53 = inception5(cinput_shape(inc52))
inc54 = inception5(cinput_shape(inc53))
inc55 = inception5(cinput_shape(inc54))
inc61 = inception6(cinput_shape(inc55), n)
inc62 = inception6(cinput_shape(inc61), n)
inc63 = inception6(cinput_shape(inc62), n)
graph = Graph()
graph.add_input("input", input_shape)
graph.add_node(conv1,"conv1", "input")
graph.add_node(inc41, "inc41", "conv1")
graph.add_node(inc42, "inc42", "inc41")
graph.add_node(inc43, "inc43", "inc42")
graph.add_node(inc51, "inc51", "inc43")
graph.add_node(inc52, "inc52", "inc51")
graph.add_node(inc53, "inc53", "inc52")
graph.add_node(inc54, "inc54", "inc53")
graph.add_node(inc55, "inc55", "inc54")
graph.add_node(inc61, "inc61", "inc55")
graph.add_node(inc62, "inc62", "inc61")
graph.add_node(inc63, "inc63", "inc62")
graph.add_output("output", "inc63")
print "out " + str(graph.output_shape)
return graph
graph = create_model()
graph.compile(optimizer='rmsprop', loss={'output':'mse'})
print graph
| unlicense | -3,629,595,735,044,102,700 | 40.937107 | 123 | 0.671416 | false |
cloudera/hue | desktop/core/ext-py/celery-4.2.1/t/unit/bin/test_base.py | 2 | 12976 | from __future__ import absolute_import, unicode_literals
import os
import pytest
from case import Mock, mock, patch
from celery.bin.base import Command, Extensions, Option
from celery.five import bytes_if_py2
class MyApp(object):
user_options = {'preload': None}
APP = MyApp() # <-- Used by test_with_custom_app
class MockCommand(Command):
mock_args = ('arg1', 'arg2', 'arg3')
def parse_options(self, prog_name, arguments, command=None):
options = {'foo': 'bar', 'prog_name': prog_name}
return options, self.mock_args
def run(self, *args, **kwargs):
return args, kwargs
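# Illustrative helper (an addition, not from the upstream suite): documents the
# fixed contract MockCommand gives the tests below -- parse_options is stubbed
# out, so any argv yields mock_args plus the canned options dict.
def _demo_mockcommand_contract(app):
    cmd = MockCommand(app=app)
    args, kwargs = cmd.execute_from_commandline(['anything'])
    assert args == MockCommand.mock_args
    assert kwargs['foo'] == 'bar'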
class test_Extensions:
def test_load(self):
with patch('pkg_resources.iter_entry_points') as iterep:
with patch('celery.utils.imports.symbol_by_name') as symbyname:
ep = Mock()
ep.name = 'ep'
ep.module_name = 'foo'
ep.attrs = ['bar', 'baz']
iterep.return_value = [ep]
cls = symbyname.return_value = Mock()
register = Mock()
e = Extensions('unit', register)
e.load()
symbyname.assert_called_with('foo:bar')
register.assert_called_with(cls, name='ep')
with patch('celery.utils.imports.symbol_by_name') as symbyname:
symbyname.side_effect = SyntaxError()
with patch('warnings.warn') as warn:
e.load()
warn.assert_called()
with patch('celery.utils.imports.symbol_by_name') as symbyname:
symbyname.side_effect = KeyError('foo')
with pytest.raises(KeyError):
e.load()
class test_Command:
def test_get_options(self):
cmd = Command()
cmd.option_list = (1, 2, 3)
assert cmd.get_options() == (1, 2, 3)
def test_custom_description(self):
class C(Command):
description = 'foo'
c = C()
assert c.description == 'foo'
def test_format_epilog(self):
assert Command()._format_epilog('hello')
assert not Command()._format_epilog('')
def test_format_description(self):
assert Command()._format_description('hello')
def test_register_callbacks(self):
c = Command(on_error=8, on_usage_error=9)
assert c.on_error == 8
assert c.on_usage_error == 9
def test_run_raises_UsageError(self):
cb = Mock()
c = Command(on_usage_error=cb)
c.verify_args = Mock()
c.run = Mock()
exc = c.run.side_effect = c.UsageError('foo', status=3)
assert c() == exc.status
cb.assert_called_with(exc)
c.verify_args.assert_called_with(())
def test_default_on_usage_error(self):
cmd = Command()
cmd.handle_error = Mock()
exc = Exception()
cmd.on_usage_error(exc)
cmd.handle_error.assert_called_with(exc)
def test_verify_args_missing(self):
c = Command()
def run(a, b, c):
pass
c.run = run
with pytest.raises(c.UsageError):
c.verify_args((1,))
c.verify_args((1, 2, 3))
def test_run_interface(self):
with pytest.raises(NotImplementedError):
Command().run()
@patch('sys.stdout')
def test_early_version(self, stdout):
cmd = Command()
with pytest.raises(SystemExit):
cmd.early_version(['--version'])
def test_execute_from_commandline(self, app):
cmd = MockCommand(app=app)
args1, kwargs1 = cmd.execute_from_commandline() # sys.argv
assert args1 == cmd.mock_args
assert kwargs1['foo'] == 'bar'
assert kwargs1.get('prog_name')
args2, kwargs2 = cmd.execute_from_commandline(['foo']) # pass list
assert args2 == cmd.mock_args
assert kwargs2['foo'] == 'bar'
assert kwargs2['prog_name'] == 'foo'
def test_with_bogus_args(self, app):
with mock.stdouts() as (_, stderr):
cmd = MockCommand(app=app)
cmd.supports_args = False
with pytest.raises(SystemExit):
cmd.execute_from_commandline(argv=['--bogus'])
assert stderr.getvalue()
assert 'Unrecognized' in stderr.getvalue()
def test_with_custom_config_module(self, app):
prev = os.environ.pop('CELERY_CONFIG_MODULE', None)
try:
cmd = MockCommand(app=app)
cmd.setup_app_from_commandline(['--config=foo.bar.baz'])
assert os.environ.get('CELERY_CONFIG_MODULE') == 'foo.bar.baz'
finally:
if prev:
os.environ['CELERY_CONFIG_MODULE'] = prev
else:
os.environ.pop('CELERY_CONFIG_MODULE', None)
def test_with_custom_broker(self, app):
prev = os.environ.pop('CELERY_BROKER_URL', None)
try:
cmd = MockCommand(app=app)
cmd.setup_app_from_commandline(['--broker=xyzza://'])
assert os.environ.get('CELERY_BROKER_URL') == 'xyzza://'
finally:
if prev:
os.environ['CELERY_BROKER_URL'] = prev
else:
os.environ.pop('CELERY_BROKER_URL', None)
def test_with_custom_result_backend(self, app):
prev = os.environ.pop('CELERY_RESULT_BACKEND', None)
try:
cmd = MockCommand(app=app)
cmd.setup_app_from_commandline(['--result-backend=xyzza://'])
assert os.environ.get('CELERY_RESULT_BACKEND') == 'xyzza://'
finally:
if prev:
os.environ['CELERY_RESULT_BACKEND'] = prev
else:
os.environ.pop('CELERY_RESULT_BACKEND', None)
def test_with_custom_app(self, app):
cmd = MockCommand(app=app)
appstr = '.'.join([__name__, 'APP'])
cmd.setup_app_from_commandline(['--app=%s' % (appstr,),
'--loglevel=INFO'])
assert cmd.app is APP
cmd.setup_app_from_commandline(['-A', appstr,
'--loglevel=INFO'])
assert cmd.app is APP
def test_setup_app_sets_quiet(self, app):
cmd = MockCommand(app=app)
cmd.setup_app_from_commandline(['-q'])
assert cmd.quiet
cmd2 = MockCommand(app=app)
cmd2.setup_app_from_commandline(['--quiet'])
assert cmd2.quiet
def test_setup_app_sets_chdir(self, app):
with patch('os.chdir') as chdir:
cmd = MockCommand(app=app)
cmd.setup_app_from_commandline(['--workdir=/opt'])
chdir.assert_called_with('/opt')
def test_setup_app_sets_loader(self, app):
prev = os.environ.get('CELERY_LOADER')
try:
cmd = MockCommand(app=app)
cmd.setup_app_from_commandline(['--loader=X.Y:Z'])
assert os.environ['CELERY_LOADER'] == 'X.Y:Z'
finally:
if prev is not None:
os.environ['CELERY_LOADER'] = prev
else:
del(os.environ['CELERY_LOADER'])
def test_setup_app_no_respect(self, app):
cmd = MockCommand(app=app)
cmd.respects_app_option = False
with patch('celery.bin.base.Celery') as cp:
cmd.setup_app_from_commandline(['--app=x.y:z'])
cp.assert_called()
def test_setup_app_custom_app(self, app):
cmd = MockCommand(app=app)
app = cmd.app = Mock()
app.user_options = {'preload': None}
cmd.setup_app_from_commandline([])
assert cmd.app == app
def test_find_app_suspects(self, app):
cmd = MockCommand(app=app)
assert cmd.find_app('t.unit.bin.proj.app')
assert cmd.find_app('t.unit.bin.proj')
assert cmd.find_app('t.unit.bin.proj:hello')
assert cmd.find_app('t.unit.bin.proj.hello')
assert cmd.find_app('t.unit.bin.proj.app:app')
assert cmd.find_app('t.unit.bin.proj.app.app')
with pytest.raises(AttributeError):
cmd.find_app('t.unit.bin')
with pytest.raises(AttributeError):
cmd.find_app(__name__)
def test_ask(self, app, patching):
try:
input = patching('celery.bin.base.input')
except AttributeError:
input = patching('builtins.input')
cmd = MockCommand(app=app)
input.return_value = 'yes'
assert cmd.ask('q', ('yes', 'no'), 'no') == 'yes'
input.return_value = 'nop'
assert cmd.ask('q', ('yes', 'no'), 'no') == 'no'
def test_host_format(self, app):
cmd = MockCommand(app=app)
with patch('celery.utils.nodenames.gethostname') as hn:
hn.return_value = 'blacktron.example.com'
assert cmd.host_format('') == ''
assert (cmd.host_format('celery@%h') ==
'[email protected]')
assert cmd.host_format('celery@%d') == '[email protected]'
assert cmd.host_format('celery@%n') == 'celery@blacktron'
def test_say_chat_quiet(self, app):
cmd = MockCommand(app=app)
cmd.quiet = True
assert cmd.say_chat('<-', 'foo', 'foo') is None
def test_say_chat_show_body(self, app):
cmd = MockCommand(app=app)
cmd.out = Mock()
cmd.show_body = True
cmd.say_chat('->', 'foo', 'body')
cmd.out.assert_called_with('body')
def test_say_chat_no_body(self, app):
cmd = MockCommand(app=app)
cmd.out = Mock()
cmd.show_body = False
cmd.say_chat('->', 'foo', 'body')
@pytest.mark.usefixtures('depends_on_current_app')
def test_with_cmdline_config(self, app):
cmd = MockCommand(app=app)
cmd.enable_config_from_cmdline = True
cmd.namespace = 'worker'
rest = cmd.setup_app_from_commandline(argv=[
'--loglevel=INFO', '--',
'result.backend=redis://backend.example.com',
'broker.url=amqp://broker.example.com',
'.prefetch_multiplier=100'])
assert cmd.app.conf.result_backend == 'redis://backend.example.com'
assert cmd.app.conf.broker_url == 'amqp://broker.example.com'
assert cmd.app.conf.worker_prefetch_multiplier == 100
assert rest == ['--loglevel=INFO']
cmd.app = None
cmd.get_app = Mock(name='get_app')
cmd.get_app.return_value = app
app.user_options['preload'] = [
Option('--foo', action='store_true'),
]
cmd.setup_app_from_commandline(argv=[
'--foo', '--loglevel=INFO', '--',
'broker.url=amqp://broker.example.com',
'.prefetch_multiplier=100'])
assert cmd.app is cmd.get_app()
def test_get_default_app(self, app, patching):
patching('celery._state.get_current_app')
cmd = MockCommand(app=app)
from celery._state import get_current_app
assert cmd._get_default_app() is get_current_app()
def test_set_colored(self, app):
cmd = MockCommand(app=app)
cmd.colored = 'foo'
assert cmd.colored == 'foo'
def test_set_no_color(self, app):
cmd = MockCommand(app=app)
cmd.no_color = False
_ = cmd.colored # noqa
cmd.no_color = True
assert not cmd.colored.enabled
def test_find_app(self, app):
cmd = MockCommand(app=app)
with patch('celery.utils.imports.symbol_by_name') as sbn:
from types import ModuleType
x = ModuleType(bytes_if_py2('proj'))
def on_sbn(*args, **kwargs):
def after(*args, **kwargs):
x.app = 'quick brown fox'
x.__path__ = None
return x
sbn.side_effect = after
return x
sbn.side_effect = on_sbn
x.__path__ = [True]
assert cmd.find_app('proj') == 'quick brown fox'
def test_parse_preload_options_shortopt(self):
class TestCommand(Command):
def add_preload_arguments(self, parser):
parser.add_argument('-s', action='store', dest='silent')
cmd = TestCommand()
acc = cmd.parse_preload_options(['-s', 'yes'])
assert acc.get('silent') == 'yes'
def test_parse_preload_options_with_equals_and_append(self):
class TestCommand(Command):
def add_preload_arguments(self, parser):
parser.add_argument('--zoom', action='append', default=[])
        cmd = TestCommand()
        acc = cmd.parse_preload_options(['--zoom=1', '--zoom=2'])
        assert acc.get('zoom') == ['1', '2']
def test_parse_preload_options_without_equals_and_append(self):
cmd = Command()
opt = Option('--zoom', action='append', default=[])
cmd.preload_options = (opt,)
acc = cmd.parse_preload_options(['--zoom', '1', '--zoom', '2'])
        assert acc.get('zoom') == ['1', '2']
| apache-2.0 | -4,420,767,453,853,366,000 | 33.695187 | 76 | 0.551788 | false |
mudbungie/NetExplorer | env/share/doc/networkx-1.11/examples/advanced/parallel_betweenness.py | 51 | 2510 | """
Example of a parallel implementation of betweenness centrality using the
multiprocessing module from the Python Standard Library.
The function betweenness centrality accepts a bunch of nodes and computes
the contribution of those nodes to the betweenness centrality of the whole
network. Here we divide the network into chunks of nodes and compute their
contribution to the betweenness centrality of the whole network.
"""
from multiprocessing import Pool
import time
import itertools
import networkx as nx
def chunks(l, n):
"""Divide a list of nodes `l` in `n` chunks"""
l_c = iter(l)
while 1:
x = tuple(itertools.islice(l_c, n))
if not x:
return
yield x
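# Example (a sketch, not in the original script): ten nodes in chunks of four.
#   >>> list(chunks(range(10), 4))
#   [(0, 1, 2, 3), (4, 5, 6, 7), (8, 9)]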
def _betmap(G_normalized_weight_sources_tuple):
"""Pool for multiprocess only accepts functions with one argument.
This function uses a tuple as its only argument. We use a named tuple for
python 3 compatibility, and then unpack it when we send it to
`betweenness_centrality_source`
"""
return nx.betweenness_centrality_source(*G_normalized_weight_sources_tuple)
def betweenness_centrality_parallel(G, processes=None):
"""Parallel betweenness centrality function"""
p = Pool(processes=processes)
node_divisor = len(p._pool)*4
node_chunks = list(chunks(G.nodes(), int(G.order()/node_divisor)))
num_chunks = len(node_chunks)
bt_sc = p.map(_betmap,
zip([G]*num_chunks,
[True]*num_chunks,
[None]*num_chunks,
node_chunks))
# Reduce the partial solutions
bt_c = bt_sc[0]
for bt in bt_sc[1:]:
for n in bt:
bt_c[n] += bt[n]
return bt_c
if __name__ == "__main__":
G_ba = nx.barabasi_albert_graph(1000, 3)
G_er = nx.gnp_random_graph(1000, 0.01)
G_ws = nx.connected_watts_strogatz_graph(1000, 4, 0.1)
for G in [G_ba, G_er, G_ws]:
print("")
print("Computing betweenness centrality for:")
print(nx.info(G))
print("\tParallel version")
start = time.time()
bt = betweenness_centrality_parallel(G)
print("\t\tTime: %.4F" % (time.time()-start))
print("\t\tBetweenness centrality for node 0: %.5f" % (bt[0]))
print("\tNon-Parallel version")
start = time.time()
bt = nx.betweenness_centrality(G)
print("\t\tTime: %.4F seconds" % (time.time()-start))
print("\t\tBetweenness centrality for node 0: %.5f" % (bt[0]))
print("")
| mit | -3,663,657,029,364,220,000 | 33.383562 | 79 | 0.62988 | false |
vmturbo/nova | nova/cmd/idmapshift.py | 9 | 7512 | # Copyright 2014 Rackspace, Andrew Melton
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
##########
IDMapShift
##########
IDMapShift is a tool that properly sets the ownership of a filesystem for use
with linux user namespaces.
=====
Usage
=====
nova-idmapshift -i -u 0:10000:2000 -g 0:10000:2000 path
This command will idempotently shift `path` to proper ownership using
the provided uid and gid mappings.
=========
Arguments
=========
nova-idmapshift -i -c -d -v
-u [[guest-uid:host-uid:count],...]
-g [[guest-gid:host-gid:count],...]
-n [nobody-id]
path
path: Root path of the filesystem to be shifted
-i, --idempotent: Shift operation will only be performed if filesystem
appears unshifted
-c, --confirm: Will perform check on filesystem
Returns 0 when filesystem appears shifted
Returns 1 when filesystem appears unshifted
-d, --dry-run: Print chown operations, but won't perform them
-v, --verbose: Print chown operations while performing them
-u, --uid: User ID mappings, maximum of 3 ranges
-g, --gid: Group ID mappings, maximum of 3 ranges
-n, --nobody: ID to map all unmapped uid and gids to.
=======
Purpose
=======
When using user namespaces with linux containers, the filesystem of the
container must be owned by the targeted user and group ids being applied
to that container. Otherwise, processes inside the container won't be able
to access the filesystem.
For example, when using the id map string '0:10000:2000', this means that
user ids inside the container between 0 and 1999 will map to user ids on
the host between 10000 and 11999. Root (0) becomes 10000, user 1 becomes
10001, user 50 becomes 10050 and user 1999 becomes 11999. This means that
files that are owned by root need to actually be owned by user 10000, and
files owned by 50 need to be owned by 10050, and so on.
IDMapShift will take the uid and gid strings used for user namespaces and
properly set up the filesystem for use by those users. Uids and gids outside
of provided ranges will be mapped to nobody (max uid/gid) so that they are
inaccessible inside the container.
"""
import argparse
import os
import sys
from nova.i18n import _
NOBODY_ID = 65534
def find_target_id(fsid, mappings, nobody, memo):
if fsid not in memo:
for start, target, count in mappings:
if start <= fsid < start + count:
memo[fsid] = (fsid - start) + target
break
else:
memo[fsid] = nobody
return memo[fsid]
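# Worked example (a sketch mirroring the docstring's '0:10000:2000' mapping,
# not part of the original module): ids inside a range shift by the offset,
# ids outside every range collapse to the nobody id.
#   >>> find_target_id(50, [(0, 10000, 2000)], NOBODY_ID, {})
#   10050
#   >>> find_target_id(5000, [(0, 10000, 2000)], NOBODY_ID, {})
#   65534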
def print_chown(path, uid, gid, target_uid, target_gid):
print('%s %s:%s -> %s:%s' % (path, uid, gid, target_uid, target_gid))
def shift_path(path, uid_mappings, gid_mappings, nobody, uid_memo, gid_memo,
dry_run=False, verbose=False):
stat = os.lstat(path)
uid = stat.st_uid
gid = stat.st_gid
target_uid = find_target_id(uid, uid_mappings, nobody, uid_memo)
target_gid = find_target_id(gid, gid_mappings, nobody, gid_memo)
if verbose:
print_chown(path, uid, gid, target_uid, target_gid)
if not dry_run:
os.lchown(path, target_uid, target_gid)
def shift_dir(fsdir, uid_mappings, gid_mappings, nobody,
dry_run=False, verbose=False):
uid_memo = dict()
gid_memo = dict()
def shift_path_short(p):
shift_path(p, uid_mappings, gid_mappings, nobody,
dry_run=dry_run, verbose=verbose,
uid_memo=uid_memo, gid_memo=gid_memo)
shift_path_short(fsdir)
for root, dirs, files in os.walk(fsdir):
for d in dirs:
path = os.path.join(root, d)
shift_path_short(path)
for f in files:
path = os.path.join(root, f)
shift_path_short(path)
def confirm_path(path, uid_ranges, gid_ranges, nobody):
stat = os.lstat(path)
uid = stat.st_uid
gid = stat.st_gid
    uid_in_range = uid == nobody
    gid_in_range = gid == nobody
if not uid_in_range or not gid_in_range:
for (start, end) in uid_ranges:
if start <= uid <= end:
uid_in_range = True
break
for (start, end) in gid_ranges:
if start <= gid <= end:
gid_in_range = True
break
return uid_in_range and gid_in_range
def get_ranges(maps):
return [(target, target + count - 1) for (start, target, count) in maps]
def confirm_dir(fsdir, uid_mappings, gid_mappings, nobody):
uid_ranges = get_ranges(uid_mappings)
gid_ranges = get_ranges(gid_mappings)
if not confirm_path(fsdir, uid_ranges, gid_ranges, nobody):
return False
for root, dirs, files in os.walk(fsdir):
for d in dirs:
path = os.path.join(root, d)
if not confirm_path(path, uid_ranges, gid_ranges, nobody):
return False
for f in files:
path = os.path.join(root, f)
if not confirm_path(path, uid_ranges, gid_ranges, nobody):
return False
return True
def id_map_type(val):
maps = val.split(',')
id_maps = []
for m in maps:
map_vals = m.split(':')
if len(map_vals) != 3:
msg = ('Invalid id map %s, correct syntax is '
'guest-id:host-id:count.')
raise argparse.ArgumentTypeError(msg % val)
try:
vals = [int(i) for i in map_vals]
except ValueError:
msg = 'Invalid id map %s, values must be integers' % val
raise argparse.ArgumentTypeError(msg)
id_maps.append(tuple(vals))
return id_maps
def main():
parser = argparse.ArgumentParser(
description=_('nova-idmapshift is a tool that properly '
'sets the ownership of a filesystem for '
'use with linux user namespaces. '
'This tool can only be used with linux '
'lxc containers. See the man page for '
'details.'))
parser.add_argument('path')
parser.add_argument('-u', '--uid', type=id_map_type, default=[])
parser.add_argument('-g', '--gid', type=id_map_type, default=[])
parser.add_argument('-n', '--nobody', default=NOBODY_ID, type=int)
parser.add_argument('-i', '--idempotent', action='store_true')
parser.add_argument('-c', '--confirm', action='store_true')
parser.add_argument('-d', '--dry-run', action='store_true')
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
if args.idempotent or args.confirm:
if confirm_dir(args.path, args.uid, args.gid, args.nobody):
sys.exit(0)
else:
if args.confirm:
sys.exit(1)
shift_dir(args.path, args.uid, args.gid, args.nobody,
dry_run=args.dry_run, verbose=args.verbose)
| apache-2.0 | -1,965,350,524,618,179,800 | 31.37931 | 77 | 0.617279 | false |
gundalow/ansible-modules-extras | cloud/cloudstack/cs_configuration.py | 32 | 8629 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: cs_configuration
short_description: Manages configuration on Apache CloudStack based clouds.
description:
- Manages global, zone, account, storage and cluster configurations.
version_added: "2.1"
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the configuration.
required: true
value:
description:
- Value of the configuration.
required: true
account:
description:
- Ensure the value for corresponding account.
required: false
default: null
domain:
description:
- Domain the account is related to.
- Only considered if C(account) is used.
required: false
default: ROOT
zone:
description:
- Ensure the value for corresponding zone.
required: false
default: null
storage:
description:
- Ensure the value for corresponding storage pool.
required: false
default: null
cluster:
description:
- Ensure the value for corresponding cluster.
required: false
default: null
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Ensure global configuration
- local_action:
module: cs_configuration
name: router.reboot.when.outofband.migrated
value: false
# Ensure zone configuration
- local_action:
module: cs_configuration
name: router.reboot.when.outofband.migrated
zone: ch-gva-01
value: true
# Ensure storage configuration
- local_action:
module: cs_configuration
name: storage.overprovisioning.factor
storage: storage01
value: 2.0
# Ensure account configuration
- local_action:
    module: cs_configuration
name: allow.public.user.templates
value: false
account: acme inc
domain: customers
'''
RETURN = '''
---
category:
description: Category of the configuration.
returned: success
type: string
sample: Advanced
scope:
description: Scope (zone/cluster/storagepool/account) of the parameter that needs to be updated.
returned: success
type: string
sample: storagepool
description:
description: Description of the configuration.
returned: success
type: string
sample: Setup the host to do multipath
name:
description: Name of the configuration.
returned: success
type: string
sample: zone.vlan.capacity.notificationthreshold
value:
description: Value of the configuration.
returned: success
type: string
sample: "0.75"
account:
description: Account of the configuration.
returned: success
type: string
sample: admin
domain:
description: Domain of account of the configuration.
returned: success
type: string
sample: ROOT
zone:
description: Zone of the configuration.
returned: success
type: string
sample: ch-gva-01
cluster:
description: Cluster of the configuration.
returned: success
type: string
sample: cluster01
storage:
description: Storage of the configuration.
returned: success
type: string
sample: storage01
'''
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackConfiguration(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackConfiguration, self).__init__(module)
self.returns = {
'category': 'category',
'scope': 'scope',
'value': 'value',
}
self.storage = None
self.account = None
self.cluster = None
def _get_common_configuration_args(self):
args = {}
args['name'] = self.module.params.get('name')
args['accountid'] = self.get_account(key='id')
args['storageid'] = self.get_storage(key='id')
args['zoneid'] = self.get_zone(key='id')
args['clusterid'] = self.get_cluster(key='id')
return args
def get_zone(self, key=None):
        # make sure we do not use the default zone
zone = self.module.params.get('zone')
if zone:
return super(AnsibleCloudStackConfiguration, self).get_zone(key=key)
def get_cluster(self, key=None):
if not self.cluster:
cluster_name = self.module.params.get('cluster')
if not cluster_name:
return None
args = {}
args['name'] = cluster_name
clusters = self.cs.listClusters(**args)
if clusters:
self.cluster = clusters['cluster'][0]
self.result['cluster'] = self.cluster['name']
else:
self.module.fail_json(msg="Cluster %s not found." % cluster_name)
return self._get_by_key(key=key, my_dict=self.cluster)
def get_storage(self, key=None):
if not self.storage:
storage_pool_name = self.module.params.get('storage')
if not storage_pool_name:
return None
args = {}
args['name'] = storage_pool_name
storage_pools = self.cs.listStoragePools(**args)
if storage_pools:
self.storage = storage_pools['storagepool'][0]
self.result['storage'] = self.storage['name']
else:
self.module.fail_json(msg="Storage pool %s not found." % storage_pool_name)
return self._get_by_key(key=key, my_dict=self.storage)
def get_configuration(self):
configuration = None
args = self._get_common_configuration_args()
configurations = self.cs.listConfigurations(**args)
if not configurations:
self.module.fail_json(msg="Configuration %s not found." % args['name'])
configuration = configurations['configuration'][0]
return configuration
def get_value(self):
value = str(self.module.params.get('value'))
if value in ('True', 'False'):
value = value.lower()
return value
def present_configuration(self):
configuration = self.get_configuration()
args = self._get_common_configuration_args()
args['value'] = self.get_value()
if self.has_changed(args, configuration, ['value']):
self.result['changed'] = True
if not self.module.check_mode:
res = self.cs.updateConfiguration(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
configuration = res['configuration']
return configuration
def get_result(self, configuration):
self.result = super(AnsibleCloudStackConfiguration, self).get_result(configuration)
if self.account:
self.result['account'] = self.account['name']
self.result['domain'] = self.domain['path']
elif self.zone:
self.result['zone'] = self.zone['name']
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name = dict(required=True),
value = dict(type='str', required=True),
zone = dict(default=None),
storage = dict(default=None),
cluster = dict(default=None),
account = dict(default=None),
domain = dict(default='ROOT')
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
try:
acs_configuration = AnsibleCloudStackConfiguration(module)
configuration = acs_configuration.present_configuration()
result = acs_configuration.get_result(configuration)
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 | -5,238,826,330,936,015,000 | 28.544521 | 98 | 0.64217 | false |
Intel-bigdata/SSM | supports/integration-test/ssm_generate_test_data.py | 4 | 3241 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This script will be used to create test data set. It is also called by:
- test_small_file_rule.py
- test_small_file_actions.py
"""
import sys
import ast
import os
import re
import argparse
from util import *
def create_test_set(file_set_nums, file_size, base_dir, debug):
created_files = []
cids = []
size_in_byte = file_size * 1024
for i in file_set_nums:
if debug:
print("DEBUG: Current file set number: " + str(i) + "; each file size: " + str(file_size) + "KB")
created_files_dir = base_dir + os.sep + "data_" + str(i)
for j in range(0, i):
file_name = created_files_dir + os.sep + "file_" + str(j)
cid = create_file(file_name, size_in_byte)
cids.append(cid)
created_files.append("'" + file_name + "'")
if debug:
print("**********Action " + str(cid) + " Submitted**********")
wait_for_cmdlets(cids)
time.sleep(1)
return "[" + ','.join(created_files) + "]"
if __name__ == '__main__':
# Parse arguments
parser = argparse.ArgumentParser(description='Generate test data set for SSM.')
parser.add_argument("-b", "--dataSetNums", default='[10]', dest="dataSetNums",
help="file number of test data sets, string input, e.g. '[10,100,1000]', Default Value: [10].")
parser.add_argument("-s", "--fileSize", default='1MB', dest="fileSize",
help="size of each file, e.g. 10MB, 10KB, default unit KB, Default Value 1KB.")
parser.add_argument("-d", "--testDir", default=TEST_DIR, dest="testDir",
help="Test data set directory, Default Value: TEST_DIR in util.py")
parser.add_argument("--debug", nargs='?', const=1, default=0, dest="debug",
help="print debug info, Default Value: 0")
options = parser.parse_args()
# Convert arguments
try:
DEBUG = options.debug
data_set_nums = ast.literal_eval(options.dataSetNums)
file_size_arg = options.fileSize
m = re.match(r"(\d+)(\w{2}).*", file_size_arg)
if m:
size = int(m.group(1))
sizeUnit = m.group(2)
if sizeUnit != "MB" and sizeUnit != "KB":
print("Wrong Size Unit")
print("Usage: python3 ssm_generate_test_data -h")
sys.exit(1)
if sizeUnit == "MB":
size = size * 1024
else:
print("Wrong Size Input, e.g. 1MB or 1KB")
sys.exit(1)
if options.testDir:
        if options.testDir.endswith('/'):
test_dir_prefix = options.testDir[:-1]
else:
test_dir_prefix = options.testDir
else:
raise SystemExit
if DEBUG:
print("DEBUG: file set nums: " + options.fileSetNums + ", each file size: " + str(size) + sizeUnit
+ ", test data directory prefix: " + test_dir_prefix)
    except (ValueError, SystemExit) as e:
        print("Usage: python3 ssm_generate_test_data -h")
        sys.exit(1)
except IndexError:
pass
create_test_set(data_set_nums, size, test_dir_prefix, DEBUG)
| apache-2.0 | 7,959,431,885,895,326,000 | 37.583333 | 119 | 0.551373 | false |
OpenPIV/openpiv-python | openpiv/pyprocess.py | 2 | 29417 | import numpy.lib.stride_tricks
import numpy as np
from scipy.fft import rfft2, irfft2, fftshift
from numpy import ma
from scipy.signal import convolve2d
from numpy import log
"""This module contains a pure python implementation of the basic
cross-correlation algorithm for PIV image processing."""
__licence_ = """
Copyright (C) 2011 www.openpiv.net
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
def get_coordinates(image_size, search_area_size, overlap):
"""Compute the x, y coordinates of the centers of the interrogation windows.
the origin (0,0) is like in the image, top left corner
positive x is an increasing column index from left to right
positive y is increasing row index, from top to bottom
Parameters
----------
image_size: two elements tuple
a two dimensional tuple for the pixel size of the image
first element is number of rows, second element is
the number of columns.
search_area_size: int
the size of the search area windows, sometimes it's equal to
the interrogation window size in both frames A and B
overlap: int = 0 (default is no overlap)
the number of pixel by which two adjacent interrogation
windows overlap.
Returns
-------
x : 2d np.ndarray
a two dimensional array containing the x coordinates of the
interrogation window centers, in pixels.
y : 2d np.ndarray
a two dimensional array containing the y coordinates of the
interrogation window centers, in pixels.
Coordinate system 0,0 is at the top left corner, positive
x to the right, positive y from top downwards, i.e.
image coordinate system
"""
# get shape of the resulting flow field
field_shape = get_field_shape(image_size,
search_area_size,
overlap)
# compute grid coordinates of the search area window centers
# note the field_shape[1] (columns) for x
x = (
np.arange(field_shape[1]) * (search_area_size - overlap)
+ (search_area_size) / 2.0
)
# note the rows in field_shape[0]
y = (
np.arange(field_shape[0]) * (search_area_size - overlap)
+ (search_area_size) / 2.0
)
# moving coordinates further to the center, so that the points at the
# extreme left/right or top/bottom
# have the same distance to the window edges. For simplicity only integer
# movements are allowed.
x += (
image_size[1]
- 1
- ((field_shape[1] - 1) * (search_area_size - overlap) +
(search_area_size - 1))
) // 2
y += (
image_size[0] - 1
- ((field_shape[0] - 1) * (search_area_size - overlap) +
(search_area_size - 1))
) // 2
# the origin 0,0 is at top left
# the units are pixels
return np.meshgrid(x, y)
def get_field_shape(image_size, search_area_size, overlap):
"""Compute the shape of the resulting flow field.
Given the image size, the interrogation window size and
the overlap size, it is possible to calculate the number
of rows and columns of the resulting flow field.
Parameters
----------
image_size: two elements tuple
a two dimensional tuple for the pixel size of the image
first element is number of rows, second element is
the number of columns, easy to obtain using .shape
search_area_size: tuple
the size of the interrogation windows (if equal in frames A,B)
or the search area (in frame B), the largest of the two
overlap: tuple
the number of pixel by which two adjacent interrogation
windows overlap.
Returns
-------
field_shape : three elements tuple
the shape of the resulting flow field
"""
field_shape = (np.array(image_size) - np.array(search_area_size)) // (
np.array(search_area_size) - np.array(overlap)
) + 1
return field_shape
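# Worked example (assumed values, for illustration only): a 512x512 image
# with search_area_size=32 and overlap=16 gives
# (512 - 32) // (32 - 16) + 1 = 31 windows per axis.
#
#   get_field_shape((512, 512), 32, 16)  # -> array([31, 31])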
def moving_window_array(array, window_size, overlap):
"""
This is a nice numpy trick. The concept of numpy strides should be
clear to understand this code.
Basically, we have a 2d array and we want to perform cross-correlation
over the interrogation windows. An approach could be to loop over the array
but loops are expensive in python. So we create from the array a new array
with three dimension, of size (n_windows, window_size, window_size), in
which each slice, (along the first axis) is an interrogation window.
"""
sz = array.itemsize
shape = array.shape
array = np.ascontiguousarray(array)
strides = (
sz * shape[1] * (window_size - overlap),
sz * (window_size - overlap),
sz * shape[1],
sz,
)
shape = (
int((shape[0] - window_size) / (window_size - overlap)) + 1,
int((shape[1] - window_size) / (window_size - overlap)) + 1,
window_size,
window_size,
)
return numpy.lib.stride_tricks.as_strided(
array, strides=strides, shape=shape
).reshape(-1, window_size, window_size)
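# Illustrative sketch (synthetic array, not from the original test suite):
# a 64x64 array with window_size=32 and overlap=16 yields
# ((64 - 32) // 16 + 1) ** 2 = 9 interrogation windows of 32x32 pixels.
#
#   a = np.arange(64 * 64, dtype=np.float64).reshape(64, 64)
#   moving_window_array(a, 32, 16).shape  # -> (9, 32, 32)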
def find_first_peak(corr):
"""
Find row and column indices of the first correlation peak.
Parameters
----------
corr : np.ndarray
the correlation map fof the strided images (N,K,M) where
N is the number of windows, KxM is the interrogation window size
Returns
-------
(i,j) : integers, index of the peak position
peak : amplitude of the peak
"""
return np.unravel_index(np.argmax(corr), corr.shape), corr.max()
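# Quick illustration (synthetic map, added for clarity):
#
#   find_first_peak(np.array([[0., 1.], [2., 0.]]))  # -> ((1, 0), 2.0)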
def find_second_peak(corr, i=None, j=None, width=2):
"""
Find the value of the second largest peak.
The second largest peak is the height of the peak in
    the region outside a 3x3 submatrix around the first
correlation peak.
Parameters
----------
corr: np.ndarray
the correlation map.
i,j : ints
row and column location of the first peak.
width : int
the half size of the region around the first correlation
peak to ignore for finding the second peak.
Returns
-------
i : int
the row index of the second correlation peak.
j : int
the column index of the second correlation peak.
corr_max2 : int
the value of the second correlation peak.
"""
if i is None or j is None:
(i, j), tmp = find_first_peak(corr)
# create a masked view of the corr
tmp = corr.view(ma.MaskedArray)
# set width x width square submatrix around the first correlation peak as
# masked.
# Before check if we are not too close to the boundaries, otherwise we
# have negative indices
iini = max(0, i - width)
ifin = min(i + width + 1, corr.shape[0])
jini = max(0, j - width)
jfin = min(j + width + 1, corr.shape[1])
tmp[iini:ifin, jini:jfin] = ma.masked
(i, j), corr_max2 = find_first_peak(tmp)
return (i, j), corr_max2
def find_subpixel_peak_position(corr, subpixel_method="gaussian"):
"""
Find subpixel approximation of the correlation peak.
This function returns a subpixels approximation of the correlation
peak by using one of the several methods available. If requested,
the function also returns the signal to noise ratio level evaluated
from the correlation map.
Parameters
----------
corr : np.ndarray
the correlation map.
subpixel_method : string
one of the following methods to estimate subpixel location of the
peak:
'centroid' [replaces default if correlation map is negative],
'gaussian' [default if correlation map is positive],
'parabolic'.
Returns
-------
subp_peak_position : two elements tuple
the fractional row and column indices for the sub-pixel
approximation of the correlation peak.
If the first peak is on the border of the correlation map
or any other problem, the returned result is a tuple of NaNs.
"""
# initialization
# default_peak_position = (np.floor(corr.shape[0] / 2.),
# np.floor(corr.shape[1] / 2.))
# default_peak_position = np.array([0,0])
eps = 1e-7
# subp_peak_position = tuple(np.floor(np.array(corr.shape)/2))
subp_peak_position = (np.nan, np.nan) # any wrong position will mark nan
# check inputs
if subpixel_method not in ("gaussian", "centroid", "parabolic"):
raise ValueError(f"Method not implemented {subpixel_method}")
# the peak locations
(peak1_i, peak1_j), _ = find_first_peak(corr)
# import pdb; pdb.set_trace()
# the peak and its neighbours: left, right, down, up
# but we have to make sure that peak is not at the border
# @ErichZimmer noticed this bug for the small windows
if ((peak1_i == 0) | (peak1_i == corr.shape[0]-1) |
(peak1_j == 0) | (peak1_j == corr.shape[1]-1)):
return subp_peak_position
else:
corr += eps # prevents log(0) = nan if "gaussian" is used (notebook)
c = corr[peak1_i, peak1_j]
cl = corr[peak1_i - 1, peak1_j]
cr = corr[peak1_i + 1, peak1_j]
cd = corr[peak1_i, peak1_j - 1]
cu = corr[peak1_i, peak1_j + 1]
# gaussian fit
if np.logical_and(np.any(np.array([c, cl, cr, cd, cu]) < 0),
subpixel_method == "gaussian"):
subpixel_method = "parabolic"
# try:
if subpixel_method == "centroid":
subp_peak_position = (
((peak1_i - 1) * cl + peak1_i * c + (peak1_i + 1) * cr) /
(cl + c + cr),
((peak1_j - 1) * cd + peak1_j * c + (peak1_j + 1) * cu) /
(cd + c + cu),
)
elif subpixel_method == "gaussian":
nom1 = log(cl) - log(cr)
den1 = 2 * log(cl) - 4 * log(c) + 2 * log(cr)
nom2 = log(cd) - log(cu)
den2 = 2 * log(cd) - 4 * log(c) + 2 * log(cu)
subp_peak_position = (
peak1_i + np.divide(nom1, den1, out=np.zeros(1),
where=(den1 != 0.0))[0],
peak1_j + np.divide(nom2, den2, out=np.zeros(1),
where=(den2 != 0.0))[0],
)
elif subpixel_method == "parabolic":
subp_peak_position = (
peak1_i + (cl - cr) / (2 * cl - 4 * c + 2 * cr),
peak1_j + (cd - cu) / (2 * cd - 4 * c + 2 * cu),
)
return subp_peak_position
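# Minimal sketch (synthetic correlation map, assumed for illustration): a
# symmetric 3x3 peak yields a sub-pixel estimate at the integer peak
# location.
#
#   corr = np.array([[0., 1., 0.],
#                    [1., 3., 1.],
#                    [0., 1., 0.]])
#   find_subpixel_peak_position(corr)  # -> (1.0, 1.0)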
def sig2noise_ratio(correlation, sig2noise_method="peak2peak", width=2):
"""
Computes the signal to noise ratio from the correlation map.
The signal to noise ratio is computed from the correlation map with
    one of two available methods. It is a measure of the quality of the
    matching between two interrogation windows.
Parameters
----------
corr : 3d np.ndarray
the correlation maps of the image pair, concatenated along 0th axis
sig2noise_method: string
the method for evaluating the signal to noise ratio value from
the correlation map. Can be `peak2peak`, `peak2mean` or None
if no evaluation should be made.
width : int, optional
the half size of the region around the first
correlation peak to ignore for finding the second
peak. [default: 2]. Only used if ``sig2noise_method==peak2peak``.
Returns
-------
sig2noise : np.array
the signal to noise ratios from the correlation maps.
"""
sig2noise = np.zeros(correlation.shape[0])
corr_max1 = np.zeros(correlation.shape[0])
corr_max2 = np.zeros(correlation.shape[0])
if sig2noise_method == "peak2peak":
for i, corr in enumerate(correlation):
# compute first peak position
(peak1_i, peak1_j), corr_max1[i] = find_first_peak(corr)
            # a peak at the map border (or a vanishing peak) means no valid
            # signal
            condition = (
                corr_max1[i] < 1e-3
                or peak1_i == 0
                or peak1_i == corr.shape[0] - 1
                or peak1_j == 0
                or peak1_j == corr.shape[1] - 1
            )
if condition:
# return zero, since we have no signal.
# no point to get the second peak, save time
sig2noise[i] = 0.0
else:
# find second peak height
(peak2_i, peak2_j), corr_max2 = find_second_peak(
corr, peak1_i, peak1_j, width=width
)
                condition = (
                    corr_max2 == 0
                    or peak2_i == 0
                    or peak2_i == corr.shape[0] - 1
                    or peak2_j == 0
                    or peak2_j == corr.shape[1] - 1
                )
if condition: # mark failed peak2
corr_max2 = np.nan
sig2noise[i] = corr_max1[i] / corr_max2
elif sig2noise_method == "peak2mean": # only one loop
for i, corr in enumerate(correlation):
# compute first peak position
(peak1_i, peak1_j), corr_max1[i] = find_first_peak(corr)
            condition = (
                corr_max1[i] < 1e-3
                or peak1_i == 0
                or peak1_i == corr.shape[0] - 1
                or peak1_j == 0
                or peak1_j == corr.shape[1] - 1
            )
            if condition:
                # no valid signal: zero the peak height so that the
                # peak2mean ratio computed below evaluates to zero
                corr_max1[i] = 0.0
# find means of all the correlation maps
corr_max2 = np.abs(correlation.mean(axis=(-2, -1)))
corr_max2[corr_max2 == 0] = np.nan # mark failed ones
sig2noise = corr_max1 / corr_max2
else:
raise ValueError("wrong sig2noise_method")
# sig2noise is zero for all failed ones
sig2noise[np.isnan(sig2noise)] = 0.0
return sig2noise
def fft_correlate_images(image_a, image_b,
correlation_method="circular",
normalized_correlation=True):
""" FFT based cross correlation
of two images with multiple views of np.stride_tricks()
The 2D FFT should be applied to the last two axes (-2,-1) and the
zero axis is the number of the interrogation window
This should also work out of the box for rectangular windows.
Parameters
----------
image_a : 3d np.ndarray, first dimension is the number of windows,
and two last dimensions are interrogation windows of the first image
image_b : similar
correlation_method : string
        one of the two methods implemented: 'circular' or 'linear'
        [default: 'circular'].
    normalized_correlation : bool
        decides whether normalized correlation is done or not: True or False
[default: True].
"""
if normalized_correlation:
# remove the effect of stronger laser or
# longer exposure for frame B
# image_a = match_histograms(image_a, image_b)
# remove mean background, normalize to 0..1 range
image_a = normalize_intensity(image_a)
image_b = normalize_intensity(image_b)
s1 = np.array(image_a.shape[-2:])
s2 = np.array(image_b.shape[-2:])
if correlation_method == "linear":
# have to be normalized, mainly because of zero padding
size = s1 + s2 - 1
fsize = 2 ** np.ceil(np.log2(size)).astype(int)
fslice = (slice(0, image_a.shape[0]),
slice((fsize[0]-s1[0])//2, (fsize[0]+s1[0])//2),
slice((fsize[1]-s1[1])//2, (fsize[1]+s1[1])//2))
f2a = rfft2(image_a, fsize, axes=(-2, -1)).conj()
f2b = rfft2(image_b, fsize, axes=(-2, -1))
corr = fftshift(irfft2(f2a * f2b).real, axes=(-2, -1))[fslice]
elif correlation_method == "circular":
corr = fftshift(irfft2(rfft2(image_a).conj() *
rfft2(image_b)).real, axes=(-2, -1))
    else:
        raise ValueError("correlation method is not implemented")
if normalized_correlation:
corr = corr/(s2[0]*s2[1]) # for extended search area
corr = np.clip(corr, 0, 1)
return corr
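# Shape sketch (synthetic windows, illustration only): with the default
# 'circular' method each interrogation window produces a correlation map of
# the same shape as the window.
#
#   aa = np.random.rand(9, 32, 32)
#   bb = np.random.rand(9, 32, 32)
#   fft_correlate_images(aa, bb).shape  # -> (9, 32, 32)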
def normalize_intensity(window):
"""Normalize interrogation window or strided image of many windows,
by removing the mean intensity value per window and clipping the
negative values to zero
Parameters
----------
window : 2d np.ndarray
the interrogation window array
Returns
-------
window : 2d np.ndarray
        the interrogation window array, with mean value equal to zero,
        divided by the per-window standard deviation, and with negative
        values clipped to zero
"""
window = window.astype(np.float32)
window -= window.mean(axis=(-2, -1),
keepdims=True, dtype=np.float32)
tmp = window.std(axis=(-2, -1), keepdims=True)
window = np.divide(window, tmp, out=np.zeros_like(window),
where=(tmp != 0))
return np.clip(window, 0, window.max())
def correlate_windows(window_a, window_b, correlation_method="fft"):
"""Compute correlation function between two interrogation windows.
The correlation function can be computed by using the correlation
theorem to speed up the computation.
Parameters
----------
window_a : 2d np.ndarray
a two dimensions array for the first interrogation window,
window_b : 2d np.ndarray
a two dimensions array for the second interrogation window.
correlation_method : string, methods currently implemented:
'circular' - FFT based without zero-padding
'linear' - FFT based with zero-padding
'direct' - linear convolution based
Default is 'fft', which is much faster.
Returns
-------
corr : 2d np.ndarray
a two dimensions array for the correlation function.
Note that due to the wish to use 2^N windows for faster FFT
we use a slightly different convention for the size of the
correlation map. The theory says it is M+N-1, and the
        'direct' method gets this size out.
        The FFT-based method returns an output of size M+N, where M is the
        window_size and N is the search_area_size.
        This leads to an inconsistency in the output size.
"""
# first we remove the mean to normalize contrast and intensity
# the background level which is take as a mean of the image
# is subtracted
# import pdb; pdb.set_trace()
window_a = normalize_intensity(window_a)
window_b = normalize_intensity(window_b)
# this is not really circular one, as we pad a bit to get fast 2D FFT,
# see fft_correlate for implementation
if correlation_method in ("circular", "fft"):
corr = fft_correlate_windows(window_a, window_b)
elif correlation_method == "linear":
# save the original size:
s1 = np.array(window_a.shape)
s2 = np.array(window_b.shape)
size = s1 + s2 - 1
fslice = tuple([slice(0, int(sz)) for sz in size])
# and slice only the relevant part
corr = fft_correlate_windows(window_a, window_b)[fslice]
elif correlation_method == "direct":
corr = convolve2d(window_a, window_b[::-1, ::-1], "full")
else:
raise ValueError("method is not implemented")
return corr
def fft_correlate_windows(window_a, window_b):
""" FFT based cross correlation
it is a so-called linear convolution based,
since we increase the size of the FFT to
reduce the edge effects.
This should also work out of the box for rectangular windows.
Parameters
----------
window_a : 2d np.ndarray
a two dimensions array for the first interrogation window,
window_b : 2d np.ndarray
a two dimensions array for the second interrogation window.
# from Stackoverflow:
    from scipy import signal
    import numpy as np
    # works for rectangular windows as well
    x = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 3, 0],
         [0, 0, 0, 1], [0, 0, 0, 1]]
    x = np.array(x, dtype=float)
    y = [[4, 5], [3, 4]]
    y = np.array(y)
    print("conv:", signal.convolve2d(x, y, 'full'))
s1 = np.array(x.shape)
s2 = np.array(y.shape)
size = s1 + s2 - 1
fsize = 2 ** np.ceil(np.log2(size)).astype(int)
fslice = tuple([slice(0, int(sz)) for sz in size])
new_x = np.fft.fft2(x , fsize)
new_y = np.fft.fft2(y , fsize)
result = np.fft.ifft2(new_x*new_y)[fslice].copy()
print("fft for my method:" , np.array(result.real, np.int32))
"""
s1 = np.array(window_a.shape)
s2 = np.array(window_b.shape)
size = s1 + s2 - 1
fsize = 2 ** np.ceil(np.log2(size)).astype(int)
fslice = tuple([slice(0, int(sz)) for sz in size])
f2a = rfft2(window_a, fsize)
f2b = rfft2(window_b[::-1, ::-1], fsize)
corr = irfft2(f2a * f2b).real[fslice]
return corr
def extended_search_area_piv(
frame_a,
frame_b,
window_size,
overlap=0,
dt=1.0,
search_area_size=None,
correlation_method="circular",
subpixel_method="gaussian",
sig2noise_method='peak2mean',
width=2,
normalized_correlation=False
):
"""Standard PIV cross-correlation algorithm, with an option for
extended area search that increased dynamic range. The search region
in the second frame is larger than the interrogation window size in the
first frame. For Cython implementation see
openpiv.process.extended_search_area_piv
This is a pure python implementation of the standard PIV cross-correlation
algorithm. It is a zero order displacement predictor, and no iterative
process is performed.
Parameters
----------
frame_a : 2d np.ndarray
an two dimensions array of integers containing grey levels of
the first frame.
frame_b : 2d np.ndarray
an two dimensions array of integers containing grey levels of
the second frame.
window_size : int
the size of the (square) interrogation window, [default: 32 pix].
overlap : int
the number of pixels by which two adjacent windows overlap
[default: 16 pix].
dt : float
the time delay separating the two frames [default: 1.0].
correlation_method : string
one of the two methods implemented: 'circular' or 'linear',
default: 'circular', it's faster, without zero-padding
'linear' requires also normalized_correlation = True (see below)
subpixel_method : string
one of the following methods to estimate subpixel location of the
peak:
'centroid' [replaces default if correlation map is negative],
'gaussian' [default if correlation map is positive],
'parabolic'.
sig2noise_method : string
defines the method of signal-to-noise-ratio measure,
('peak2peak' or 'peak2mean'. If None, no measure is performed.)
    nfftx : int
        the size of the 2D FFT in x-direction
        (not used by this implementation)
    nffty : int
        the size of the 2D FFT in y-direction
        (not used by this implementation)
width : int
the half size of the region around the first
correlation peak to ignore for finding the second
peak. [default: 2]. Only used if ``sig2noise_method==peak2peak``.
search_area_size : int
the size of the interrogation window in the second frame,
default is the same interrogation window size and it is a
fallback to the simplest FFT based PIV
normalized_correlation: bool
if True, then the image intensity will be modified by removing
the mean, dividing by the standard deviation and
the correlation map will be normalized. It's slower but could be
more robust
Returns
-------
u : 2d np.ndarray
a two dimensional array containing the u velocity component,
in pixels/seconds.
v : 2d np.ndarray
a two dimensional array containing the v velocity component,
in pixels/seconds.
sig2noise : 2d np.ndarray, ( optional: only if sig2noise_method != None )
a two dimensional array the signal to noise ratio for each
window pair.
The implementation of the one-step direct correlation with different
size of the interrogation window and the search area. The increased
size of the search areas cope with the problem of loss of pairs due
to in-plane motion, allowing for a smaller interrogation window size,
without increasing the number of outlier vectors.
See:
Particle-Imaging Techniques for Experimental Fluid Mechanics
Annual Review of Fluid Mechanics
Vol. 23: 261-304 (Volume publication date January 1991)
DOI: 10.1146/annurev.fl.23.010191.001401
originally implemented in process.pyx in Cython and converted to
a NumPy vectorized solution in pyprocess.py
"""
# check the inputs for validity
if search_area_size is None:
search_area_size = window_size
if overlap >= window_size:
raise ValueError("Overlap has to be smaller than the window_size")
if search_area_size < window_size:
raise ValueError("Search size cannot be smaller than the window_size")
if (window_size > frame_a.shape[0]) or (window_size > frame_a.shape[1]):
raise ValueError("window size cannot be larger than the image")
# get field shape
n_rows, n_cols = get_field_shape(frame_a.shape, search_area_size, overlap)
# We implement the new vectorized code
aa = moving_window_array(frame_a, search_area_size, overlap)
bb = moving_window_array(frame_b, search_area_size, overlap)
    # for the case of extended search, the window size is smaller than
# the search_area_size. In order to keep it all vectorized the
# approach is to use the interrogation window in both
# frames of the same size of search_area_asize,
# but mask out the region around
# the interrogation window in the frame A
if search_area_size > window_size:
# before masking with zeros we need to remove
# edges
aa = normalize_intensity(aa)
bb = normalize_intensity(bb)
mask = np.zeros((search_area_size, search_area_size)).astype(aa.dtype)
pad = int((search_area_size - window_size) / 2)
mask[slice(pad, search_area_size - pad),
slice(pad, search_area_size - pad)] = 1
mask = np.broadcast_to(mask, aa.shape)
aa *= mask
corr = fft_correlate_images(aa, bb,
correlation_method=correlation_method,
normalized_correlation=normalized_correlation)
u, v = correlation_to_displacement(corr, n_rows, n_cols,
subpixel_method=subpixel_method)
# return output depending if user wanted sig2noise information
if sig2noise_method is not None:
sig2noise = sig2noise_ratio(
corr, sig2noise_method=sig2noise_method, width=width
)
else:
sig2noise = np.zeros_like(u)*np.nan
sig2noise = sig2noise.reshape(n_rows, n_cols)
return u/dt, v/dt, sig2noise
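# Usage sketch (synthetic frames and assumed parameters, not taken from the
# original documentation):
#
#   frame_a = np.random.rand(256, 256)
#   frame_b = np.random.rand(256, 256)
#   u, v, s2n = extended_search_area_piv(frame_a, frame_b, window_size=32,
#                                        overlap=16, dt=1.0)
#   # u, v and s2n are (15, 15) arrays here, matching get_field_shape(...)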
def correlation_to_displacement(corr, n_rows, n_cols,
subpixel_method="gaussian"):
"""
Correlation maps are converted to displacement for each interrogation
window using the convention that the size of the correlation map
is 2N -1 where N is the size of the largest interrogation window
(in frame B) that is called search_area_size
Inputs:
corr : 3D nd.array
contains output of the fft_correlate_images
n_rows, n_cols : number of interrogation windows, output of the
get_field_shape
"""
# iterate through interrogation widows and search areas
u = np.zeros((n_rows, n_cols))
v = np.zeros((n_rows, n_cols))
# center point of the correlation map
default_peak_position = np.floor(np.array(corr[0, :, :].shape)/2)
for k in range(n_rows):
for m in range(n_cols):
# look at studying_correlations.ipynb
# the find_subpixel_peak_position returns
peak = np.array(find_subpixel_peak_position(corr[k*n_cols+m, :, :],
subpixel_method=subpixel_method)) -\
default_peak_position
# the horizontal shift from left to right is the u
# the vertical displacement from top to bottom (increasing row) is v
# x the vertical shift from top to bottom is row-wise shift is now
# a negative vertical
u[k, m], v[k, m] = peak[1], peak[0]
return (u, v)
def nextpower2(i):
""" Find 2^n that is equal to or greater than. """
n = 1
while n < i:
n *= 2
return n
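# Quick illustration (added for clarity):
#
#   nextpower2(5)  # -> 8
#   nextpower2(8)  # -> 8
#   nextpower2(9)  # -> 16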
| gpl-3.0 | -7,874,689,390,964,289,000 | 33.771868 | 80 | 0.614543 | false |
EvanK/ansible | lib/ansible/modules/network/nxos/nxos_snmp_location.py | 106 | 4000 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_snmp_location
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages SNMP location information.
description:
- Manages SNMP location configuration.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
options:
location:
description:
- Location information.
required: true
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# ensure snmp location is configured
- nxos_snmp_location:
location: Test
state: present
# ensure snmp location is not configured
- nxos_snmp_location:
location: Test
state: absent
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["snmp-server location New_Test"]
'''
import re
from ansible.module_utils.network.nxos.nxos import load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
def execute_show_command(command, module):
command = {
'command': command,
'output': 'text',
}
return run_commands(module, command)
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_snmp_location(module):
location = {}
location_regex = r'^\s*snmp-server\s+location\s+(?P<location>.+)$'
body = execute_show_command('show run snmp', module)[0]
match_location = re.search(location_regex, body, re.M)
if match_location:
location['location'] = match_location.group("location")
return location
def main():
argument_spec = dict(
location=dict(required=True, type='str'),
state=dict(choices=['absent', 'present'], default='present'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
warnings = list()
check_args(module, warnings)
results = {'changed': False, 'commands': [], 'warnings': warnings}
location = module.params['location']
state = module.params['state']
existing = get_snmp_location(module)
commands = []
if state == 'absent':
if existing and existing['location'] == location:
commands.append('no snmp-server location')
elif state == 'present':
if not existing or existing['location'] != location:
commands.append('snmp-server location {0}'.format(location))
cmds = flatten_list(commands)
if cmds:
results['changed'] = True
if not module.check_mode:
load_config(module, cmds)
if 'configure' in cmds:
cmds.pop(0)
results['commands'] = cmds
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 | 6,908,618,628,149,143,000 | 25.666667 | 81 | 0.654 | false |
brandond/ansible | lib/ansible/modules/network/f5/bigip_gtm_datacenter.py | 14 | 14902 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_gtm_datacenter
short_description: Manage Datacenter configuration in BIG-IP
description:
- Manage BIG-IP data center configuration. A data center defines the location
where the physical network components reside, such as the server and link
objects that share the same subnet on the network. This module is able to
manipulate the data center definitions in a BIG-IP.
version_added: 2.2
options:
contact:
description:
- The name of the contact for the data center.
description:
description:
- The description of the data center.
location:
description:
- The location of the data center.
name:
description:
- The name of the data center.
required: True
state:
description:
      - The datacenter state. If C(absent), an attempt to delete the
        datacenter will be made. This will only succeed if the
        datacenter is not referenced by other GTM objects. C(present) creates
        the datacenter and enables it. If C(enabled), enable the datacenter
        if it exists. If C(disabled), create the datacenter if
        needed, and set state to C(disabled).
default: present
choices:
- present
- absent
- enabled
- disabled
partition:
description:
- Device partition to manage resources on.
default: Common
version_added: 2.5
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create data center "New York"
bigip_gtm_datacenter:
name: New York
location: 222 West 23rd
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
'''
RETURN = r'''
contact:
description: The contact that was set on the datacenter.
returned: changed
type: str
sample: [email protected]
description:
description: The description that was set for the datacenter.
returned: changed
type: str
sample: Datacenter in NYC
enabled:
description: Whether the datacenter is enabled or not
returned: changed
type: bool
sample: true
disabled:
description: Whether the datacenter is disabled or not.
returned: changed
type: bool
sample: true
state:
description: State of the datacenter.
returned: changed
type: str
sample: disabled
location:
description: The location that is set for the datacenter.
returned: changed
type: str
sample: 222 West 23rd
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.icontrol import module_provisioned
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.icontrol import module_provisioned
class Parameters(AnsibleF5Parameters):
api_map = {}
updatables = [
'location', 'description', 'contact', 'state',
]
returnables = [
'location', 'description', 'contact', 'state', 'enabled', 'disabled',
]
api_attributes = [
'enabled', 'location', 'description', 'contact', 'disabled',
]
class ApiParameters(Parameters):
@property
def disabled(self):
if self._values['disabled'] is True:
return True
return None
@property
def enabled(self):
if self._values['enabled'] is True:
return True
return None
class ModuleParameters(Parameters):
@property
def disabled(self):
if self._values['state'] == 'disabled':
return True
return None
@property
def enabled(self):
if self._values['state'] in ['enabled', 'present']:
return True
return None
@property
def state(self):
if self.enabled and self._values['state'] != 'present':
return 'enabled'
elif self.disabled and self._values['state'] != 'present':
return 'disabled'
else:
return self._values['state']
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
@property
def disabled(self):
if self._values['state'] == 'disabled':
return True
@property
def enabled(self):
if self._values['state'] in ['enabled', 'present']:
return True
class ReportableChanges(Changes):
@property
def disabled(self):
if self._values['state'] == 'disabled':
return True
elif self._values['state'] in ['enabled', 'present']:
return False
return None
@property
def enabled(self):
if self._values['state'] in ['enabled', 'present']:
return True
elif self._values['state'] == 'disabled':
return False
return None
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def state(self):
if self.want.enabled != self.have.enabled:
return dict(
state=self.want.state,
enabled=self.want.enabled
)
if self.want.disabled != self.have.disabled:
return dict(
state=self.want.state,
disabled=self.want.disabled
)
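# Minimal sketch of the comparison pattern (hypothetical stand-in objects,
# for illustration only): compare() returns the wanted value when it differs
# from what is currently on the device.
#
#   class _P(object):  # stand-in for the Parameters objects above
#       location = None
#   want, have = _P(), _P()
#   want.location, have.location = 'NYC', 'Seattle'
#   Difference(want, have).compare('location')  # -> 'NYC'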
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.pop('module', None)
self.client = kwargs.pop('client', None)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def exec_module(self):
if not module_provisioned(self.client, 'gtm'):
raise F5ModuleError(
"GTM must be provisioned to use this module."
)
changed = False
result = dict()
state = self.want.state
if state in ['present', 'enabled', 'disabled']:
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def absent(self):
changed = False
if self.exists():
changed = self.remove()
return changed
def create(self):
self.have = ApiParameters()
self.should_update()
if self.module.check_mode:
return True
self.create_on_device()
if self.exists():
return True
else:
raise F5ModuleError("Failed to create the datacenter")
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the datacenter")
return True
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/datacenter/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/gtm/datacenter/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/gtm/datacenter/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/datacenter/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.delete(uri)
if resp.status == 200:
return True
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/datacenter/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
contact=dict(),
description=dict(),
location=dict(),
name=dict(required=True),
state=dict(
default='present',
choices=['present', 'absent', 'disabled', 'enabled']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
client = F5RestClient(**module.params)
try:
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
exit_json(module, results, client)
except F5ModuleError as ex:
cleanup_tokens(client)
fail_json(module, ex, client)
if __name__ == '__main__':
main()
| gpl-3.0 | -6,094,070,602,254,276,000 | 29.165992 | 91 | 0.600725 | false |
luogangyi/bcec-nova | nova/virt/hyperv/migrationops.py | 3 | 12857 | # Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for migration / resize operations.
"""
import os
from nova import exception
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import units
from nova.virt import configdrive
from nova.virt.hyperv import imagecache
from nova.virt.hyperv import utilsfactory
from nova.virt.hyperv import vmops
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import volumeops
LOG = logging.getLogger(__name__)
class MigrationOps(object):
def __init__(self):
self._hostutils = utilsfactory.get_hostutils()
self._vmutils = utilsfactory.get_vmutils()
self._vhdutils = utilsfactory.get_vhdutils()
self._pathutils = utilsfactory.get_pathutils()
self._volumeops = volumeops.VolumeOps()
self._vmops = vmops.VMOps()
self._imagecache = imagecache.ImageCache()
def _migrate_disk_files(self, instance_name, disk_files, dest):
# TODO(mikal): it would be nice if this method took a full instance,
# because it could then be passed to the log messages below.
same_host = False
if dest in self._hostutils.get_local_ips():
same_host = True
LOG.debug(_("Migration target is the source host"))
else:
LOG.debug(_("Migration target host: %s") % dest)
instance_path = self._pathutils.get_instance_dir(instance_name)
revert_path = self._pathutils.get_instance_migr_revert_dir(
instance_name, remove_dir=True)
dest_path = None
try:
if same_host:
# Since source and target are the same, we copy the files to
# a temporary location before moving them into place
dest_path = '%s_tmp' % instance_path
if self._pathutils.exists(dest_path):
self._pathutils.rmtree(dest_path)
self._pathutils.makedirs(dest_path)
else:
dest_path = self._pathutils.get_instance_dir(
instance_name, dest, remove_dir=True)
for disk_file in disk_files:
# Skip the config drive as the instance is already configured
if os.path.basename(disk_file).lower() != 'configdrive.vhd':
LOG.debug(_('Copying disk "%(disk_file)s" to '
'"%(dest_path)s"'),
{'disk_file': disk_file, 'dest_path': dest_path})
self._pathutils.copy(disk_file, dest_path)
self._pathutils.rename(instance_path, revert_path)
if same_host:
self._pathutils.rename(dest_path, instance_path)
except Exception:
with excutils.save_and_reraise_exception():
self._cleanup_failed_disk_migration(instance_path, revert_path,
dest_path)
def _cleanup_failed_disk_migration(self, instance_path,
revert_path, dest_path):
try:
if dest_path and self._pathutils.exists(dest_path):
self._pathutils.rmtree(dest_path)
if self._pathutils.exists(revert_path):
self._pathutils.rename(revert_path, instance_path)
except Exception as ex:
# Log and ignore this exception
LOG.exception(ex)
LOG.error(_("Cannot cleanup migration files"))
def _check_target_flavor(self, instance, flavor):
new_root_gb = flavor['root_gb']
curr_root_gb = instance['root_gb']
if new_root_gb < curr_root_gb:
raise exception.InstanceFaultRollback(
vmutils.VHDResizeException(
_("Cannot resize the root disk to a smaller size. "
"Current size: %(curr_root_gb)s GB. Requested size: "
"%(new_root_gb)s GB") %
{'curr_root_gb': curr_root_gb,
'new_root_gb': new_root_gb}))
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
block_device_info=None):
LOG.debug(_("migrate_disk_and_power_off called"), instance=instance)
self._check_target_flavor(instance, flavor)
self._vmops.power_off(instance)
instance_name = instance["name"]
(disk_files,
volume_drives) = self._vmutils.get_vm_storage_paths(instance_name)
if disk_files:
self._migrate_disk_files(instance_name, disk_files, dest)
self._vmops.destroy(instance, destroy_disks=False)
# disk_info is not used
return ""
def confirm_migration(self, migration, instance, network_info):
LOG.debug(_("confirm_migration called"), instance=instance)
self._pathutils.get_instance_migr_revert_dir(instance['name'],
remove_dir=True)
def _revert_migration_files(self, instance_name):
instance_path = self._pathutils.get_instance_dir(
instance_name, create_dir=False, remove_dir=True)
revert_path = self._pathutils.get_instance_migr_revert_dir(
instance_name)
self._pathutils.rename(revert_path, instance_path)
def _check_and_attach_config_drive(self, instance):
if configdrive.required_by(instance):
configdrive_path = self._pathutils.lookup_configdrive_path(
instance.name)
if configdrive_path:
self._vmops.attach_config_drive(instance, configdrive_path)
else:
raise vmutils.HyperVException(
_("Config drive is required by instance: %s, "
"but it does not exist.") % instance.name)
def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
LOG.debug(_("finish_revert_migration called"), instance=instance)
instance_name = instance['name']
self._revert_migration_files(instance_name)
if self._volumeops.ebs_root_in_block_devices(block_device_info):
root_vhd_path = None
else:
root_vhd_path = self._pathutils.lookup_root_vhd_path(instance_name)
eph_vhd_path = self._pathutils.lookup_ephemeral_vhd_path(instance_name)
self._vmops.create_instance(instance, network_info, block_device_info,
root_vhd_path, eph_vhd_path)
self._check_and_attach_config_drive(instance)
if power_on:
self._vmops.power_on(instance)
def _merge_base_vhd(self, diff_vhd_path, base_vhd_path):
base_vhd_copy_path = os.path.join(os.path.dirname(diff_vhd_path),
os.path.basename(base_vhd_path))
try:
LOG.debug(_('Copying base disk %(base_vhd_path)s to '
'%(base_vhd_copy_path)s'),
{'base_vhd_path': base_vhd_path,
'base_vhd_copy_path': base_vhd_copy_path})
self._pathutils.copyfile(base_vhd_path, base_vhd_copy_path)
LOG.debug(_("Reconnecting copied base VHD "
"%(base_vhd_copy_path)s and diff "
"VHD %(diff_vhd_path)s"),
{'base_vhd_copy_path': base_vhd_copy_path,
'diff_vhd_path': diff_vhd_path})
self._vhdutils.reconnect_parent_vhd(diff_vhd_path,
base_vhd_copy_path)
LOG.debug(_("Merging base disk %(base_vhd_copy_path)s and "
"diff disk %(diff_vhd_path)s"),
{'base_vhd_copy_path': base_vhd_copy_path,
'diff_vhd_path': diff_vhd_path})
self._vhdutils.merge_vhd(diff_vhd_path, base_vhd_copy_path)
# Replace the differential VHD with the merged one
self._pathutils.rename(base_vhd_copy_path, diff_vhd_path)
except Exception:
with excutils.save_and_reraise_exception():
if self._pathutils.exists(base_vhd_copy_path):
self._pathutils.remove(base_vhd_copy_path)
def _check_resize_vhd(self, vhd_path, vhd_info, new_size):
curr_size = vhd_info['MaxInternalSize']
if new_size < curr_size:
raise vmutils.VHDResizeException(_("Cannot resize a VHD "
"to a smaller size"))
elif new_size > curr_size:
self._resize_vhd(vhd_path, new_size)
def _resize_vhd(self, vhd_path, new_size):
if vhd_path.split('.')[-1].lower() == "vhd":
LOG.debug(_("Getting parent disk info for disk: %s"), vhd_path)
base_disk_path = self._vhdutils.get_vhd_parent_path(vhd_path)
if base_disk_path:
# A differential VHD cannot be resized. This limitation
# does not apply to the VHDX format.
self._merge_base_vhd(vhd_path, base_disk_path)
LOG.debug(_("Resizing disk \"%(vhd_path)s\" to new max "
"size %(new_size)s"),
{'vhd_path': vhd_path, 'new_size': new_size})
self._vhdutils.resize_vhd(vhd_path, new_size)
def _check_base_disk(self, context, instance, diff_vhd_path,
src_base_disk_path):
base_vhd_path = self._imagecache.get_cached_image(context, instance)
        # If the location of the base disk differs between the source
        # and target hosts, we need to reconnect the base disk
if src_base_disk_path.lower() != base_vhd_path.lower():
LOG.debug(_("Reconnecting copied base VHD "
"%(base_vhd_path)s and diff "
"VHD %(diff_vhd_path)s"),
{'base_vhd_path': base_vhd_path,
'diff_vhd_path': diff_vhd_path})
self._vhdutils.reconnect_parent_vhd(diff_vhd_path,
base_vhd_path)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
block_device_info=None, power_on=True):
LOG.debug(_("finish_migration called"), instance=instance)
instance_name = instance['name']
if self._volumeops.ebs_root_in_block_devices(block_device_info):
root_vhd_path = None
else:
root_vhd_path = self._pathutils.lookup_root_vhd_path(instance_name)
if not root_vhd_path:
raise vmutils.HyperVException(_("Cannot find boot VHD "
"file for instance: %s") %
instance_name)
root_vhd_info = self._vhdutils.get_vhd_info(root_vhd_path)
src_base_disk_path = root_vhd_info.get("ParentPath")
if src_base_disk_path:
self._check_base_disk(context, instance, root_vhd_path,
src_base_disk_path)
if resize_instance:
new_size = instance['root_gb'] * units.Gi
self._check_resize_vhd(root_vhd_path, root_vhd_info, new_size)
eph_vhd_path = self._pathutils.lookup_ephemeral_vhd_path(instance_name)
if resize_instance:
new_size = instance.get('ephemeral_gb', 0) * units.Gi
if not eph_vhd_path:
if new_size:
eph_vhd_path = self._vmops.create_ephemeral_vhd(instance)
else:
eph_vhd_info = self._vhdutils.get_vhd_info(eph_vhd_path)
self._check_resize_vhd(eph_vhd_path, eph_vhd_info, new_size)
self._vmops.create_instance(instance, network_info, block_device_info,
root_vhd_path, eph_vhd_path)
self._check_and_attach_config_drive(instance)
if power_on:
self._vmops.power_on(instance)
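# A hedged usage sketch (not part of the original module): the names below
# stand in for real Nova context/instance objects and a target host address.
#     ops = MigrationOps()
#     disk_info = ops.migrate_disk_and_power_off(ctxt, instance, '10.0.0.2',
#                                                flavor, network_info)
#     ops.finish_migration(ctxt, migration, instance, disk_info,
#                          network_info, image_meta, resize_instance=True)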
| apache-2.0 | -9,201,883,551,362,900,000 | 43.030822 | 79 | 0.568951 | false |
petrutlucian94/cinder | cinder/tests/unit/test_ibm_flashsystem_iscsi.py | 18 | 9722 | # Copyright 2015 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Tests for the IBM FlashSystem iSCSI volume driver.
"""
import random
import mock
import six
from cinder import context
from cinder import exception
from cinder import test
from cinder.tests.unit import test_ibm_flashsystem as fscommon
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.ibm import flashsystem_iscsi
from cinder.volume import volume_types
class FlashSystemManagementSimulator(fscommon.FlashSystemManagementSimulator):
def __init__(self):
# Default protocol is iSCSI
self._protocol = 'iSCSI'
self._volumes_list = {}
self._hosts_list = {}
self._mappings_list = {}
self._next_cmd_error = {
'lsnode': '',
'lssystem': '',
'lsmdiskgrp': ''
}
self._errors = {
            # CMMVC50000 is a fake error indicating that a command did not
            # produce the expected results. It stands in for various CLI errors.
'CMMVC50000': ('', 'CMMVC50000 The command can not be executed '
'successfully.')
}
class FlashSystemFakeISCSIDriver(flashsystem_iscsi.FlashSystemISCSIDriver):
def __init__(self, *args, **kwargs):
super(FlashSystemFakeISCSIDriver, self).__init__(*args, **kwargs)
def set_fake_storage(self, fake):
self.fake_storage = fake
def _ssh(self, cmd, check_exit_code=True):
utils.check_ssh_injection(cmd)
ret = self.fake_storage.execute_command(cmd, check_exit_code)
return ret
class FlashSystemISCSIDriverTestCase(test.TestCase):
def _set_flag(self, flag, value):
group = self.driver.configuration.config_group
self.driver.configuration.set_override(flag, value, group)
def _reset_flags(self):
self.driver.configuration.local_conf.reset()
for k, v in self._def_flags.items():
self._set_flag(k, v)
def _generate_vol_info(self,
vol_name,
vol_size=10,
vol_status='available'):
rand_id = six.text_type(random.randint(10000, 99999))
if not vol_name:
vol_name = 'test_volume%s' % rand_id
return {'name': vol_name,
'size': vol_size,
'id': '%s' % rand_id,
'volume_type_id': None,
'status': vol_status,
'mdisk_grp_name': 'mdiskgrp0'}
def _generate_snap_info(self,
vol_name,
vol_id,
vol_size,
vol_status,
snap_status='available'):
rand_id = six.text_type(random.randint(10000, 99999))
return {'name': 'test_snap_%s' % rand_id,
'id': rand_id,
'volume': {'name': vol_name,
'id': vol_id,
'size': vol_size,
'status': vol_status},
'volume_size': vol_size,
'status': snap_status,
'mdisk_grp_name': 'mdiskgrp0'}
def setUp(self):
super(FlashSystemISCSIDriverTestCase, self).setUp()
self._def_flags = {'san_ip': 'hostname',
'san_login': 'username',
'san_password': 'password',
'flashsystem_connection_protocol': 'iSCSI',
'flashsystem_multipath_enabled': False,
'flashsystem_multihostmap_enabled': True,
'iscsi_ip_address': '192.168.1.10',
'flashsystem_iscsi_portid': 1}
self.connector = {
'host': 'flashsystem',
'wwnns': ['0123456789abcdef', '0123456789abcdeg'],
'wwpns': ['abcd000000000001', 'abcd000000000002'],
'initiator': 'iqn.123456'}
self.sim = FlashSystemManagementSimulator()
self.driver = FlashSystemFakeISCSIDriver(
configuration=conf.Configuration(None))
self.driver.set_fake_storage(self.sim)
self._reset_flags()
self.ctxt = context.get_admin_context()
self.driver.do_setup(None)
self.driver.check_for_setup_error()
self.sleeppatch = mock.patch('eventlet.greenthread.sleep')
self.sleeppatch.start()
def tearDown(self):
self.sleeppatch.stop()
super(FlashSystemISCSIDriverTestCase, self).tearDown()
def test_flashsystem_do_setup(self):
# case 1: set as iSCSI
self.sim.set_protocol('iSCSI')
self._set_flag('flashsystem_connection_protocol', 'iSCSI')
self.driver.do_setup(None)
self.assertEqual('iSCSI', self.driver._protocol)
# clear environment
self.sim.set_protocol('iSCSI')
self._reset_flags()
def test_flashsystem_validate_connector(self):
conn_neither = {'host': 'host'}
conn_iscsi = {'host': 'host', 'initiator': 'foo'}
conn_both = {'host': 'host', 'initiator': 'foo', 'wwpns': 'bar'}
protocol = self.driver._protocol
# case 1: when protocol is iSCSI
self.driver._protocol = 'iSCSI'
self.driver.validate_connector(conn_iscsi)
self.driver.validate_connector(conn_both)
self.assertRaises(exception.InvalidConnectorException,
self.driver.validate_connector, conn_neither)
# clear environment
self.driver._protocol = protocol
def test_flashsystem_connection(self):
# case 1: initialize_connection/terminate_connection with iSCSI
self.sim.set_protocol('iSCSI')
self._set_flag('flashsystem_connection_protocol', 'iSCSI')
self.driver.do_setup(None)
vol1 = self._generate_vol_info(None)
self.driver.create_volume(vol1)
self.driver.initialize_connection(vol1, self.connector)
self.driver.terminate_connection(vol1, self.connector)
# clear environment
self.driver.delete_volume(vol1)
self.sim.set_protocol('iSCSI')
self._reset_flags()
def test_flashsystem_create_host(self):
# case 1: create host with iqn
self.sim.set_protocol('iSCSI')
self._set_flag('flashsystem_connection_protocol', 'iSCSI')
self.driver.do_setup(None)
conn = {
'host': 'flashsystem',
'wwnns': ['0123456789abcdef', '0123456789abcdeg'],
'wwpns': ['abcd000000000001', 'abcd000000000002'],
'initiator': 'iqn.123456'}
host = self.driver._create_host(conn)
# case 2: delete host
self.driver._delete_host(host)
# clear environment
self.sim.set_protocol('iSCSI')
self._reset_flags()
def test_flashsystem_get_vdisk_params(self):
# case 1: use default params
self.driver._get_vdisk_params(None)
# case 2: use extra params from type
opts1 = {'storage_protocol': 'iSCSI'}
opts2 = {'capabilities:storage_protocol': 'iSCSI'}
opts3 = {'storage_protocol': 'FC'}
type1 = volume_types.create(self.ctxt, 'opts1', opts1)
type2 = volume_types.create(self.ctxt, 'opts2', opts2)
type3 = volume_types.create(self.ctxt, 'opts3', opts3)
self.assertEqual(
'iSCSI',
self.driver._get_vdisk_params(type1['id'])['protocol'])
self.assertEqual(
'iSCSI',
self.driver._get_vdisk_params(type2['id'])['protocol'])
self.assertRaises(exception.InvalidInput,
self.driver._get_vdisk_params,
type3['id'])
# clear environment
volume_types.destroy(self.ctxt, type1['id'])
volume_types.destroy(self.ctxt, type2['id'])
volume_types.destroy(self.ctxt, type3['id'])
def test_flashsystem_map_vdisk_to_host(self):
# case 1: no host found
vol1 = self._generate_vol_info(None)
self.driver.create_volume(vol1)
self.assertEqual(
            # lun id should begin with 1
1,
self.driver._map_vdisk_to_host(vol1['name'], self.connector))
# case 2: host already exists
vol2 = self._generate_vol_info(None)
self.driver.create_volume(vol2)
self.assertEqual(
            # lun id should be sequential
2,
self.driver._map_vdisk_to_host(vol2['name'], self.connector))
# case 3: test if already mapped
self.assertEqual(
1,
self.driver._map_vdisk_to_host(vol1['name'], self.connector))
# clean environment
self.driver._unmap_vdisk_from_host(vol1['name'], self.connector)
self.driver._unmap_vdisk_from_host(vol2['name'], self.connector)
self.driver.delete_volume(vol1)
self.driver.delete_volume(vol2)
        # case 4: if no vdisk is mapped to the host, the host should be removed
self.assertEqual(
None,
self.driver._get_host_from_connector(self.connector))
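# A hedged convenience hook (not part of the original module): allows running
# this test case directly; in-tree runs normally go through the project's
# test runner instead.
if __name__ == '__main__':
    import unittest
    unittest.main()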
| apache-2.0 | 4,882,847,486,622,533,000 | 35.548872 | 78 | 0.582288 | false |
geodynamics/snac | Snac/pyre/Inlet.py | 5 | 3206 | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
class Inlet(object):
def __init__(self):
self._handle = None
return
def impose(self):
import Snac.pyre.Exchanger as Exchanger
Exchanger.Inlet_impose(self._handle)
return
def recv(self):
import Snac.pyre.Exchanger as Exchanger
Exchanger.Inlet_recv(self._handle)
return
def storeTimestep(self, fge_t, cge_t):
import Snac.pyre.Exchanger as Exchanger
Exchanger.Inlet_storeTimestep(self._handle, fge_t, cge_t)
return
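# A hedged usage sketch (arguments are illustrative): a coupled solver loop
# would typically pull boundary data from the sink and apply it each step.
#     inlet = VInlet(mesh, sink, all_variables)
#     inlet.recv()
#     inlet.impose()
#     inlet.storeTimestep(fge_t, cge_t)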
"""
class SVTInlet(Inlet):
def __init__(self, mesh, sink, all_variables):
import CitcomS.Exchanger as Exchanger
self._handle = Exchanger.SVTInlet_create(mesh,
sink,
all_variables)
return
"""
class VInlet(Inlet):
def __init__(self, mesh, sink, all_variables):
import Snac.pyre.Exchanger as Exchanger
self._handle = Exchanger.VInlet_create(mesh,
sink,
all_variables)
return
def storeVold(self):
import Snac.pyre.Exchanger as Exchanger
Exchanger.VInlet_storeVold(self._handle)
return
def readVold(self):
import Snac.pyre.Exchanger as Exchanger
Exchanger.VInlet_readVold(self._handle)
return
"""
class VTInlet(Inlet):
def __init__(self, mesh, sink, all_variables):
import Snac.pyre.Exchanger as Exchanger
self._handle = Exchanger.VTInlet_create(mesh,
sink,
all_variables)
return
class BoundaryVTInlet(Inlet):
'''Available modes -- see above
'''
def __init__(self, communicator, boundary, sink, all_variables, mode="VT"):
import CitcomS.Exchanger as Exchanger
self._handle = Exchanger.BoundaryVTInlet_create(communicator.handle(),
boundary,
sink,
all_variables,
mode)
return
class TractionInlet(Inlet):
'''Inlet that impose velocity and/or traction on the boundary
Available modes --
"F": traction only
"V": velocity only
"FV": normal velocity and tangent traction
'''
def __init__(self, boundary, sink, all_variables, mode='F'):
import CitcomS.Exchanger as Exchanger
self._handle = Exchanger.TractionInlet_create(boundary,
sink,
all_variables,
mode)
return
"""
# version
__id__ = "$Id: Inlet.py,v 1.6 2004/05/11 07:59:31 tan2 Exp $"
# End of file
| gpl-2.0 | -1,576,294,592,112,514,000 | 25.278689 | 80 | 0.471616 | false |
Yong-Lee/decode-Django | Django-1.5.1/django/contrib/gis/tests/geoapp/models.py | 112 | 1877 | from django.contrib.gis.db import models
from django.contrib.gis.tests.utils import mysql, spatialite
from django.utils.encoding import python_2_unicode_compatible
# MySQL spatial indices can't handle NULL geometries.
null_flag = not mysql
@python_2_unicode_compatible
class Country(models.Model):
name = models.CharField(max_length=30)
mpoly = models.MultiPolygonField() # SRID, by default, is 4326
objects = models.GeoManager()
def __str__(self): return self.name
@python_2_unicode_compatible
class City(models.Model):
name = models.CharField(max_length=30)
point = models.PointField()
objects = models.GeoManager()
def __str__(self): return self.name
# This is an inherited model from City
class PennsylvaniaCity(City):
county = models.CharField(max_length=30)
founded = models.DateTimeField(null=True)
objects = models.GeoManager() # TODO: This should be implicitly inherited.
@python_2_unicode_compatible
class State(models.Model):
name = models.CharField(max_length=30)
poly = models.PolygonField(null=null_flag) # Allowing NULL geometries here.
objects = models.GeoManager()
def __str__(self): return self.name
@python_2_unicode_compatible
class Track(models.Model):
name = models.CharField(max_length=30)
line = models.LineStringField()
objects = models.GeoManager()
def __str__(self): return self.name
class Truth(models.Model):
val = models.BooleanField()
objects = models.GeoManager()
if not spatialite:
@python_2_unicode_compatible
class Feature(models.Model):
name = models.CharField(max_length=20)
geom = models.GeometryField()
objects = models.GeoManager()
def __str__(self): return self.name
class MinusOneSRID(models.Model):
geom = models.PointField(srid=-1) # Minus one SRID.
objects = models.GeoManager()
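# A hedged query sketch (object names are illustrative): the GeoManager on
# each model enables spatial lookups against the geometry fields, e.g.
#     c = Country.objects.get(name='Texas')
#     City.objects.filter(point__within=c.mpoly)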
| gpl-2.0 | -5,466,062,899,039,462,000 | 32.517857 | 79 | 0.710176 | false |
kagayakidan/scikit-learn | sklearn/metrics/setup.py | 299 | 1024 | import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("metrics", parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension("pairwise_fast",
sources=["pairwise_fast.c"],
include_dirs=[os.path.join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause | -5,553,514,872,605,611,000 | 32.032258 | 79 | 0.508789 | false |
lferr/charm | charm/schemes/ibenc/ibenc_lsw08.py | 3 | 4047 | '''
Allison Lewko, Amit Sahai and Brent Waters (Pairing-based)
| From: "Revocation Systems with Very Small Private Keys"
| Published in: IEEE S&P 2010
| Available from: http://eprint.iacr.org/2008/309.pdf
| Notes: fully secure IBE Construction with revocable keys.
* type: identity-based encryption (public key)
* setting: Pairing
:Authors: J Ayo Akinyele
:Date: 1/2012
'''
from charm.toolbox.pairinggroup import ZR,G1,pair
from charm.toolbox.IBEnc import *
debug = False
class IBE_Revoke(IBEnc):
"""
>>> from charm.toolbox.pairinggroup import PairingGroup, GT, G2
>>> group = PairingGroup('SS512')
>>> num_users = 5 # total # of users
>>> ibe = IBE_Revoke(group)
>>> ID = "[email protected]"
>>> S = ["[email protected]", "[email protected]", "[email protected]"]
>>> (master_public_key, master_secret_key) = ibe.setup(num_users)
>>> secret_key = ibe.keygen(master_public_key, master_secret_key, ID)
>>> msg = group.random(GT)
>>> cipher_text = ibe.encrypt(master_public_key, msg, S)
>>> decrypted_msg = ibe.decrypt(S, cipher_text, secret_key)
>>> decrypted_msg == msg
True
"""
def __init__(self, groupObj):
IBEnc.__init__(self)
global group, util
group = groupObj
def setup(self, n):
g, w, h, v, v1, v2 = group.random(G1, 6)
a1, a2, b, alpha = group.random(ZR, 4)
tau1 = v * (v1 ** a1)
tau2 = v * (v2 ** a2)
pk = {'n':n, 'g':g, 'g^b':g ** b, 'g^a1':g ** a1, 'g^a2':g ** a2,
'g^ba1':g ** (b * a1), 'g^ba2':g ** (b * a2), 'tau1':tau1, 'tau2':tau2,
'tau1^b':tau1 ** b, 'tau2^b':tau2 ** b, 'w':w, 'h':h,
'egg_alpha': pair(g, g) ** (alpha * a1 * b)}
sk = {'g^alph':g ** alpha, 'g^alph_a1':g ** (alpha * a1),
'g^b':g ** b,'v':v, 'v1':v1, 'v2':v2, 'alpha':alpha }
return (pk, sk)
def keygen(self, mpk, msk, ID):
d1, d2, z1, z2 = group.random(ZR, 4)
d = d1 + d2
_ID = group.hash(ID.upper())
D = {}
D[1] = msk['g^alph_a1'] * (msk['v'] ** d)
D[2] = (mpk['g'] ** -msk['alpha']) * (msk['v1'] ** d) * (mpk['g'] ** z1)
D[3] = mpk['g^b'] ** -z1
D[4] = (msk['v2'] ** d) * (mpk['g'] ** z2)
D[5] = mpk['g^b'] ** -z2
D[6] = mpk['g^b'] ** d2
D[7] = mpk['g'] ** d1
K = ((mpk['w'] ** _ID) * mpk['h']) ** d1
sk = { 'ID':_ID, 'D':D, 'K':K }
return sk
def encrypt(self, mpk, M, S):
s1, s2 = group.random(ZR, 2)
s = s1 + s2
# number of revoked users
r = len(S); t_r = group.random(ZR, r)
t = 0
for i in t_r: t += i
C = {}
C[0] = M * (mpk['egg_alpha'] ** s2)
C[1] = mpk['g^b'] ** s
C[2] = mpk['g^ba1'] ** s1
C[3] = mpk['g^a1'] ** s1
C[4] = mpk['g^ba2'] ** s2
C[5] = mpk['g^a2'] ** s2
C[6] = (mpk['tau1'] ** s1) * (mpk['tau2'] ** s2)
C[7] = (mpk['tau1^b'] ** s1) * (mpk['tau2^b'] ** s2) * (mpk['w'] ** -t)
        c1 = [None] * r; c2 = [None] * r  # placeholders filled in below
for i in range(len(t_r)):
c1[i] = mpk['g'] ** t_r[i]
S_hash = group.hash(S[i].upper())
c2[i] = ((mpk['w'] ** S_hash) * mpk['h']) ** t_r[i]
C['i1'] = c1
C['i2'] = c2
return C
def decrypt(self, S, ct, sk):
C, D, K = ct, sk['D'], sk['K']
_ID = sk['ID']
# hash IDs
S_id = [group.hash(i.upper()) for i in S]
if debug: print("hashed IDs: ", S_id)
        if _ID in S_id: print("Your ID:", _ID, "is in the revoked list!"); return
A1 = pair(C[1], D[1]) * pair(C[2], D[2]) * pair(C[3], D[3]) * pair(C[4], D[4]) * pair(C[5], D[5])
A2 = pair(C[6], D[6]) * pair(C[7], D[7])
A3 = A1 / A2
A4 = 1
for i in range(len(S_id)):
A4 *= (pair(C['i1'][i], K) / pair(C['i2'][i], D[7])) ** (1 / (_ID - S_id[i]))
return C[0] / (A3 / A4)
| lgpl-3.0 | 6,864,128,126,658,822,000 | 34.814159 | 105 | 0.451198 | false |
Ictp/indico | ez_setup.py | 11 | 10278 | #!python
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import sys
DEFAULT_VERSION = "0.6c11"
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
md5_data = {
'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
'setuptools-0.6c10-py2.3.egg': 'ce1e2ab5d3a0256456d9fc13800a7090',
'setuptools-0.6c10-py2.4.egg': '57d6d9d6e9b80772c59a53a8433a5dd4',
'setuptools-0.6c10-py2.5.egg': 'de46ac8b1c97c895572e5e8596aeb8c7',
'setuptools-0.6c10-py2.6.egg': '58ea40aef06da02ce641495523a0b7f5',
'setuptools-0.6c11-py2.3.egg': '2baeac6e13d414a9d28e7ba5b5a596de',
'setuptools-0.6c11-py2.4.egg': 'bd639f9b0eac4c42497034dec2ec0c2b',
'setuptools-0.6c11-py2.5.egg': '64c94f3bf7a72a13ec83e0b24f2749b2',
'setuptools-0.6c11-py2.6.egg': 'bfa92100bd772d5a213eedd356d64086',
'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03',
'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a',
'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6',
'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a',
}
import sys, os
try: from hashlib import md5
except ImportError: from md5 import md5
def _validate_md5(egg_name, data):
if egg_name in md5_data:
digest = md5(data).hexdigest()
if digest != md5_data[egg_name]:
print >>sys.stderr, (
"md5 validation of %s failed! (Possible download problem?)"
% egg_name
)
sys.exit(2)
return data
def use_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
download_delay=15
):
"""Automatically find/download setuptools and make it available on sys.path
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end with
a '/'). `to_dir` is the directory where setuptools will be downloaded, if
it is not already available. If `download_delay` is specified, it should
be the number of seconds that will be paused before initiating a download,
should one be required. If an older version of setuptools is installed,
this routine will print a message to ``sys.stderr`` and raise SystemExit in
an attempt to abort the calling script.
"""
was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
def do_download():
egg = download_setuptools(version, download_base, to_dir, download_delay)
sys.path.insert(0, egg)
import setuptools; setuptools.bootstrap_install_from = egg
try:
import pkg_resources
except ImportError:
return do_download()
try:
pkg_resources.require("setuptools>="+version); return
except pkg_resources.VersionConflict, e:
if was_imported:
print >>sys.stderr, (
"The required version of setuptools (>=%s) is not available, and\n"
"can't be installed while this script is running. Please install\n"
" a more recent version first, using 'easy_install -U setuptools'."
"\n\n(Currently using %r)"
) % (version, e.args[0])
sys.exit(2)
else:
del pkg_resources, sys.modules['pkg_resources'] # reload ok
return do_download()
except pkg_resources.DistributionNotFound:
return do_download()
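# A hedged usage sketch for a package's setup.py (the version pin is
# illustrative):
#     from ez_setup import use_setuptools
#     use_setuptools(version='0.6c11')
#     from setuptools import setup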
def download_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
delay = 15
):
"""Download setuptools from a specified location and return its filename
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download attempt.
"""
import urllib2, shutil
egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
url = download_base + egg_name
saveto = os.path.join(to_dir, egg_name)
src = dst = None
if not os.path.exists(saveto): # Avoid repeated downloads
try:
from distutils import log
if delay:
log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
version, download_base, delay, url
); from time import sleep; sleep(delay)
log.warn("Downloading %s", url)
src = urllib2.urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = _validate_md5(egg_name, src.read())
dst = open(saveto,"wb"); dst.write(data)
finally:
if src: src.close()
if dst: dst.close()
return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
"""Install or upgrade setuptools and EasyInstall"""
try:
import setuptools
except ImportError:
egg = None
try:
egg = download_setuptools(version, delay=0)
sys.path.insert(0,egg)
from setuptools.command.easy_install import main
return main(list(argv)+[egg]) # we're done here
finally:
if egg and os.path.exists(egg):
os.unlink(egg)
else:
if setuptools.__version__ == '0.0.1':
print >>sys.stderr, (
"You have an obsolete version of setuptools installed. Please\n"
"remove it from your system entirely before rerunning this script."
)
sys.exit(2)
req = "setuptools>="+version
import pkg_resources
try:
pkg_resources.require(req)
except pkg_resources.VersionConflict:
try:
from setuptools.command.easy_install import main
except ImportError:
from easy_install import main
main(list(argv)+[download_setuptools(delay=0)])
sys.exit(0) # try to force an exit
else:
if argv:
from setuptools.command.easy_install import main
main(argv)
else:
print "Setuptools version",version,"or greater has been installed."
print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
"""Update our built-in md5 registry"""
import re
for name in filenames:
base = os.path.basename(name)
f = open(name,'rb')
md5_data[base] = md5(f.read()).hexdigest()
f.close()
data = [" %r: %r,\n" % it for it in md5_data.items()]
data.sort()
repl = "".join(data)
import inspect
srcfile = inspect.getsourcefile(sys.modules[__name__])
f = open(srcfile, 'rb'); src = f.read(); f.close()
match = re.search("\nmd5_data = {\n([^}]+)}", src)
if not match:
print >>sys.stderr, "Internal error!"
sys.exit(2)
src = src[:match.start(1)] + repl + src[match.end(1):]
f = open(srcfile,'w')
f.write(src)
f.close()
if __name__=='__main__':
if len(sys.argv)>2 and sys.argv[1]=='--md5update':
update_md5(sys.argv[2:])
else:
main(sys.argv[1:])
| gpl-3.0 | 5,455,021,603,961,303,000 | 35.190141 | 86 | 0.652267 | false |
ibusybox/pkcs | src/main/python/pkcs/opensslconf.py | 1 | 3420 | #!/usr/bin/env python
# coding=utf8
caOpensslConf = '''
#http://www.phildev.net/ssl/opensslconf.html
[ ca ]
default_ca = CA_default
[CA_default]
caroot = %(caroot)s
certs = $caroot/certsdb
new_certs_dir = $certs
database = $caroot/index.txt
certificate = $caroot/%(cn)s.cer
private_key = $caroot/%(cn)s-key.pem
serial = $caroot/serial
#crldir = $caroot/crl
#crlnumber = $caroot/crlnumber
#crl = $crldir/crl.pem
RANDFILE = $caroot/private/.rand
x509_extensions = usr_cert
#copy_extensions = copy
name_opt = ca_default
cert_opt = ca_default
default_days = 365
#default_crl_days= 30
default_md = sha256
preserve = no
policy = policy_match
[ policy_match ]
countryName = match
stateOrProvinceName = match
localityName = supplied
organizationName = match
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ policy_anything ]
countryName = optional
stateOrProvinceName = optional
localityName = optional
organizationName = optional
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ req ]
default_bits = 4096
default_keyfile = privkey.pem
distinguished_name = req_distinguished_name
attributes = req_attributes
x509_extensions = v3_ca
req_extensions = v3_req
string_mask = nombstr
[ req_distinguished_name ]
C = %(c)s
ST = %(st)s
L = %(l)s
O = %(o)s
OU = %(ou)s
CN = %(cn)s
#emailAddress = $ENV::REQ_EMAIL
[ req_attributes ]
[ usr_cert ]
basicConstraints = CA:false
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer
subjectAltName = $ENV::SUBJECT_ALT_NAME
[ v3_req ]
#subjectAltName = %(subjectAltName)s
[ v3_ca ]
subjectKeyIdentifier=hash
authorityKeyIdentifier=keyid:always,issuer:always
basicConstraints = CA:true
'''
certOpensslConf = '''
#http://www.phildev.net/ssl/opensslconf.html
x509_extensions = usr_cert
#copy_extensions = copy
default_days = 365
#default_crl_days= 30
default_md = sha256
preserve = no
policy = policy_match
[ policy_match ]
countryName = match
stateOrProvinceName = match
localityName = supplied
organizationName = match
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ policy_anything ]
countryName = optional
stateOrProvinceName = optional
localityName = optional
organizationName = optional
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ req ]
default_bits = 4096
default_keyfile = privkey.pem
distinguished_name = req_distinguished_name
attributes = req_attributes
x509_extensions = v3_req
req_extensions = v3_req
string_mask = nombstr
[ req_distinguished_name ]
C = %(c)s
ST = %(st)s
L = %(l)s
O = %(o)s
OU = %(ou)s
CN = %(cn)s
#emailAddress = $ENV::REQ_EMAIL
[ req_attributes ]
[ usr_cert ]
basicConstraints = CA:false
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer
[ v3_req ]
subjectAltName = %(subjectAltName)s
'''
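# A minimal rendering sketch; the field values below are placeholders and
# not part of this module. Both templates are plain %-format strings.
if __name__ == '__main__':
    demo = {'c': 'FI', 'st': 'Uusimaa', 'l': 'Helsinki', 'o': 'Example',
            'ou': 'IT', 'cn': 'example.org',
            'subjectAltName': 'DNS:example.org'}
    print(certOpensslConf % demo)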
| apache-2.0 | 6,637,962,201,002,830,000 | 20.352941 | 49 | 0.62076 | false |
saisiddhant12/time_management_system | db.py | 1 | 1248 | import time
import datetime
import sqlite3
conn = sqlite3.connect('diary.db')
c = conn.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS executive
             (unid INTEGER PRIMARY KEY, name text, designation text, abs text)''')
no_of_exec = input()  # enter the total number of executives
while no_of_exec:
    no_of_exec = no_of_exec - 1
    eid = input("Enter the eid of the executive")  # the UID assigned to the executive
    nam = raw_input("Enter the name of the executive")
    des = raw_input("Enter the mail ID")
    # insert the executive's credentials into the database
    c.execute("INSERT INTO executive (unid, name, designation) VALUES (?, ?, ?)",
              (eid, nam, des))
    a = input("Enter 1 for taking a leave")
    if a == 1:
        yyyy = input("Enter the year\t")
        mm = input("Enter the month\t")
        dd = input("Enter the day\t")
        leave = str(datetime.datetime(yyyy, mm, dd))  # the leave date
        # bind the leave date and the executive's id as query parameters
        c.execute("UPDATE executive SET abs = ? WHERE unid = ?", (leave, eid))
    conn.commit()
#c.execute("DROP TABLE executive")
for row in c.execute('SELECT * FROM executive'):  # display the table contents
    print(row)
conn.close() | gpl-2.0 | 57,883,323,004,151,864 | 39.290323 | 136 | 0.651442 | false |
lra/boto | tests/unit/vpc/test_subnet.py | 113 | 5485 | from tests.compat import OrderedDict
from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
from boto.vpc import VPCConnection, Subnet
class TestDescribeSubnets(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DescribeSubnetsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<subnetSet>
<item>
<subnetId>subnet-9d4a7b6c</subnetId>
<state>available</state>
<vpcId>vpc-1a2b3c4d</vpcId>
<cidrBlock>10.0.1.0/24</cidrBlock>
<availableIpAddressCount>251</availableIpAddressCount>
<availabilityZone>us-east-1a</availabilityZone>
<defaultForAz>false</defaultForAz>
<mapPublicIpOnLaunch>false</mapPublicIpOnLaunch>
<tagSet/>
</item>
<item>
<subnetId>subnet-6e7f829e</subnetId>
<state>available</state>
<vpcId>vpc-1a2b3c4d</vpcId>
<cidrBlock>10.0.0.0/24</cidrBlock>
<availableIpAddressCount>251</availableIpAddressCount>
<availabilityZone>us-east-1a</availabilityZone>
<defaultForAz>false</defaultForAz>
<mapPublicIpOnLaunch>false</mapPublicIpOnLaunch>
<tagSet/>
</item>
</subnetSet>
</DescribeSubnetsResponse>
"""
def test_get_all_subnets(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.get_all_subnets(
['subnet-9d4a7b6c', 'subnet-6e7f829e'],
filters=OrderedDict([('state', 'available'),
('vpc-id', ['subnet-9d4a7b6c', 'subnet-6e7f829e'])]))
self.assert_request_parameters({
'Action': 'DescribeSubnets',
'SubnetId.1': 'subnet-9d4a7b6c',
'SubnetId.2': 'subnet-6e7f829e',
'Filter.1.Name': 'state',
'Filter.1.Value.1': 'available',
'Filter.2.Name': 'vpc-id',
'Filter.2.Value.1': 'subnet-9d4a7b6c',
'Filter.2.Value.2': 'subnet-6e7f829e'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(len(api_response), 2)
self.assertIsInstance(api_response[0], Subnet)
self.assertEqual(api_response[0].id, 'subnet-9d4a7b6c')
self.assertEqual(api_response[1].id, 'subnet-6e7f829e')
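    # For illustration: get_all_subnets serializes the OrderedDict filters
    # above into indexed query parameters, e.g.
    #     Filter.1.Name=state&Filter.1.Value.1=available
    # which is exactly what assert_request_parameters verifies.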
class TestCreateSubnet(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<CreateSubnetResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<subnet>
<subnetId>subnet-9d4a7b6c</subnetId>
<state>pending</state>
<vpcId>vpc-1a2b3c4d</vpcId>
<cidrBlock>10.0.1.0/24</cidrBlock>
<availableIpAddressCount>251</availableIpAddressCount>
<availabilityZone>us-east-1a</availabilityZone>
<tagSet/>
</subnet>
</CreateSubnetResponse>
"""
def test_create_subnet(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_subnet(
'vpc-1a2b3c4d', '10.0.1.0/24', 'us-east-1a')
self.assert_request_parameters({
'Action': 'CreateSubnet',
'VpcId': 'vpc-1a2b3c4d',
'CidrBlock': '10.0.1.0/24',
'AvailabilityZone': 'us-east-1a'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertIsInstance(api_response, Subnet)
self.assertEquals(api_response.id, 'subnet-9d4a7b6c')
self.assertEquals(api_response.state, 'pending')
self.assertEquals(api_response.vpc_id, 'vpc-1a2b3c4d')
self.assertEquals(api_response.cidr_block, '10.0.1.0/24')
self.assertEquals(api_response.available_ip_address_count, 251)
self.assertEquals(api_response.availability_zone, 'us-east-1a')
class TestDeleteSubnet(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DeleteSubnetResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<return>true</return>
</DeleteSubnetResponse>
"""
def test_delete_subnet(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.delete_subnet('subnet-9d4a7b6c')
self.assert_request_parameters({
'Action': 'DeleteSubnet',
'SubnetId': 'subnet-9d4a7b6c'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
if __name__ == '__main__':
unittest.main()
| mit | 5,908,669,427,398,076,000 | 40.240602 | 86 | 0.571559 | false |
pravsripad/jumeg | jumeg/jumeg_test.py | 3 | 1803 | #!/usr/bin/env python
import jumeg
import os.path
raw_fname = "109925_CAU01A_100715_0842_2_c,rfDC-raw.fif"
if not os.path.isfile(raw_fname):
print("Please find the test file at the below location on the meg_store2 network drive - \
cp /data/meg_store2/fif_data/jumeg_test_data/109925_CAU01A_100715_0842_2_c,rfDC-raw.fif .")
# Function to check and explain the file naming standards
#jumeg.jumeg_utils.check_jumeg_standards(raw_fname)
# Function to apply noise reducer
jumeg.jumeg_noise_reducer.noise_reducer(raw_fname, verbose=True)
# Filter functions
#jumeg.jumeg_preprocessing.apply_filter(raw_fname)
fclean = raw_fname[:raw_fname.rfind('-raw.fif')] + ',bp1-45Hz-raw.fif'
# Evoked functions
#jumeg.jumeg_preprocessing.apply_average(fclean)
# ICA functions
#jumeg.jumeg_preprocessing.apply_ica(fclean)
fica_name = fclean[:fclean.rfind('-raw.fif')] + '-ica.fif'
# Perform ECG/EOG rejection using ICA
#jumeg.jumeg_preprocessing.apply_ica_cleaning(fica_name)
#jumeg.jumeg_preprocessing.apply_ica_cleaning(fica_name, unfiltered=True)
# OCARTA cleaning
from jumeg.decompose import ocarta
ocarta_obj = ocarta.JuMEG_ocarta()
ocarta_obj.fit(fclean, unfiltered=False, verbose=True)
# CTPS functions
#jumeg.jumeg_preprocessing.apply_ctps(fica_name)
fctps_name = '109925_CAU01A_100715_0842_2_c,rfDC,bp1-45Hz,ctps-trigger.npy'
#jumeg.jumeg_preprocessing.apply_ctps_select_ic(fctps_name)
# Function recompose brain response components only
fname_ctps_ics = '109925_CAU01A_100715_0842_2_c,rfDC,bp1-45Hz,ctps-trigger-ic_selection.txt'
#jumeg.jumeg_preprocessing.apply_ica_select_brain_response(fname_ctps_ics)
# Function to process empty file
empty_fname = '109925_CAU01A_100715_0844_2_c,rfDC-empty.fif'
#jumeg.jumeg_preprocessing.apply_create_noise_covariance(empty_fname, verbose=True)
| bsd-3-clause | 3,525,905,056,052,199,000 | 35.06 | 102 | 0.770937 | false |
WoLpH/EventGhost | _build/builder/Logging.py | 1 | 1901 | # -*- coding: utf-8 -*-
#
# This file is part of EventGhost.
# Copyright © 2005-2016 EventGhost Project <http://www.eventghost.net/>
#
# EventGhost is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# EventGhost is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with EventGhost. If not, see <http://www.gnu.org/licenses/>.
import logging
import sys
class StdHandler(object):
indent = 0
def __init__(self, oldStream, logger):
self.oldStream = oldStream
self.encoding = oldStream.encoding
self.buf = ""
self.logger = logger
# the following is a workaround for colorama (0.3.6),
# which is called by sphinx (build CHM docs).
self.closed = False
def flush(self):
pass
def isatty(self):
return True
def write(self, data):
try:
self.buf += data
except UnicodeError:
self.buf += data.decode('mbcs')
        lines = self.buf.split("\n")
        for line in lines[:-1]:
line = (self.indent * 4 * " ") + line.rstrip()
self.logger(line)
self.oldStream.write(line + "\n")
self.buf = lines[-1]
def LogToFile(file):
logging.basicConfig(filename=file, level=logging.DEBUG,)
logging.getLogger().setLevel(20)
sys.stdout = StdHandler(sys.stdout, logging.info)
sys.stderr = StdHandler(sys.stderr, logging.error)
def SetIndent(level):
StdHandler.indent = level
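# A hedged usage sketch (the log file name is illustrative): route stdout and
# stderr through the indenting handlers defined above.
#     LogToFile('build.log')
#     SetIndent(1)
#     print "nested step"   # mirrored to the console and to build.log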
| gpl-2.0 | -8,050,787,675,913,965,000 | 30.147541 | 77 | 0.652632 | false |
jgraham/servo | tests/wpt/web-platform-tests/tools/six/six.py | 426 | 27961 | """Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2014 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <[email protected]>"
__version__ = "1.8.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
try:
# This is a bit ugly, but it avoids running this again by
# removing this descriptor.
delattr(obj.__class__, self.name)
except AttributeError:
pass
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
    This class implements a PEP 302 finder and loader. It should be compatible
    with Python 2.5 and all existing versions of Python 3.
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
        Return true if the named module is a package.
        We need this method to get correct spec objects with
        Python 3.4 (see PEP 451).
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
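# A hedged usage sketch: the registry above lets renamed stdlib modules be
# imported under a single name on both Python versions, e.g.
#     from six.moves import range, configparser
#     from six.moves.urllib.parse import urlparse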
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return iter(d.iterkeys(**kw))
def itervalues(d, **kw):
return iter(d.itervalues(**kw))
def iteritems(d, **kw):
return iter(d.iteritems(**kw))
def iterlists(d, **kw):
return iter(d.iterlists(**kw))
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
if sys.version_info > (3, 2):
exec_("""def raise_from(value, from_value):
raise value from from_value
""")
else:
def raise_from(value, from_value):
raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
def wrapper(f):
f = functools.wraps(wrapped, assigned, updated)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
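# Usage sketch:
#   class MyClass(with_metaclass(Meta, Base)):
#       pass
# behaves like ``class MyClass(Base, metaclass=Meta)`` would on Python 3,
# but also works on Python 2.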
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
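# Usage sketch:
#   @add_metaclass(Meta)
#   class MyClass(object):
#       pass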
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
        # inserted an importer with a different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
| mpl-2.0 | 8,156,796,184,431,290,000 | 34.52859 | 98 | 0.633382 | false |
ygenc/onlineLDA | onlineldavb_new/build/scipy/scipy/integrate/_ode.py | 5 | 28343 | # Authors: Pearu Peterson, Pauli Virtanen, John Travers
"""
First-order ODE integrators.
User-friendly interface to various numerical integrators for solving a
system of first order ODEs with prescribed initial conditions::
d y(t)[i]
--------- = f(t,y(t))[i],
d t
y(t=0)[i] = y0[i],
where::
i = 0, ..., len(y0) - 1
class ode
---------
A generic interface class to numeric integrators. It has the following
methods::
integrator = ode(f,jac=None)
integrator = integrator.set_integrator(name,**params)
integrator = integrator.set_initial_value(y0,t0=0.0)
integrator = integrator.set_f_params(*args)
integrator = integrator.set_jac_params(*args)
y1 = integrator.integrate(t1,step=0,relax=0)
flag = integrator.successful()
class complex_ode
-----------------
This class has the same generic interface as ode, except it can handle complex
f, y and Jacobians by transparently translating them into the equivalent
real valued system. It supports the real-valued solvers (i.e. not zvode) and is
an alternative to ode with the zvode solver, sometimes performing better.
"""
# XXX: Integrators must have:
# ===========================
# cvode - C version of vode and vodpk with many improvements.
#   Get it from http://www.netlib.org/ode/cvode.tar.gz
#   To wrap cvode for Python, one must write an extension module by
#   hand. Its interface involves too much 'advanced C' for f2py, so
#   wrapping it that way would be too complicated (or impossible).
#
# How to define a new integrator:
# ===============================
#
# class myodeint(IntegratorBase):
#
# runner = <odeint function> or None
#
# def __init__(self,...): # required
# <initialize>
#
# def reset(self,n,has_jac): # optional
# # n - the size of the problem (number of equations)
# # has_jac - whether user has supplied its own routine for Jacobian
# <allocate memory,initialize further>
#
# def run(self,f,jac,y0,t0,t1,f_params,jac_params): # required
# # this method is called to integrate from t=t0 to t=t1
# # with initial condition y0. f and jac are user-supplied functions
# # that define the problem. f_params,jac_params are additional
# # arguments
# # to these functions.
# <calculate y1>
#            if <calculation was unsuccessful>:
# self.success = 0
# return t1,y1
#
# # In addition, one can define step() and run_relax() methods (they
# # take the same arguments as run()) if the integrator can support
# # these features (see IntegratorBase doc strings).
#
# if myodeint.runner:
# IntegratorBase.integrator_classes.append(myodeint)
__all__ = ['ode', 'complex_ode']
__version__ = "$Id$"
__docformat__ = "restructuredtext en"
import re
import warnings
from numpy import asarray, array, zeros, int32, isscalar, real, imag
import vode as _vode
import _dop
#------------------------------------------------------------------------------
# User interface
#------------------------------------------------------------------------------
class ode(object):
"""\
A generic interface class to numeric integrators.
Solve an equation system :math:`y'(t) = f(t,y)` with (optional) ``jac = df/dy``.
Parameters
----------
f : callable f(t, y, *f_args)
Rhs of the equation. t is a scalar, y.shape == (n,).
f_args is set by calling set_f_params(*args)
jac : callable jac(t, y, *jac_args)
Jacobian of the rhs, jac[i,j] = d f[i] / d y[j]
jac_args is set by calling set_f_params(*args)
Attributes
----------
t : float
Current time
y : ndarray
Current variable values
See also
--------
odeint : an integrator with a simpler interface based on lsoda from ODEPACK
quad : for finding the area under a curve
Notes
-----
Available integrators are listed below. They can be selected using
the `set_integrator` method.
"vode"
Real-valued Variable-coefficient Ordinary Differential Equation
solver, with fixed-leading-coefficient implementation. It provides
implicit Adams method (for non-stiff problems) and a method based on
backward differentiation formulas (BDF) (for stiff problems).
Source: http://www.netlib.org/ode/vode.f
.. warning::
This integrator is not re-entrant. You cannot have two `ode`
instances using the "vode" integrator at the same time.
This integrator accepts the following parameters in `set_integrator`
method of the `ode` class:
- atol : float or sequence
absolute tolerance for solution
- rtol : float or sequence
relative tolerance for solution
- lband : None or int
      - uband : None or int
        Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+uband.
Setting these requires your jac routine to return the jacobian
in packed format, jac_packed[i-j+lband, j] = jac[i,j].
- method: 'adams' or 'bdf'
Which solver to use, Adams (non-stiff) or BDF (stiff)
- with_jacobian : bool
Whether to use the jacobian
- nsteps : int
Maximum number of (internally defined) steps allowed during one
call to the solver.
- first_step : float
- min_step : float
- max_step : float
Limits for the step sizes used by the integrator.
- order : int
Maximum order used by the integrator,
order <= 12 for Adams, <= 5 for BDF.
"zvode"
Complex-valued Variable-coefficient Ordinary Differential Equation
solver, with fixed-leading-coefficient implementation. It provides
implicit Adams method (for non-stiff problems) and a method based on
backward differentiation formulas (BDF) (for stiff problems).
Source: http://www.netlib.org/ode/zvode.f
.. warning::
This integrator is not re-entrant. You cannot have two `ode`
instances using the "zvode" integrator at the same time.
This integrator accepts the same parameters in `set_integrator`
as the "vode" solver.
.. note::
When using ZVODE for a stiff system, it should only be used for
the case in which the function f is analytic, that is, when each f(i)
is an analytic function of each y(j). Analyticity means that the
partial derivative df(i)/dy(j) is a unique complex number, and this
fact is critical in the way ZVODE solves the dense or banded linear
systems that arise in the stiff case. For a complex stiff ODE system
in which f is not analytic, ZVODE is likely to have convergence
failures, and for this problem one should instead use DVODE on the
equivalent real system (in the real and imaginary parts of y).
"dopri5"
        This is an explicit Runge-Kutta method of order (4)5 due to Dormand &
Prince (with stepsize control and dense output).
Authors:
E. Hairer and G. Wanner
Universite de Geneve, Dept. de Mathematiques
CH-1211 Geneve 24, Switzerland
e-mail: [email protected], [email protected]
This code is described in [HNW93]_.
This integrator accepts the following parameters in set_integrator()
method of the ode class:
- atol : float or sequence
absolute tolerance for solution
- rtol : float or sequence
relative tolerance for solution
- nsteps : int
Maximum number of (internally defined) steps allowed during one
call to the solver.
- first_step : float
- max_step : float
- safety : float
Safety factor on new step selection (default 0.9)
- ifactor : float
- dfactor : float
Maximum factor to increase/decrease step size by in one step
- beta : float
Beta parameter for stabilised step size control.
"dop853"
        This is an explicit Runge-Kutta method of order 8(5,3) due to Dormand
& Prince (with stepsize control and dense output).
Options and references the same as "dopri5".
Examples
--------
A problem to integrate and the corresponding jacobian:
>>> from scipy.integrate import ode
>>>
>>> y0, t0 = [1.0j, 2.0], 0
>>>
    >>> def f(t, y, arg1):
    ...     return [1j*arg1*y[0] + y[1], -arg1*y[1]**2]
    >>> def jac(t, y, arg1):
    ...     return [[1j*arg1, 1], [0, -arg1*2*y[1]]]
The integration:
>>> r = ode(f, jac).set_integrator('zvode', method='bdf', with_jacobian=True)
>>> r.set_initial_value(y0, t0).set_f_params(2.0).set_jac_params(2.0)
>>> t1 = 10
>>> dt = 1
    >>> while r.successful() and r.t < t1:
    ...     r.integrate(r.t+dt)
    ...     print r.t, r.y
References
----------
.. [HNW93] E. Hairer, S.P. Norsett and G. Wanner, Solving Ordinary
Differential Equations i. Nonstiff Problems. 2nd edition.
Springer Series in Computational Mathematics,
Springer-Verlag (1993)
"""
def __init__(self, f, jac=None):
self.stiff = 0
self.f = f
self.jac = jac
self.f_params = ()
self.jac_params = ()
self.y = []
def set_initial_value(self, y, t=0.0):
"""Set initial conditions y(t) = y."""
if isscalar(y):
y = [y]
n_prev = len(self.y)
if not n_prev:
self.set_integrator('') # find first available integrator
self.y = asarray(y, self._integrator.scalar)
self.t = t
self._integrator.reset(len(self.y), self.jac is not None)
return self
def set_integrator(self, name, **integrator_params):
"""
Set integrator by name.
Parameters
----------
name : str
Name of the integrator.
integrator_params :
Additional parameters for the integrator.
"""
integrator = find_integrator(name)
if integrator is None:
            # FIXME: this really should raise an exception. Will that break
# any code?
warnings.warn('No integrator name match with %r or is not '
'available.' % name)
else:
self._integrator = integrator(**integrator_params)
if not len(self.y):
self.t = 0.0
self.y = array([0.0], self._integrator.scalar)
self._integrator.reset(len(self.y), self.jac is not None)
return self
def integrate(self, t, step=0, relax=0):
"""Find y=y(t), set y as an initial condition, and return y."""
if step and self._integrator.supports_step:
mth = self._integrator.step
elif relax and self._integrator.supports_run_relax:
mth = self._integrator.run_relax
else:
mth = self._integrator.run
self.y, self.t = mth(self.f, self.jac or (lambda: None),
self.y, self.t, t,
self.f_params, self.jac_params)
return self.y
def successful(self):
"""Check if integration was successful."""
try:
self._integrator
except AttributeError:
self.set_integrator('')
return self._integrator.success == 1
def set_f_params(self, *args):
"""Set extra parameters for user-supplied function f."""
self.f_params = args
return self
def set_jac_params(self, *args):
"""Set extra parameters for user-supplied function jac."""
self.jac_params = args
return self
class complex_ode(ode):
"""
A wrapper of ode for complex systems.
This functions similarly as `ode`, but re-maps a complex-valued
equation system to a real-valued one before using the integrators.
Parameters
----------
f : callable f(t, y, *f_args)
Rhs of the equation. t is a scalar, y.shape == (n,).
f_args is set by calling set_f_params(*args)
jac : jac(t, y, *jac_args)
Jacobian of the rhs, jac[i,j] = d f[i] / d y[j]
jac_args is set by calling set_f_params(*args)
Attributes
----------
t : float
Current time
y : ndarray
Current variable values
Examples
--------
For usage examples, see `ode`.
"""
def __init__(self, f, jac=None):
self.cf = f
self.cjac = jac
if jac is not None:
ode.__init__(self, self._wrap, self._wrap_jac)
else:
ode.__init__(self, self._wrap, None)
def _wrap(self, t, y, *f_args):
f = self.cf(*((t, y[::2] + 1j * y[1::2]) + f_args))
self.tmp[::2] = real(f)
self.tmp[1::2] = imag(f)
return self.tmp
def _wrap_jac(self, t, y, *jac_args):
jac = self.cjac(*((t, y[::2] + 1j * y[1::2]) + jac_args))
self.jac_tmp[1::2, 1::2] = self.jac_tmp[::2, ::2] = real(jac)
self.jac_tmp[1::2, ::2] = imag(jac)
self.jac_tmp[::2, 1::2] = -self.jac_tmp[1::2, ::2]
return self.jac_tmp
def set_integrator(self, name, **integrator_params):
"""
Set integrator by name.
Parameters
----------
name : str
Name of the integrator
integrator_params :
Additional parameters for the integrator.
"""
if name == 'zvode':
raise ValueError("zvode should be used with ode, not zode")
return ode.set_integrator(self, name, **integrator_params)
def set_initial_value(self, y, t=0.0):
"""Set initial conditions y(t) = y."""
y = asarray(y)
self.tmp = zeros(y.size * 2, 'float')
self.tmp[::2] = real(y)
self.tmp[1::2] = imag(y)
if self.cjac is not None:
self.jac_tmp = zeros((y.size * 2, y.size * 2), 'float')
return ode.set_initial_value(self, self.tmp, t)
def integrate(self, t, step=0, relax=0):
"""Find y=y(t), set y as an initial condition, and return y."""
y = ode.integrate(self, t, step, relax)
return y[::2] + 1j * y[1::2]
#------------------------------------------------------------------------------
# ODE integrators
#------------------------------------------------------------------------------
def find_integrator(name):
for cl in IntegratorBase.integrator_classes:
if re.match(name, cl.__name__, re.I):
return cl
return None
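# Note (added for clarity): `name` is used as a case-insensitive regular
# expression matched against the start of each registered class name, so
# find_integrator("vode") returns the vode class and an empty name matches
# the first available integrator (which is what ode.set_integrator('') relies on).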
class IntegratorConcurrencyError(RuntimeError):
"""
Failure due to concurrent usage of an integrator that can be used
only for a single problem at a time.
"""
def __init__(self, name):
msg = ("Integrator `%s` can be used to solve only a single problem "
"at a time. If you want to integrate multiple problems, "
"consider using a different integrator "
"(see `ode.set_integrator`)") % name
RuntimeError.__init__(self, msg)
class IntegratorBase(object):
runner = None # runner is None => integrator is not available
success = None # success==1 if integrator was called successfully
supports_run_relax = None
supports_step = None
integrator_classes = []
scalar = float
def acquire_new_handle(self):
# Some of the integrators have internal state (ancient
# Fortran...), and so only one instance can use them at a time.
# We keep track of this, and fail when concurrent usage is tried.
self.__class__.active_global_handle += 1
self.handle = self.__class__.active_global_handle
def check_handle(self):
if self.handle is not self.__class__.active_global_handle:
raise IntegratorConcurrencyError(self.__class__.__name__)
def reset(self, n, has_jac):
"""Prepare integrator for call: allocate memory, set flags, etc.
n - number of equations.
has_jac - if user has supplied function for evaluating Jacobian.
"""
def run(self, f, jac, y0, t0, t1, f_params, jac_params):
"""Integrate from t=t0 to t=t1 using y0 as an initial condition.
Return 2-tuple (y1,t1) where y1 is the result and t=t1
        defines the time at which the integration stopped.
"""
raise NotImplementedError('all integrators must define '
'run(f, jac, t0, t1, y0, f_params, jac_params)')
def step(self, f, jac, y0, t0, t1, f_params, jac_params):
"""Make one integration step and return (y1,t1)."""
raise NotImplementedError('%s does not support step() method' %
self.__class__.__name__)
def run_relax(self, f, jac, y0, t0, t1, f_params, jac_params):
"""Integrate from t=t0 to t>=t1 and return (y1,t)."""
raise NotImplementedError('%s does not support run_relax() method' %
self.__class__.__name__)
#XXX: __str__ method for getting visual state of the integrator
class vode(IntegratorBase):
runner = getattr(_vode, 'dvode', None)
messages = {-1: 'Excess work done on this call. (Perhaps wrong MF.)',
-2: 'Excess accuracy requested. (Tolerances too small.)',
-3: 'Illegal input detected. (See printed message.)',
-4: 'Repeated error test failures. (Check all input.)',
-5: 'Repeated convergence failures. (Perhaps bad'
' Jacobian supplied or wrong choice of MF or tolerances.)',
-6: 'Error weight became zero during problem. (Solution'
' component i vanished, and ATOL or ATOL(i) = 0.)'
}
supports_run_relax = 1
supports_step = 1
active_global_handle = 0
def __init__(self,
method='adams',
with_jacobian=0,
rtol=1e-6, atol=1e-12,
lband=None, uband=None,
order=12,
nsteps=500,
max_step=0.0, # corresponds to infinite
min_step=0.0,
first_step=0.0, # determined by solver
):
if re.match(method, r'adams', re.I):
self.meth = 1
elif re.match(method, r'bdf', re.I):
self.meth = 2
else:
raise ValueError('Unknown integration method %s' % method)
self.with_jacobian = with_jacobian
self.rtol = rtol
self.atol = atol
self.mu = uband
self.ml = lband
self.order = order
self.nsteps = nsteps
self.max_step = max_step
self.min_step = min_step
self.first_step = first_step
self.success = 1
self.initialized = False
def reset(self, n, has_jac):
# Calculate parameters for Fortran subroutine dvode.
if has_jac:
if self.mu is None and self.ml is None:
miter = 1
else:
if self.mu is None:
self.mu = 0
if self.ml is None:
self.ml = 0
miter = 4
else:
if self.mu is None and self.ml is None:
if self.with_jacobian:
miter = 2
else:
miter = 0
else:
if self.mu is None:
self.mu = 0
if self.ml is None:
self.ml = 0
if self.ml == self.mu == 0:
miter = 3
else:
miter = 5
mf = 10 * self.meth + miter
if mf == 10:
lrw = 20 + 16 * n
elif mf in [11, 12]:
lrw = 22 + 16 * n + 2 * n * n
elif mf == 13:
lrw = 22 + 17 * n
elif mf in [14, 15]:
lrw = 22 + 18 * n + (3 * self.ml + 2 * self.mu) * n
elif mf == 20:
lrw = 20 + 9 * n
elif mf in [21, 22]:
lrw = 22 + 9 * n + 2 * n * n
elif mf == 23:
lrw = 22 + 10 * n
elif mf in [24, 25]:
lrw = 22 + 11 * n + (3 * self.ml + 2 * self.mu) * n
else:
raise ValueError('Unexpected mf=%s' % mf)
if miter in [0, 3]:
liw = 30
else:
liw = 30 + n
rwork = zeros((lrw,), float)
rwork[4] = self.first_step
rwork[5] = self.max_step
rwork[6] = self.min_step
self.rwork = rwork
iwork = zeros((liw,), int32)
if self.ml is not None:
iwork[0] = self.ml
if self.mu is not None:
iwork[1] = self.mu
iwork[4] = self.order
iwork[5] = self.nsteps
iwork[6] = 2 # mxhnil
self.iwork = iwork
self.call_args = [self.rtol, self.atol, 1, 1,
self.rwork, self.iwork, mf]
self.success = 1
self.initialized = False
def run(self, *args):
if self.initialized:
self.check_handle()
else:
self.initialized = True
self.acquire_new_handle()
y1, t, istate = self.runner(*(args[:5] + tuple(self.call_args) +
args[5:]))
if istate < 0:
warnings.warn('vode: ' +
self.messages.get(istate,
'Unexpected istate=%s' % istate))
self.success = 0
else:
self.call_args[3] = 2 # upgrade istate from 1 to 2
return y1, t
def step(self, *args):
itask = self.call_args[2]
self.call_args[2] = 2
r = self.run(*args)
self.call_args[2] = itask
return r
def run_relax(self, *args):
itask = self.call_args[2]
self.call_args[2] = 3
r = self.run(*args)
self.call_args[2] = itask
return r
if vode.runner is not None:
IntegratorBase.integrator_classes.append(vode)
class zvode(vode):
runner = getattr(_vode, 'zvode', None)
supports_run_relax = 1
supports_step = 1
scalar = complex
active_global_handle = 0
def reset(self, n, has_jac):
# Calculate parameters for Fortran subroutine dvode.
if has_jac:
if self.mu is None and self.ml is None:
miter = 1
else:
if self.mu is None:
self.mu = 0
if self.ml is None:
self.ml = 0
miter = 4
else:
if self.mu is None and self.ml is None:
if self.with_jacobian:
miter = 2
else:
miter = 0
else:
if self.mu is None:
self.mu = 0
if self.ml is None:
self.ml = 0
if self.ml == self.mu == 0:
miter = 3
else:
miter = 5
mf = 10 * self.meth + miter
if mf in (10,):
lzw = 15 * n
elif mf in (11, 12):
lzw = 15 * n + 2 * n ** 2
elif mf in (-11, -12):
lzw = 15 * n + n ** 2
elif mf in (13,):
lzw = 16 * n
elif mf in (14, 15):
lzw = 17 * n + (3 * self.ml + 2 * self.mu) * n
elif mf in (-14, -15):
lzw = 16 * n + (2 * self.ml + self.mu) * n
elif mf in (20,):
lzw = 8 * n
elif mf in (21, 22):
lzw = 8 * n + 2 * n ** 2
elif mf in (-21, -22):
lzw = 8 * n + n ** 2
elif mf in (23,):
lzw = 9 * n
elif mf in (24, 25):
lzw = 10 * n + (3 * self.ml + 2 * self.mu) * n
elif mf in (-24, -25):
lzw = 9 * n + (2 * self.ml + self.mu) * n
lrw = 20 + n
if miter in (0, 3):
liw = 30
else:
liw = 30 + n
zwork = zeros((lzw,), complex)
self.zwork = zwork
rwork = zeros((lrw,), float)
rwork[4] = self.first_step
rwork[5] = self.max_step
rwork[6] = self.min_step
self.rwork = rwork
iwork = zeros((liw,), int32)
if self.ml is not None:
iwork[0] = self.ml
if self.mu is not None:
iwork[1] = self.mu
iwork[4] = self.order
iwork[5] = self.nsteps
iwork[6] = 2 # mxhnil
self.iwork = iwork
self.call_args = [self.rtol, self.atol, 1, 1,
self.zwork, self.rwork, self.iwork, mf]
self.success = 1
self.initialized = False
def run(self, *args):
if self.initialized:
self.check_handle()
else:
self.initialized = True
self.acquire_new_handle()
y1, t, istate = self.runner(*(args[:5] + tuple(self.call_args) +
args[5:]))
if istate < 0:
warnings.warn('zvode: ' +
self.messages.get(istate, 'Unexpected istate=%s' % istate))
self.success = 0
else:
self.call_args[3] = 2 # upgrade istate from 1 to 2
return y1, t
if zvode.runner is not None:
IntegratorBase.integrator_classes.append(zvode)
class dopri5(IntegratorBase):
runner = getattr(_dop, 'dopri5', None)
name = 'dopri5'
messages = {1: 'computation successful',
2: 'comput. successful (interrupted by solout)',
-1: 'input is not consistent',
-2: 'larger nmax is needed',
-3: 'step size becomes too small',
-4: 'problem is probably stiff (interrupted)',
}
def __init__(self,
rtol=1e-6, atol=1e-12,
nsteps=500,
max_step=0.0,
first_step=0.0, # determined by solver
safety=0.9,
ifactor=10.0,
dfactor=0.2,
beta=0.0,
method=None
):
self.rtol = rtol
self.atol = atol
self.nsteps = nsteps
self.max_step = max_step
self.first_step = first_step
self.safety = safety
self.ifactor = ifactor
self.dfactor = dfactor
self.beta = beta
self.success = 1
def reset(self, n, has_jac):
work = zeros((8 * n + 21,), float)
work[1] = self.safety
work[2] = self.dfactor
work[3] = self.ifactor
work[4] = self.beta
work[5] = self.max_step
work[6] = self.first_step
self.work = work
iwork = zeros((21,), int32)
iwork[0] = self.nsteps
self.iwork = iwork
self.call_args = [self.rtol, self.atol, self._solout,
self.work, self.iwork]
self.success = 1
def run(self, f, jac, y0, t0, t1, f_params, jac_params):
x, y, iwork, idid = self.runner(*((f, t0, y0, t1) +
tuple(self.call_args) + (f_params,)))
if idid < 0:
warnings.warn(self.name + ': ' +
self.messages.get(idid, 'Unexpected idid=%s' % idid))
self.success = 0
return y, x
def _solout(self, *args):
# dummy solout function
pass
if dopri5.runner is not None:
IntegratorBase.integrator_classes.append(dopri5)
class dop853(dopri5):
runner = getattr(_dop, 'dop853', None)
name = 'dop853'
def __init__(self,
rtol=1e-6, atol=1e-12,
nsteps=500,
max_step=0.0,
first_step=0.0, # determined by solver
safety=0.9,
ifactor=6.0,
dfactor=0.3,
beta=0.0,
method=None
):
self.rtol = rtol
self.atol = atol
self.nsteps = nsteps
self.max_step = max_step
self.first_step = first_step
self.safety = safety
self.ifactor = ifactor
self.dfactor = dfactor
self.beta = beta
self.success = 1
def reset(self, n, has_jac):
work = zeros((11 * n + 21,), float)
work[1] = self.safety
work[2] = self.dfactor
work[3] = self.ifactor
work[4] = self.beta
work[5] = self.max_step
work[6] = self.first_step
self.work = work
iwork = zeros((21,), int32)
iwork[0] = self.nsteps
self.iwork = iwork
self.call_args = [self.rtol, self.atol, self._solout,
self.work, self.iwork]
self.success = 1
if dop853.runner is not None:
IntegratorBase.integrator_classes.append(dop853)
| gpl-3.0 | -3,308,868,915,932,437,000 | 30.917793 | 80 | 0.538334 | false |
brianmhunt/SIWorldMap | werkzeug/script.py | 89 | 11151 | # -*- coding: utf-8 -*-
r'''
werkzeug.script
~~~~~~~~~~~~~~~
.. admonition:: Deprecated Functionality
``werkzeug.script`` is deprecated without replacement functionality.
Python's command line support improved greatly with :mod:`argparse`
and a bunch of alternative modules.
Most of the time you have recurring tasks while writing an application
such as starting up an interactive python interpreter with some prefilled
imports, starting the development server, initializing the database or
something similar.
For that purpose werkzeug provides the `werkzeug.script` module which
helps you writing such scripts.
Basic Usage
-----------
The following snippet is roughly the same in every werkzeug script::
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from werkzeug import script
# actions go here
if __name__ == '__main__':
script.run()
Starting this script now does nothing because no actions are defined.
An action is a function in the same module starting with ``"action_"``
which takes a number of arguments where every argument has a default. The
type of the default value specifies the type of the argument.
Arguments can then be passed by position or using ``--name=value`` from
the shell.
Because a runserver and shell command is pretty common there are two
factory functions that create such commands::
def make_app():
from yourapplication import YourApplication
return YourApplication(...)
action_runserver = script.make_runserver(make_app, use_reloader=True)
action_shell = script.make_shell(lambda: {'app': make_app()})
Using The Scripts
-----------------
The script from above can be used like this from the shell now:
.. sourcecode:: text
$ ./manage.py --help
$ ./manage.py runserver localhost 8080 --debugger --no-reloader
$ ./manage.py runserver -p 4000
$ ./manage.py shell
As you can see it's possible to pass parameters as positional arguments
or as named parameters, pretty much like Python function calls.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
'''
import sys
import inspect
import getopt
from os.path import basename
argument_types = {
bool: 'boolean',
str: 'string',
int: 'integer',
float: 'float'
}
converters = {
'boolean': lambda x: x.lower() in ('1', 'true', 'yes', 'on'),
'string': str,
'integer': int,
'float': float
}
def run(namespace=None, action_prefix='action_', args=None):
"""Run the script. Participating actions are looked up in the caller's
namespace if no namespace is given, otherwise in the dict provided.
Only items that start with action_prefix are processed as actions. If
you want to use all items in the namespace provided as actions set
action_prefix to an empty string.
:param namespace: An optional dict where the functions are looked up in.
By default the local namespace of the caller is used.
:param action_prefix: The prefix for the functions. Everything else
is ignored.
:param args: the arguments for the function. If not specified
:data:`sys.argv` without the first argument is used.
"""
if namespace is None:
namespace = sys._getframe(1).f_locals
actions = find_actions(namespace, action_prefix)
if args is None:
args = sys.argv[1:]
if not args or args[0] in ('-h', '--help'):
return print_usage(actions)
elif args[0] not in actions:
fail('Unknown action \'%s\'' % args[0])
arguments = {}
types = {}
key_to_arg = {}
long_options = []
formatstring = ''
func, doc, arg_def = actions[args.pop(0)]
for idx, (arg, shortcut, default, option_type) in enumerate(arg_def):
real_arg = arg.replace('-', '_')
if shortcut:
formatstring += shortcut
if not isinstance(default, bool):
formatstring += ':'
key_to_arg['-' + shortcut] = real_arg
long_options.append(isinstance(default, bool) and arg or arg + '=')
key_to_arg['--' + arg] = real_arg
key_to_arg[idx] = real_arg
types[real_arg] = option_type
arguments[real_arg] = default
try:
optlist, posargs = getopt.gnu_getopt(args, formatstring, long_options)
except getopt.GetoptError, e:
fail(str(e))
specified_arguments = set()
for key, value in enumerate(posargs):
try:
arg = key_to_arg[key]
        except KeyError:  # key_to_arg is a dict keyed by position, so a miss raises KeyError
fail('Too many parameters')
specified_arguments.add(arg)
try:
arguments[arg] = converters[types[arg]](value)
except ValueError:
fail('Invalid value for argument %s (%s): %s' % (key, arg, value))
for key, value in optlist:
arg = key_to_arg[key]
if arg in specified_arguments:
fail('Argument \'%s\' is specified twice' % arg)
if types[arg] == 'boolean':
if arg.startswith('no_'):
value = 'no'
else:
value = 'yes'
try:
arguments[arg] = converters[types[arg]](value)
except ValueError:
fail('Invalid value for \'%s\': %s' % (key, value))
newargs = {}
for k, v in arguments.iteritems():
newargs[k.startswith('no_') and k[3:] or k] = v
arguments = newargs
return func(**arguments)
def fail(message, code=-1):
"""Fail with an error."""
print >> sys.stderr, 'Error:', message
sys.exit(code)
def find_actions(namespace, action_prefix):
"""Find all the actions in the namespace."""
actions = {}
for key, value in namespace.iteritems():
if key.startswith(action_prefix):
actions[key[len(action_prefix):]] = analyse_action(value)
return actions
def print_usage(actions):
"""Print the usage information. (Help screen)"""
actions = actions.items()
actions.sort()
print 'usage: %s <action> [<options>]' % basename(sys.argv[0])
print ' %s --help' % basename(sys.argv[0])
print
print 'actions:'
for name, (func, doc, arguments) in actions:
print ' %s:' % name
for line in doc.splitlines():
print ' %s' % line
if arguments:
print
for arg, shortcut, default, argtype in arguments:
if isinstance(default, bool):
print ' %s' % (
(shortcut and '-%s, ' % shortcut or '') + '--' + arg
)
else:
print ' %-30s%-10s%s' % (
(shortcut and '-%s, ' % shortcut or '') + '--' + arg,
argtype, default
)
print
def analyse_action(func):
"""Analyse a function."""
description = inspect.getdoc(func) or 'undocumented action'
arguments = []
args, varargs, kwargs, defaults = inspect.getargspec(func)
if varargs or kwargs:
raise TypeError('variable length arguments for action not allowed.')
if len(args) != len(defaults or ()):
raise TypeError('not all arguments have proper definitions')
for idx, (arg, definition) in enumerate(zip(args, defaults or ())):
if arg.startswith('_'):
raise TypeError('arguments may not start with an underscore')
if not isinstance(definition, tuple):
shortcut = None
default = definition
else:
shortcut, default = definition
argument_type = argument_types[type(default)]
if isinstance(default, bool) and default is True:
arg = 'no-' + arg
arguments.append((arg.replace('_', '-'), shortcut,
default, argument_type))
return func, description, arguments
def make_shell(init_func=None, banner=None, use_ipython=True):
"""Returns an action callback that spawns a new interactive
python shell.
:param init_func: an optional initialization function that is
called before the shell is started. The return
value of this function is the initial namespace.
:param banner: the banner that is displayed before the shell. If
not specified a generic banner is used instead.
:param use_ipython: if set to `True` ipython is used if available.
"""
if banner is None:
banner = 'Interactive Werkzeug Shell'
if init_func is None:
init_func = dict
def action(ipython=use_ipython):
"""Start a new interactive python session."""
namespace = init_func()
if ipython:
try:
try:
from IPython.frontend.terminal.embed import InteractiveShellEmbed
sh = InteractiveShellEmbed(banner1=banner)
except ImportError:
from IPython.Shell import IPShellEmbed
sh = IPShellEmbed(banner=banner)
except ImportError:
pass
else:
sh(global_ns={}, local_ns=namespace)
return
from code import interact
interact(banner, local=namespace)
return action
def make_runserver(app_factory, hostname='localhost', port=5000,
use_reloader=False, use_debugger=False, use_evalex=True,
threaded=False, processes=1, static_files=None,
extra_files=None, ssl_context=None):
"""Returns an action callback that spawns a new development server.
.. versionadded:: 0.5
`static_files` and `extra_files` was added.
    .. versionadded:: 0.6.1
`ssl_context` was added.
:param app_factory: a function that returns a new WSGI application.
:param hostname: the default hostname the server should listen on.
:param port: the default port of the server.
:param use_reloader: the default setting for the reloader.
:param use_evalex: the default setting for the evalex flag of the debugger.
:param threaded: the default threading setting.
:param processes: the default number of processes to start.
:param static_files: optional dict of static files.
:param extra_files: optional list of extra files to track for reloading.
:param ssl_context: optional SSL context for running server in HTTPS mode.
"""
def action(hostname=('h', hostname), port=('p', port),
reloader=use_reloader, debugger=use_debugger,
evalex=use_evalex, threaded=threaded, processes=processes):
"""Start a new development server."""
from werkzeug.serving import run_simple
app = app_factory()
run_simple(hostname, port, app, reloader, debugger, evalex,
extra_files, 1, threaded, processes,
static_files=static_files, ssl_context=ssl_context)
return action
| mit | 2,743,942,439,959,080,000 | 34.626198 | 85 | 0.603892 | false |
ner0x652/RElief | elfie.py | 1 | 6773 | #!/usr/bin/env python3
import lief
import sys
import termcolor as tc
def get_typeval_as_str(lief_type):
return str(lief_type).split('.')[1]
def show_name(binary):
print(tc.colored("[::] Name", "blue"))
print(binary.name)
def enum_header(header):
def get_ident_props():
identity = "\n{0:18} {1}".format("\t\tClass:", get_typeval_as_str(header.identity_class))
identity += "\n{0:18} {1}".format("\t\tData:", get_typeval_as_str(header.identity_data))
identity += "\n{0:18} {1}".format("\t\tOS ABI:", get_typeval_as_str(header.identity_os_abi))
identity += "\n{0:18} {1}".format("\t\tVersion:", get_typeval_as_str(header.identity_version))
identity += "\n{0:18} {1}".format("\t\tMachine:", get_typeval_as_str(header.machine_type))
return identity
print(tc.colored("[::] Header", "blue"))
print(tc.colored("{0:25} {1}".format("\tEntrypoint:", hex(header.entrypoint)), "green"))
print(tc.colored("{0:25} {1}".format("\tFile type:", get_typeval_as_str(header.file_type)), "green"))
print(tc.colored("{0:25} {1}".format("\tHeader size:", hex(header.header_size)), "green"))
print(tc.colored("{0:25} {1}".format("\tIdentity:", get_ident_props()), "cyan"))
print(tc.colored("{0:25} {1}".format("\tNumber of sections:", header.numberof_sections), "green"))
print(tc.colored("{0:25} {1}".format("\tNumber of segments:", header.numberof_segments), "green"))
print(tc.colored("{0:25} {1}".format("\tObject file version:", get_typeval_as_str(header.object_file_version)), "green"))
print(tc.colored("{0:25} {1}".format("\tProcessor flag:", header.processor_flag), "green"))
print(tc.colored("{0:25} {1}".format("\tProgram header offset:", hex(header.program_header_offset)), "green"))
print(tc.colored("{0:25} {1}".format("\tProgram header size:", hex(header.program_header_size)), "green"))
print(tc.colored("{0:25} {1}".format("\tSection header offset:", hex(header.section_header_offset)), "green"))
print(tc.colored("{0:25} {1}".format("\tSection name table idx:", hex(header.section_name_table_idx)), "green"))
print(tc.colored("{0:25} {1}".format("\tSection header size:", hex(header.section_header_size)), "green"))
def show_interpreter(binary):
print(tc.colored("[::] Interpreter/loader", "blue"))
if binary.has_interpreter:
print(binary.interpreter)
else:
print(tc.colored("No interpreter/loader", "yellow"))
def show_notes(binary):
print(tc.colored("[::] Notes section", "blue"))
if binary.has_notes:
for n in binary.notes:
print(n)
else:
print(tc.colored("No notes section", "yellow"))
def enum_dyn_entries(binary):
print(tc.colored("[::] Dynamic entries", "blue"))
for e in binary.dynamic_entries:
print(e)
def enum_dyn_relocs(binary):
print(tc.colored("[::] Dynamic relocations", "blue"))
for r in binary.dynamic_relocations:
print(r)
def enum_exp_funcs(binary):
print(tc.colored("[::] Exported functions", "blue"))
for f in binary.exported_functions:
print(f)
def enum_exp_symbols(binary):
print(tc.colored("[::] Exported symbols", "blue"))
for s in binary.exported_symbols:
print(s)
def enum_imp_functions(binary):
print(tc.colored("[::] Imported functions", "blue"))
for f in binary.imported_functions:
print(f)
def enum_imp_symbols(binary):
print(tc.colored("[::] Imported symbols", "blue"))
for s in binary.imported_symbols:
print(s)
def enum_libraries(binary):
print(tc.colored("[::] Libraries", "blue"))
for l in binary.libraries:
print(l)
def enum_sections(binary):
print(tc.colored("[::] Sections", "blue"))
for s in binary.sections:
print(s)
# Properties
print(tc.colored("\t{0:15} {1}".format("Alignment", hex(s.alignment)), "cyan"))
print(tc.colored("\t{0:15} {1}".format("Entropy", s.entropy), "cyan"))
print(tc.colored("\t{0:15} {1}".format("File offset", hex(s.file_offset)), "cyan"))
print(tc.colored("\t{0:15} {1}".format("Flags", s.flags), "cyan"))
print(tc.colored("\t{0:15} {1}".format("Information", s.information), "cyan"))
print(tc.colored("\t{0:15} {1}".format("Link", s.link), "cyan"))
print(tc.colored("\t{0:15} {1}".format("Name index", s.name_idx), "cyan"))
print(tc.colored("\t{0:15} {1}".format("Offset", hex(s.offset)), "cyan"))
print(tc.colored("\t{0:15} {1}".format("Original size", hex(s.original_size)), "cyan"))
print(tc.colored("\t{0:15} {1}".format("Size", hex(s.size)), "cyan"))
print(tc.colored("\t{0:15} {1}".format("Type", get_typeval_as_str(s.type)), "cyan"))
print(tc.colored("\t{0:15} {1}".format("Virtual addr", hex(s.virtual_address)), "cyan"))
def enum_segments(binary):
print(tc.colored("[::] Segments", "blue"))
for s in binary.segments:
print(s)
# Properties
print(tc.colored("\t{0:15} {1}".format("Alignment", hex(s.alignment)), "cyan"))
print(tc.colored("\t{0:15} {1}".format("File offset", hex(s.file_offset)), "cyan"))
print(tc.colored("\t{0:15} {1}".format("Flags", s.flags), "cyan"))
print(tc.colored("\t{0:15} {1}".format("Type", get_typeval_as_str(s.type)), "cyan"))
print(tc.colored("\t{0:15} {1}".format("Virtual addr", hex(s.virtual_address)), "cyan"))
print(tc.colored("\t{0:15} {1}".format("Virtual size", hex(s.virtual_size)), "cyan"))
def run():
if len(sys.argv) < 2:
print("[USAGE]: {0} <executable>".format(sys.argv[0]))
sys.exit(1)
try:
binary = lief.ELF.parse(sys.argv[1])
except lief.bad_file as err:
print("Error: {0}".format(err))
sys.exit(1)
show_name(binary)
enum_header(binary.header)
enum_dyn_entries(binary)
enum_dyn_relocs(binary)
enum_exp_funcs(binary)
enum_exp_symbols(binary)
enum_imp_functions(binary)
enum_imp_symbols(binary)
enum_libraries(binary)
show_notes(binary)
show_interpreter(binary)
enum_sections(binary)
enum_segments(binary)
if __name__ == "__main__":
run()
| mit | -4,353,584,598,591,956,500 | 41.33125 | 128 | 0.557065 | false |
angstwad/ansible | lib/ansible/galaxy/token.py | 68 | 2167 | #!/usr/bin/env python
########################################################################
#
# (C) 2015, Chris Houseknecht <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import yaml
from stat import *
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class GalaxyToken(object):
    ''' Class for storing and retrieving the token in ~/.ansible_galaxy '''
def __init__(self):
self.file = os.path.expanduser("~") + '/.ansible_galaxy'
self.config = yaml.safe_load(self.__open_config_for_read())
if not self.config:
self.config = {}
def __open_config_for_read(self):
if os.path.isfile(self.file):
display.vvv('Opened %s' % self.file)
return open(self.file, 'r')
        # file not found; create it and chmod u+rw
f = open(self.file,'w')
f.close()
os.chmod(self.file,S_IRUSR|S_IWUSR) # owner has +rw
display.vvv('Created %s' % self.file)
return open(self.file, 'r')
def set(self, token):
self.config['token'] = token
self.save()
def get(self):
return self.config.get('token', None)
def save(self):
with open(self.file,'w') as f:
yaml.safe_dump(self.config,f,default_flow_style=False)
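# Usage sketch (illustrative):
#   token = GalaxyToken()
#   token.set("c0ffee")
#   token.get() # -> "c0ffee"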
| gpl-3.0 | -5,328,485,136,523,889,000 | 31.358209 | 72 | 0.601292 | false |
robbiet480/home-assistant | tests/components/folder_watcher/test_init.py | 3 | 1834 | """The tests for the folder_watcher component."""
import os
from homeassistant.components import folder_watcher
from homeassistant.setup import async_setup_component
from tests.async_mock import Mock, patch
async def test_invalid_path_setup(hass):
"""Test that an invalid path is not set up."""
assert not await async_setup_component(
hass,
folder_watcher.DOMAIN,
{folder_watcher.DOMAIN: {folder_watcher.CONF_FOLDER: "invalid_path"}},
)
async def test_valid_path_setup(hass):
"""Test that a valid path is setup."""
cwd = os.path.join(os.path.dirname(__file__))
hass.config.whitelist_external_dirs = {cwd}
with patch.object(folder_watcher, "Watcher"):
assert await async_setup_component(
hass,
folder_watcher.DOMAIN,
{folder_watcher.DOMAIN: {folder_watcher.CONF_FOLDER: cwd}},
)
def test_event():
"""Check that Home Assistant events are fired correctly on watchdog event."""
class MockPatternMatchingEventHandler:
"""Mock base class for the pattern matcher event handler."""
def __init__(self, patterns):
pass
with patch(
"homeassistant.components.folder_watcher.PatternMatchingEventHandler",
MockPatternMatchingEventHandler,
):
hass = Mock()
handler = folder_watcher.create_event_handler(["*"], hass)
handler.on_created(
Mock(is_directory=False, src_path="/hello/world.txt", event_type="created")
)
assert hass.bus.fire.called
assert hass.bus.fire.mock_calls[0][1][0] == folder_watcher.DOMAIN
assert hass.bus.fire.mock_calls[0][1][1] == {
"event_type": "created",
"path": "/hello/world.txt",
"file": "world.txt",
"folder": "/hello",
}
| apache-2.0 | -2,187,242,035,677,994,500 | 31.75 | 87 | 0.62759 | false |
bruinfish/cs118-proj2-pox | pox/messenger/__init__.py | 25 | 19645 | # Copyright 2011,2012 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
The POX Messenger system.
The Messenger system is a way to build services in POX that can be
consumed by external clients.
Sometimes a controller might need to interact with the outside world.
Sometimes you need to integrate with an existing piece of software and
maybe you don't get to choose how you communicate with it. Other times,
you have the opportunity and burden of rolling your own. The Messenger
system is meant to help you with the latter case.
In short, channels are a system for communicating between POX and
external programs by exchanging messages encoded in JSON. It is intended
to be quite general, both in the communication models it supports and in
the transports is supports (as of this writing, it supports a
straightforward TCP socket transport and an HTTP transport). Any
service written to use the Messenger should theoretically be usable via
any transport.
*Connections* are somehow established when a client connects via some
*Transport*. The server can individually send messages to a specific client.
A client can send messages to a *Channel* on the server. A client can also
become a member of a channel, after which it will receive any messages
the server sends to that channel. There is always a default channel with
no name.
Channels can either be permanent or temporary. Temporary channels are
automatically destroyed when they no longer contain any members.
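On the wire, a message is simply a JSON object whose CHANNEL key names the
target channel. A sketch (the "chat" channel and "msg" key are
illustrative, not part of the core protocol):
  {"CHANNEL" : "chat", "msg" : "Hello, world!"}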
"""
from pox.lib.revent.revent import *
from pox.core import core as core
import json
import time
import random
import hashlib
from base64 import b32encode
log = core.getLogger()
# JSON decoder used by default
defaultDecoder = json.JSONDecoder()
class ChannelJoin (Event):
""" Fired on a channel when a client joins. """
def __init__ (self, connection, channel, msg = {}):
Event.__init__(self)
self.con = connection
self.channel = channel
self.msg = msg
class ConnectionClosed (Event):
""" Fired on a connection when it closes. """
def __init__ (self, connection):
Event.__init__(self)
self.con = connection
class ChannelLeave (Event):
""" Fired on a channel when a client leaves. """
def __init__ (self, connection, channel):
Event.__init__(self)
self.con = connection
self.channel = channel
class ChannelCreate (Event):
""" Fired on a Nexus when a channel is created. """
def __init__ (self, channel):
Event.__init__(self)
self.channel = channel
class ChannelDestroy (Event):
"""
Fired on the channel and its Nexus right before a channel is destroyed.
Set .keep = True to keep the channel after all.
"""
def __init__ (self, channel):
Event.__init__(self)
self.channel = channel
self.keep = False
class ChannelDestroyed (Event):
"""
Fired on the channel and its Nexus right after a channel is destroyed.
"""
def __init__ (self, channel):
Event.__init__(self)
self.channel = channel
class MissingChannel (Event):
"""
  Fired on a Nexus when a message has been received for a non-existent channel.
You can create the channel in response to this.
"""
def __init__ (self, connection, channel_name, msg):
Event.__init__(self)
self.con = connection
self.channel_name = channel_name
self.msg = msg
class MessageReceived (Event):
"""
  Fired by a channel when a message has been received.
Always fired on the Connection itself. Also fired on the corresponding
Channel object as specified by the CHANNEL key.
The listener looks like:
def _handle_MessageReceived (event, msg):
"""
def __init__ (self, connection, channel, msg):
Event.__init__(self)
self.con = connection
self.msg = msg
self.channel = channel
def is_to_channel (self, channel):
"""
Returns True if this message is to the given channel
"""
if isinstance(channel, Channel):
channel = channel.name
if channel == self.channel: return True
if channel in self.channel: return True
return False
def _invoke (self, handler, *args, **kw):
# Special handling -- pass the message
return handler(self, self.msg, *args, **kw)
def _get_nexus (nexus):
if nexus is None: nexus = "MessengerNexus"
if isinstance(nexus, str):
if not core.hasComponent(nexus):
#TODO: Wait for channel Nexus
s = "MessengerNexus %s is not available" % (nexus,)
log.error(s)
raise RuntimeError(s)
return getattr(core, nexus)
assert isinstance(nexus, MessengerNexus)
return nexus
class Transport (object):
def __init__ (self, nexus):
self._nexus = _get_nexus(nexus)
def _forget (self, connection):
""" Forget about a connection """
raise RuntimeError("Not implemented")
class Connection (EventMixin):
"""
Superclass for Connections.
This could actually be a bit thinner, if someone wants to clean it up.
Maintains the state and handles message parsing and dispatch for a
single connection.
"""
_eventMixin_events = set([
MessageReceived,
ConnectionClosed,
])
def __init__ (self, transport):
"""
transport is the source of the connection (e.g, TCPTransport).
"""
EventMixin.__init__(self)
self._is_connected = True
self._transport = transport
self._newlines = False
    # Transports that don't do their own encapsulation can use _rx_raw(),
# which uses this. (Such should probably be broken into a subclass.)
self._buf = bytes()
key,num = self._transport._nexus.generate_session()
self._session_id,self._session_num = key,num
def _send_welcome (self):
"""
Send a message to a client so they know they're connected
"""
self.send({"CHANNEL":"","cmd":"welcome","session_id":self._session_id})
def _close (self):
"""
Called internally to shut the connection down.
"""
if self._is_connected is False: return
self._transport._forget(self)
self._is_connected = False
for name,chan in self._transport._nexus._channels.items():
chan._remove_member(self)
self.raiseEventNoErrors(ConnectionClosed, self)
#self._transport._nexus.raiseEventNoErrors(ConnectionClosed, self)
def send (self, whatever):
"""
Send data over the connection.
It will first be encoded into JSON, and optionally followed with
a newline. Ultimately, it will be passed to send_raw() to actually
be sent.
"""
if self._is_connected is False: return False
s = json.dumps(whatever, default=str)
if self._newlines: s += "\n"
self.send_raw(s)
return True
def send_raw (self, data):
"""
This method should actually send data out over the connection.
Subclasses need to implement this.
"""
raise RuntimeError("Not implemented")
@property
def is_connected (self):
"""
True if this Connection is still connected.
"""
return self._is_connected
def _rx_message (self, msg):
"""
Raises events when a complete message is available.
Subclasses may want to call this when they have a new message
available. See _recv_raw().
"""
e = self.raiseEventNoErrors(MessageReceived,self,msg.get('CHANNEL'),msg)
self._transport._nexus._rx_message(self, msg)
def _rx_raw (self, data):
"""
If your subclass receives a stream instead of discrete messages, this
    method can parse out individual messages and call _rx_message() when
it has full messages.
"""
if len(data) == 0: return
if len(self._buf) == 0:
if data[0].isspace():
self._buf = data.lstrip()
else:
self._buf = data
else:
self._buf += data
while len(self._buf) > 0:
try:
msg, l = defaultDecoder.raw_decode(self._buf)
except:
# Need more data before it's a valid message
# (.. or the stream is corrupt and things will never be okay
# ever again)
return
self._buf = self._buf[l:]
if len(self._buf) != 0 and self._buf[0].isspace():
self._buf = self._buf.lstrip()
self._rx_message(msg)
def __str__ (self):
"""
Subclasses should implement better versions of this.
"""
return "<%s/%s/%i>" % (self.__class__.__name__, self._session_id,
self._session_num)
def close (self):
"""
Close the connection.
"""
self._close()
class Channel (EventMixin):
"""
Allows one to easily listen to only messages that have a CHANNEL key
with a specific name.
Generally you will not create these classes directly, but by calling
getChannel() on the ChannelNexus.
"""
_eventMixin_events = set([
MessageReceived,
    ChannelJoin, # Immediately when a connection goes up
ChannelLeave, # When a connection goes down
ChannelDestroy,
ChannelDestroyed,
])
def __init__ (self, name, nexus = None, temporary = False):
"""
name is the name for the channel (i.e., the value for the messages'
CHANNEL key).
nexus is the specific MessengerNexus with which this channel is to be
associated (defaults to core.MessengerNexus).
"""
EventMixin.__init__(self)
assert isinstance(name, basestring)
self._name = name
self._nexus = _get_nexus(nexus)
self._nexus._channels[name] = self
self.temporary = temporary
self._members = set() # Member Connections
@property
def name (self):
return self._name
def _destroy (self):
""" Remove channel """
e = self.raiseEvent(ChannelDestroy, self)
if e:
if e.keep: return False
self._nexus.raiseEvent(e)
if e.keep: return False
del self._nexus._channels[self._name]
    # We can't just do the following because then listeners
    # can't tell if the channel is now empty...
    #for sub in set(self._members):
    #  sub.raiseEvent(ChannelLeave, sub, self)
    #
    #self._members.clear()
    # .. so do the following really straightforward thing instead:
for sub in set(self._members):
self._remove_member(sub, allow_destroy = False)
e = ChannelDestroyed(self)
self.raiseEvent(e)
self._nexus.raiseEvent(e)
def _add_member (self, con, msg = {}):
if con in self._members: return
self._members.add(con)
self.raiseEvent(ChannelJoin, con, self, msg)
def _remove_member (self, con, allow_destroy = True):
if con not in self._members: return
self._members.remove(con)
self.raiseEvent(ChannelLeave, con, self)
if not allow_destroy: return
if self.temporary is True:
if len(self._members) == 0:
self._destroy()
def send (self, msg):
d = dict(msg)
d['CHANNEL'] = self._name
for r in self._members:
if not r.is_connected: continue
r.send(d)
def __str__ (self):
return "<Channel " + self.name + ">"
def reply (_msg, **kw):
if not isinstance(_msg, dict):
# We'll also take an event...
_msg = _msg.msg
kw['CHANNEL'] = _msg.get('CHANNEL')
if 'XID' in _msg: kw['XID'] = _msg.get('XID')
return kw
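# Illustrative use (the 'cmd' key is an assumption for this example): given a
# received msg dict, reply(msg, cmd='pong') returns a dict with the same
# CHANNEL (and XID, if any) plus cmd='pong', ready for connection.send().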
class ChannelBot (object):
"""
A very simple framework for writing "bots" that respond to messages
on a channel.
"""
def __str__ (self):
return "<%s@%s>" % (self.__class__.__name__, self.channel)
def __init__ (self, channel, nexus = None, weak = False, extra = {}):
self._startup(channel, nexus, weak, extra)
def _startup (self, channel, nexus = None, weak = False, extra = {}):
self._nexus = _get_nexus(nexus)
if isinstance(channel, Channel):
self.channel = channel
else:
self.channel = self._nexus.get_channel(channel, create=True)
self.listeners = self.channel.addListeners(self, weak = weak)
self.prefixes = None
self._init(extra)
if self.prefixes is None:
self.prefixes = []
for n in dir(self):
if n.startswith("_exec_"):
n = n.split("_")[2]
self.prefixes.append(n)
def _handle_ChannelDestroyed (self, event):
self.channel.removeListeners(self.listeners)
self._destroyed()
def _handle_ChannelJoin (self, event):
self._join(event, event.con, event.msg)
def _handle_ChannelLeave (self, event):
self._leave(event.con, len(self.channel._members) == 0)
def _handle_MessageReceived (self, event, msg):
for prefix in self.prefixes:
if prefix in event.msg:
cmd = "_exec_%s_%s" % (prefix, str(event.msg[prefix]))
if hasattr(self, cmd):
getattr(self, cmd)(event)
return #TODO: Return val?
for prefix in self.prefixes:
if prefix in event.msg:
cmd = "_exec_" + prefix
if hasattr(self, cmd):
getattr(self, cmd)(event, msg[prefix])
return #TODO: Return val?
self._unhandled(event)
def _unhandled (self, event):
""" Called when no command found """
pass
def _join (self, event, connection, msg):
""" Called when a connection joins """
pass
def _leave (self, connection, empty):
"""
Called when a connection leaves
If channel now has no members, empty is True
"""
pass
def _destroyed (self):
""" Called when channel is destroyed """
pass
def _init (self, extra):
"""
Called during initialization
'extra' is any additional information passed in when initializing
the bot. In particular, this may be the message that goes along
with its invitation into a channel.
"""
pass
def reply (__self, __event, **kw):
"""
Unicast reply to a specific message.
"""
__event.con.send(reply(__event, **kw))
def send (__self, __msg={}, **kw):
"""
Send a message to all members of this channel.
"""
m = {}
m.update(__msg)
m.update(kw)
__self.channel.send(m)
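# A minimal illustrative bot sketch (the "echo" channel name and the 'msg'
# key are assumptions for this example, not part of the framework above):
#
#   class EchoBot (ChannelBot):
#     def _exec_cmd_echo (self, event):
#       # Fires for messages like {"CHANNEL":"echo","cmd":"echo","msg":...}
#       self.reply(event, msg = event.msg.get('msg'))
#
#   EchoBot("echo")  # joins (or creates) the "echo" channel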
class DefaultChannelBot (ChannelBot):
def _init (self, extra):
self._bots = {}
def add_bot (self, bot, name = None):
"""
Registers a bot (an instance of ChannelBot) so that it can be
invited to other channels.
"""
assert issubclass(bot, ChannelBot)
if name is None:
name = bot.__name__
self._bots[name] = bot
def _exec_newlines_False (self, event):
event.con._newlines = False
def _exec_newlines_True (self, event):
event.con._newlines = True
def _exec_cmd_invite (self, event):
"""
Invites a bot that has been registered with add_bot() to a channel.
Note that you can invite a bot to an empty (new) temporary channel.
It will stay until the first member leaves.
"""
botname = event.msg.get('bot')
botclass = self._bots.get(botname)
channel = event.msg.get('channel')
new_channel = False
if channel is None:
new_channel = True
channel = self._gen_channel_name(event.msg.get("prefix", "temp"))
chan = self._nexus.get_channel(channel, create=True, temporary=True)
if chan is None:
#TODO: send an error
log.warning("A bot was invited to a nonexistent channel (%s)"
% (channel,))
return
if botclass is None:
#TODO: send an error
log.warning("A nonexistent bot (%s) was invited to a channel"
% (botname,))
return
bot = botclass(channel, self._nexus)
if new_channel:
self.reply(event, new_channel = new_channel)
def _unhandled (self, event):
log.warn("Default channel got unknown command: "
+ str(event.msg.get('cmd')))
def _gen_channel_name (self, prefix = "temp"):
""" Makes up a channel name """
prefix += "_"
import random
while True:
# Sloppy
r = random.randint(1, 100000)
n = prefix + str(r)
      if n not in self._nexus._channels:
break
return n
def _exec_cmd_new_channel (self, event):
""" Generates a new channel with random name """
prefix = event.msg.get('prefix', 'temp')
n = self._gen_channel_name(prefix)
ch = self._nexus.get_channel(n, create=True, temporary=True)
ch._add_member(event.con, event.msg)
self.reply(event, new_channel = n)
def _exec_cmd_join_channel (self, event):
""" Joins/creates a channel """
temp = event.msg.get('temporary', True) # Default temporary!
ch = self._nexus.get_channel(event.msg['channel'], temporary=temp)
if ch is None: return
ch._add_member(event.con, event.msg)
def _exec_cmd_leave_channel (self, event):
ch = self._nexus.get_channel(event.msg['channel'])
if ch is None: return
ch._remove_member(event.con)
def _exec_test (self, event, value):
log.info("Default channel got: " + str(value))
self.reply(event, test = value.upper())
class MessengerNexus (EventMixin):
"""
Transports, Channels, etc. are all associated with a MessengerNexus.
Typically, there is only one, and it is registered as
pox.core.MessengerNexus
"""
_eventMixin_events = set([
MissingChannel, # When a msg arrives to nonexistent channel
ChannelDestroy,
ChannelDestroyed,
ChannelCreate,
])
def __init__ (self):
EventMixin.__init__(self)
self._channels = {} # name -> Channel
self.default_bot = DefaultChannelBot("", self)
self._next_ses = 1
self._session_salt = str(time.time())
def generate_session (self):
"""
Return a new session ID tuple (key, num)
The key is a unique and not-trivial-to-guess alphanumeric value
associated with the session.
The num is a unique numerical value associated with the session.
"""
r = self._next_ses
self._next_ses += 1
key = str(random.random()) + str(time.time()) + str(r)
key += str(id(key)) + self._session_salt
key = b32encode(hashlib.md5(key).digest()).upper().replace('=','')
def alphahex (r):
""" base 16 on digits 'a' through 'p' """
r=hex(r)[2:].lower()
return ''.join(chr((10 if ord(x) >= 97 else 49) + ord(x)) for x in r)
key = alphahex(r) + key
return key,r
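  # The returned key is alphahex(num) followed by a base32-encoded MD5
  # digest, so it is both unique (via the counter) and hard to guess (via
  # the hash).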
def get_channel (self, name, create = True, temporary = False):
if name is None: name = ""
if name in self._channels:
return self._channels[name]
elif create:
c = Channel(name, self, temporary = temporary)
self.raiseEvent(ChannelCreate, c)
return c
else:
return None
def _rx_message (self, con, msg):
"""
Dispatches messages to listeners of this nexus and to its Channels.
Called by Connections.
"""
ret = False
assert isinstance(msg, dict)
if isinstance(msg, dict):
channels = msg.get('CHANNEL')
if channels is None:
channels = [""]
if not isinstance(channels, list):
channels = [channels]
for cname in channels:
channel = self.get_channel(cname, create=False)
if channel is None:
e = self.raiseEvent(MissingChannel, con, cname, msg)
if e is not None: cname = e.channel_name
channel = self.get_channel(cname, create=False)
if channel is not None:
#print "raise on", channel
channel.raiseEvent(MessageReceived, con, channel, msg)
ret = True
return ret
def launch ():
core.registerNew(MessengerNexus)
| gpl-3.0 | 3,102,707,311,107,334,700 | 27.889706 | 78 | 0.645304 | false |
richpsharp/forest_carbon_edge_effects | average_human_use_layers.py | 1 | 8405 | import os
import numpy
import codecs
import gdal
import osr
from invest_natcap import raster_utils
GLOBAL_UPPER_LEFT_ROW = 2602195.7925872812047601
GLOBAL_UPPER_LEFT_COL = -11429693.3490753173828125
def average_layers():
base_table_uri = "C:/Users/rich/Desktop/all_grid_results_100km_clean_v2.csv"
base_table_file = open(base_table_uri, 'rU')
table_header = base_table_file.readline()
#need to mask the average layers to the biomass regions
giant_layer_uri = "C:/Users/rich/Desktop/average_layers_projected/giant_layer.tif"
af_uri = "C:/Users/rich/Desktop/af_biov2ct1.tif"
am_uri = "C:/Users/rich/Desktop/am_biov2ct1.tif"
as_uri = "C:/Users/rich/Desktop/as_biov2ct1.tif"
cell_size = raster_utils.get_cell_size_from_uri(am_uri)
#raster_utils.vectorize_datasets(
# [af_uri, am_uri, as_uri], lambda x,y,z: x+y+z, giant_layer_uri, gdal.GDT_Float32,
# -1, cell_size, 'union', vectorize_op=False)
table_uri = base_table_uri
table_file = open(table_uri, 'rU')
table_header = table_file.readline().rstrip()
lookup_table = raster_utils.get_lookup_from_csv(table_uri, 'ID100km')
out_table_uri = "C:/Users/rich/Desktop/all_grid_results_100km_human_elevation.csv"
out_table_file = codecs.open(out_table_uri, 'w', 'utf-8')
average_raster_list = [
("C:/Users/rich/Desktop/average_layers_projected/lighted_area_luminosity.tif", 'Lighted area density'),
("C:/Users/rich/Desktop/average_layers_projected/fi_average.tif", 'Fire densities'),
("C:/Users/rich/Desktop/average_layers_projected/glbctd1t0503m.tif", 'FAO_Cattle'),
("C:/Users/rich/Desktop/average_layers_projected/glbgtd1t0503m.tif", 'FAO_Goat'),
("C:/Users/rich/Desktop/average_layers_projected/glbpgd1t0503m.tif", 'FAO_Pig'),
("C:/Users/rich/Desktop/average_layers_projected/glbshd1t0503m.tif", 'FAO_Sheep'),
("C:/Users/rich/Desktop/average_layers_projected/glds00ag.tif", 'Human population density AG'),
("C:/Users/rich/Desktop/average_layers_projected/glds00g.tif", 'Human population density G'),
('C:/Users/rich/Desktop/average_layers_projected/anthrome_11.tif', '"11: Urban, Dense settlement"'),
('C:/Users/rich/Desktop/average_layers_projected/anthrome_12.tif', '"12: Dense settlements, Dense settlements"'),
('C:/Users/rich/Desktop/average_layers_projected/anthrome_22.tif', '"22: Irrigated villages, Villages"'),
('C:/Users/rich/Desktop/average_layers_projected/anthrome_23.tif', '"23: Cropped & pastoral villages, Villages"'),
('C:/Users/rich/Desktop/average_layers_projected/anthrome_24.tif', '"24: Pastoral villages, Villages"'),
('C:/Users/rich/Desktop/average_layers_projected/anthrome_25.tif', '"25: Rainfed villages, Villages"'),
('C:/Users/rich/Desktop/average_layers_projected/anthrome_26.tif', '"26: Rainfed mosaic villages, Villages"'),
('C:/Users/rich/Desktop/average_layers_projected/anthrome_31.tif', '"31: Residential irrigated cropland, Croplands"'),
('C:/Users/rich/Desktop/average_layers_projected/anthrome_32.tif', '"32: Residential rainfed mosaic, Croplands"'),
('C:/Users/rich/Desktop/average_layers_projected/anthrome_33.tif', '"33: Populated irrigated cropland, Croplands"'),
('C:/Users/rich/Desktop/average_layers_projected/anthrome_34.tif', '"34: Populated rainfed cropland, Croplands"'),
('C:/Users/rich/Desktop/average_layers_projected/anthrome_35.tif', '"35: Remote croplands, Croplands"'),
('C:/Users/rich/Desktop/average_layers_projected/anthrome_41.tif', '"41: Residential rangelands, Rangelands"'),
('C:/Users/rich/Desktop/average_layers_projected/anthrome_42.tif', '"42: Populated rangelands, Rangelands"'),
('C:/Users/rich/Desktop/average_layers_projected/anthrome_43.tif', '"43: Remote rangelands, Rangelands"'),
('C:/Users/rich/Desktop/average_layers_projected/anthrome_51.tif', '"51: Populated forests, Forested"'),
('C:/Users/rich/Desktop/average_layers_projected/anthrome_52.tif', '"52: Remote forests, Forested"'),
('C:/Users/rich/Desktop/average_layers_projected/anthrome_61.tif', '"61: Wild forests, Wildlands"'),
('C:/Users/rich/Desktop/average_layers_projected/anthrome_62.tif', '"62: Sparse trees, Wildlands"'),
('C:/Users/rich/Desktop/average_layers_projected/anthrome_63.tif', '"63: Barren, Wildlands"'),
("C:/Users/rich/Desktop/average_layers_projected/5km_global_pantropic_dem.tif", '"Average Elevation"'),
]
clipped_raster_list = []
for average_raster_uri, header in average_raster_list:
print 'clipping ' + average_raster_uri
clipped_raster_uri = os.path.join(os.path.dirname(average_raster_uri), 'temp', os.path.basename(average_raster_uri))
cell_size = raster_utils.get_cell_size_from_uri(average_raster_uri)
raster_utils.vectorize_datasets(
[average_raster_uri, giant_layer_uri], lambda x,y: x, clipped_raster_uri, gdal.GDT_Float32,
-1, cell_size, 'intersection', vectorize_op=False)
clipped_raster_list.append((clipped_raster_uri, header))
dataset_list = [gdal.Open(uri) for uri, label in clipped_raster_list]
band_list = [ds.GetRasterBand(1) for ds in dataset_list]
nodata_list = [band.GetNoDataValue() for band in band_list]
extended_table_headers = ','.join([header for _, header in average_raster_list])
def write_to_file(value):
try:
out_table_file.write(value)
except UnicodeDecodeError as e:
out_table_file.write(value.decode('latin-1'))
write_to_file(table_header + ',' + extended_table_headers + '\n')
#print table_header + ',' + extended_table_headers
for line in table_file:
split_line = line.rstrip().split(',')
grid_id = split_line[2]
#for grid_id in lookup_table:
try:
split_grid_id = grid_id.split('-')
grid_row_index, grid_col_index = map(int, split_grid_id)
except ValueError as e:
month_to_number = {
'Jan': 1,
'Feb': 2,
'Mar': 3,
'Apr': 4,
'May': 5,
'Jun': 6,
'Jul': 7,
'Aug': 8,
'Sep': 9,
'Oct': 10,
'Nov': 11,
'Dec': 12,
}
grid_row_index, grid_col_index = month_to_number[split_grid_id[0]], int(split_grid_id[1])
print 'processing grid id ' + grid_id
ds = dataset_list[0]
base_srs = osr.SpatialReference(ds.GetProjection())
lat_lng_srs = base_srs.CloneGeogCS()
coord_transform = osr.CoordinateTransformation(
base_srs, lat_lng_srs)
gt = ds.GetGeoTransform()
grid_resolution = 100 #100km
row_coord = grid_row_index * grid_resolution * 1000 + GLOBAL_UPPER_LEFT_ROW
col_coord = grid_col_index * grid_resolution * 1000 + GLOBAL_UPPER_LEFT_COL
lng_coord, lat_coord, _ = coord_transform.TransformPoint(
col_coord, row_coord)
write_to_file(','.join(split_line[0:2]) + ',%d-%d,' % (grid_row_index, grid_col_index) + ','.join(split_line[3:11]) +',%f,%f,' % (lat_coord, lng_coord)+','.join(split_line[13:]))
for (_, header), band, ds, nodata in zip(clipped_raster_list, band_list, dataset_list, nodata_list):
gt = ds.GetGeoTransform()
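            # GDAL geotransform: gt[1] is the pixel width and gt[5] the
            # (negative) pixel height in projected units (meters here), so
            # the offsets below convert km grid indices into pixel indices.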
n_rows = ds.RasterYSize
n_cols = ds.RasterXSize
xoff = int(grid_col_index * (grid_resolution * 1000.0) / (gt[1]))
yoff = int(grid_row_index * (grid_resolution * 1000.0) / (-gt[5]))
win_xsize = int((grid_resolution * 1000.0) / (gt[1]))
            win_ysize = int((grid_resolution * 1000.0) / (-gt[5]))
if xoff + win_xsize > n_cols:
win_xsize = n_cols - xoff
if yoff + win_ysize > n_rows:
win_ysize = n_rows - yoff
block = band.ReadAsArray(
xoff=xoff, yoff=yoff, win_xsize=win_xsize, win_ysize=win_ysize)
block_average = numpy.average(block[block != nodata])
write_to_file(',%f' % block_average)
write_to_file('\n')
if __name__ == '__main__':
average_layers()
| apache-2.0 | -309,081,735,570,461,700 | 48.152047 | 186 | 0.627722 | false |
1a1a11a/mimircache | PyMimircache/cache/slru.py | 1 | 3337 | # coding=utf-8
from PyMimircache.cache.lru import LRU
from PyMimircache.cache.abstractCache import Cache
class SLRU(Cache):
def __init__(self, cache_size=1000, ratio=1, **kwargs):
"""
:param cache_size: size of cache
        :param ratio: the ratio of protected to probationary cache size
:return:
"""
super().__init__(cache_size, **kwargs)
self.ratio = ratio
        # Maybe using two linked lists and a dict would be more efficient?
self.protected = LRU(
int(self.cache_size * self.ratio / (self.ratio + 1)))
self.probationary = LRU(int(self.cache_size * 1 / (self.ratio + 1)))
def has(self, req_id, **kwargs):
"""
:param **kwargs:
:param req_id:
:return: whether the given element is in the cache
"""
if req_id in self.protected or req_id in self.probationary:
return True
else:
return False
def _update(self, req_item, **kwargs):
""" the given element is in the cache, now update it to new location
:param **kwargs:
:param req_item:
:return: None
"""
if req_item in self.protected:
self.protected._update(req_item, )
else:
# req_item is in probationary, remove from probationary, insert to end of protected,
# evict from protected to probationary if needed
# get the node and remove from probationary
node = self.probationary.cache_dict[req_item]
self.probationary.cache_linked_list.remove_node(node)
del self.probationary.cache_dict[req_item]
# insert into protected
evicted_key = self.protected._insert(node.content, )
# if there are req_item evicted from protected area, add to probationary area
if evicted_key:
self.probationary._insert(evicted_key, )
def _insert(self, req_item, **kwargs):
"""
the given element is not in the cache, now insert it into cache
:param **kwargs:
:param req_item:
:return: evicted element
"""
return self.probationary._insert(req_item, )
def _print_cache_line(self):
print("protected: ")
self.protected._print_cache_line()
print("probationary: ")
self.probationary._print_cache_line()
def evict(self, **kwargs):
"""
evict one element from the cache line
:param **kwargs:
:return: True on success, False on failure
"""
pass
def access(self, req_item, **kwargs):
"""
:param **kwargs:
:param req_item: a cache request, it can be in the cache, or not
:return: None
"""
if self.has(req_item, ):
self._update(req_item, )
return True
else:
self._insert(req_item, )
return False
def __repr__(self):
return "SLRU, given size: {}, given protected part size: {}, given probationary part size: {}, \
current protected part size: {}, current probationary size: {}". \
format(self.cache_size, self.protected.cache_size, self.probationary.cache_size,
self.protected.cache_linked_list.size, self.probationary.cache_linked_list.size)
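# Illustrative usage sketch (request IDs can be any hashable values):
#   cache = SLRU(cache_size=4, ratio=1)
#   for req in [1, 2, 1, 3, 4, 1]:
#       hit = cache.access(req)  # True on a hit, False on a miss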
| gpl-3.0 | 6,881,207,422,378,916,000 | 33.760417 | 104 | 0.576266 | false |
UltronAI/Deep-Learning | Pattern-Recognition/hw2-Feature-Selection/skfeature/function/wrapper/svm_backward.py | 1 | 1775 | import numpy as np
from sklearn.svm import SVC
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score
def svm_backward(X, y, n_selected_features):
"""
This function implements the backward feature selection algorithm based on SVM
Input
-----
X: {numpy array}, shape (n_samples, n_features)
input data
y: {numpy array}, shape (n_samples,)
input class labels
n_selected_features: {int}
number of selected features
Output
------
F: {numpy array}, shape (n_features, )
index of selected features
"""
n_samples, n_features = X.shape
# using 10 fold cross validation
    cv = KFold(n_splits=10, shuffle=True)
# choose SVM as the classifier
clf = SVC()
# selected feature set, initialized to contain all features
    F = list(range(n_features))
count = n_features
while count > n_selected_features:
max_acc = 0
for i in range(n_features):
if i in F:
F.remove(i)
X_tmp = X[:, F]
acc = 0
                for train, test in cv.split(X_tmp):
clf.fit(X_tmp[train], y[train])
y_predict = clf.predict(X_tmp[test])
acc_tmp = accuracy_score(y[test], y_predict)
acc += acc_tmp
acc = float(acc)/10
F.append(i)
# record the feature which results in the largest accuracy
if acc > max_acc:
max_acc = acc
idx = i
# delete the feature which results in the largest accuracy
F.remove(idx)
count -= 1
return np.array(F)
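# Example call (illustrative; X is an (n_samples, n_features) array and y the
# labels): selected = svm_backward(X, y, n_selected_features=10)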
| mit | 5,397,828,943,415,351,000 | 28.084746 | 82 | 0.530141 | false |
jalexanderqed/rocksdb | build_tools/precommit_checker.py | 2 | 5638 | #!/usr/local/fbcode/gcc-4.8.1-glibc-2.17-fb/bin/python2.7
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import commands
import subprocess
import sys
import re
import os
import time
#
# Simple logger
#
class Log:
def __init__(self, filename):
self.filename = filename
self.f = open(self.filename, 'w+', 0)
def caption(self, str):
line = "\n##### %s #####\n" % str
if self.f:
self.f.write("%s \n" % line)
else:
print(line)
def error(self, str):
data = "\n\n##### ERROR ##### %s" % str
if self.f:
self.f.write("%s \n" % data)
else:
print(data)
def log(self, str):
if self.f:
self.f.write("%s \n" % str)
else:
print(str)
#
# Shell Environment
#
class Env(object):
def __init__(self, logfile, tests):
self.tests = tests
self.log = Log(logfile)
def shell(self, cmd, path=os.getcwd()):
if path:
os.chdir(path)
self.log.log("==== shell session ===========================")
self.log.log("%s> %s" % (path, cmd))
status = subprocess.call("cd %s; %s" % (path, cmd), shell=True,
stdout=self.log.f, stderr=self.log.f)
self.log.log("status = %s" % status)
self.log.log("============================================== \n\n")
return status
def GetOutput(self, cmd, path=os.getcwd()):
if path:
os.chdir(path)
self.log.log("==== shell session ===========================")
self.log.log("%s> %s" % (path, cmd))
status, out = commands.getstatusoutput(cmd)
self.log.log("status = %s" % status)
self.log.log("out = %s" % out)
self.log.log("============================================== \n\n")
return status, out
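    # Note: commands.getstatusoutput() is Python 2-only (this script targets
    # the python2.7 interpreter in the shebang); subprocess.getstatusoutput()
    # is the Python 3 equivalent.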
#
# Pre-commit checker
#
class PreCommitChecker(Env):
def __init__(self, args):
Env.__init__(self, args.logfile, args.tests)
self.ignore_failure = args.ignore_failure
#
# Get commands for a given job from the determinator file
#
def get_commands(self, test):
status, out = self.GetOutput(
"build_tools/rocksdb-lego-determinator %s" % test, ".")
return status, out
#
# Run a specific CI job
#
def run_test(self, test):
self.log.caption("Running test %s locally" % test)
# get commands for the CI job determinator
status, cmds = self.get_commands(test)
if status != 0:
self.log.error("Error getting commands for test %s" % test)
return False
# Parse the JSON to extract the commands to run
cmds = re.findall("'shell':'([^\']*)'", cmds)
if len(cmds) == 0:
self.log.log("No commands found")
return False
# Run commands
for cmd in cmds:
# Replace J=<..> with the local environment variable
if "J" in os.environ:
cmd = cmd.replace("J=1", "J=%s" % os.environ["J"])
cmd = cmd.replace("make ", "make -j%s " % os.environ["J"])
# Run the command
status = self.shell(cmd, ".")
if status != 0:
self.log.error("Error running command %s for test %s"
% (cmd, test))
return False
return True
#
# Run specified CI jobs
#
def run_tests(self):
if not self.tests:
self.log.error("Invalid args. Please provide tests")
return False
self.print_separator()
self.print_row("TEST", "RESULT")
self.print_separator()
result = True
for test in self.tests:
start_time = time.time()
self.print_test(test)
result = self.run_test(test)
elapsed_min = (time.time() - start_time) / 60
if not result:
self.log.error("Error running test %s" % test)
self.print_result("FAIL (%dm)" % elapsed_min)
if not self.ignore_failure:
return False
result = False
else:
self.print_result("PASS (%dm)" % elapsed_min)
self.print_separator()
return result
#
# Print a line
#
def print_separator(self):
print("".ljust(60, "-"))
#
# Print two colums
#
def print_row(self, c0, c1):
print("%s%s" % (c0.ljust(40), c1.ljust(20)))
def print_test(self, test):
print(test.ljust(40), end="")
sys.stdout.flush()
def print_result(self, result):
print(result.ljust(20))
#
# Main
#
parser = argparse.ArgumentParser(description='RocksDB pre-commit checker.')
# --log <logfile>
parser.add_argument('--logfile', default='/tmp/precommit-check.log',
help='Log file. Default is /tmp/precommit-check.log')
# --ignore_failure
parser.add_argument('--ignore_failure', action='store_true', default=False,
                    help='Continue running tests even if one fails')
# <test ....>
parser.add_argument('tests', nargs='+',
help='CI test(s) to run. e.g: unit punit asan tsan ubsan')
args = parser.parse_args()
checker = PreCommitChecker(args)
print("Please follow log %s" % checker.log.filename)
if not checker.run_tests():
print("Error running tests. Please check log file %s"
% checker.log.filename)
sys.exit(1)
sys.exit(0)
| bsd-3-clause | -1,794,707,275,809,447,200 | 26.105769 | 78 | 0.521462 | false |
kastriothaliti/techstitution | venv/lib/python3.5/site-packages/wheel/util.py | 345 | 4890 | """Utility functions."""
import sys
import os
import base64
import json
import hashlib
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict
__all__ = ['urlsafe_b64encode', 'urlsafe_b64decode', 'utf8',
'to_json', 'from_json', 'matches_requirement']
def urlsafe_b64encode(data):
"""urlsafe_b64encode without padding"""
return base64.urlsafe_b64encode(data).rstrip(binary('='))
def urlsafe_b64decode(data):
"""urlsafe_b64decode without padding"""
pad = b'=' * (4 - (len(data) & 3))
return base64.urlsafe_b64decode(data + pad)
def to_json(o):
'''Convert given data to JSON.'''
return json.dumps(o, sort_keys=True)
def from_json(j):
'''Decode a JSON payload.'''
return json.loads(j)
def open_for_csv(name, mode):
if sys.version_info[0] < 3:
nl = {}
bin = 'b'
else:
nl = { 'newline': '' }
bin = ''
return open(name, mode + bin, **nl)
try:
unicode
def utf8(data):
'''Utf-8 encode data.'''
if isinstance(data, unicode):
return data.encode('utf-8')
return data
except NameError:
def utf8(data):
'''Utf-8 encode data.'''
if isinstance(data, str):
return data.encode('utf-8')
return data
try:
# For encoding ascii back and forth between bytestrings, as is repeatedly
# necessary in JSON-based crypto under Python 3
unicode
def native(s):
return s
def binary(s):
if isinstance(s, unicode):
return s.encode('ascii')
return s
except NameError:
def native(s):
if isinstance(s, bytes):
return s.decode('ascii')
return s
    def binary(s):
        if isinstance(s, str):
            return s.encode('ascii')
        return s
class HashingFile(object):
def __init__(self, fd, hashtype='sha256'):
self.fd = fd
self.hashtype = hashtype
self.hash = hashlib.new(hashtype)
self.length = 0
def write(self, data):
self.hash.update(data)
self.length += len(data)
self.fd.write(data)
def close(self):
self.fd.close()
def digest(self):
if self.hashtype == 'md5':
return self.hash.hexdigest()
digest = self.hash.digest()
return self.hashtype + '=' + native(urlsafe_b64encode(digest))
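# Illustrative use (hypothetical file name): wrap a writable file object and
# read back a RECORD-style digest string afterwards:
#   hf = HashingFile(open('out.whl', 'wb'))
#   hf.write(b'payload')
#   hf.close()
#   entry = hf.digest()  # e.g. 'sha256=<urlsafe-base64>'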
class OrderedDefaultDict(OrderedDict):
def __init__(self, *args, **kwargs):
if not args:
self.default_factory = None
else:
if not (args[0] is None or callable(args[0])):
raise TypeError('first argument must be callable or None')
self.default_factory = args[0]
args = args[1:]
super(OrderedDefaultDict, self).__init__(*args, **kwargs)
def __missing__ (self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = default = self.default_factory()
return default
if sys.platform == 'win32':
import ctypes.wintypes
# CSIDL_APPDATA for reference - not used here for compatibility with
# dirspec, which uses LOCAL_APPDATA and COMMON_APPDATA in that order
csidl = dict(CSIDL_APPDATA=26, CSIDL_LOCAL_APPDATA=28,
CSIDL_COMMON_APPDATA=35)
def get_path(name):
SHGFP_TYPE_CURRENT = 0
buf = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH)
ctypes.windll.shell32.SHGetFolderPathW(0, csidl[name], 0, SHGFP_TYPE_CURRENT, buf)
return buf.value
def save_config_path(*resource):
appdata = get_path("CSIDL_LOCAL_APPDATA")
path = os.path.join(appdata, *resource)
if not os.path.isdir(path):
os.makedirs(path)
return path
def load_config_paths(*resource):
ids = ["CSIDL_LOCAL_APPDATA", "CSIDL_COMMON_APPDATA"]
for id in ids:
base = get_path(id)
path = os.path.join(base, *resource)
if os.path.exists(path):
yield path
else:
def save_config_path(*resource):
import xdg.BaseDirectory
return xdg.BaseDirectory.save_config_path(*resource)
def load_config_paths(*resource):
import xdg.BaseDirectory
return xdg.BaseDirectory.load_config_paths(*resource)
def matches_requirement(req, wheels):
"""List of wheels matching a requirement.
:param req: The requirement to satisfy
:param wheels: List of wheels to search.
"""
try:
from pkg_resources import Distribution, Requirement
except ImportError:
raise RuntimeError("Cannot use requirements without pkg_resources")
req = Requirement.parse(req)
selected = []
for wf in wheels:
f = wf.parsed_filename
dist = Distribution(project_name=f.group("name"), version=f.group("ver"))
if dist in req:
selected.append(wf)
return selected
| gpl-3.0 | -2,763,025,289,578,280,400 | 28.281437 | 90 | 0.604499 | false |
kirienko/gourmet | src/gourmet/threadManager.py | 1 | 15304 | # This module is designed to handle all multi-threading processes in
# Gourmet. Separate threads are limited to doing the following things
# with respect to the GUI:
#
# 1. Start a notification dialog with a progress bar
# 2. Update the progress bar
# 3. Finish successfully
# 4. Stop with an error.
#
# If you need to get user input in the middle of your threaded process,
# you need to redesign so that it works as follows:
#
# 1. Run the first half of your process as a thread.
# 2. Upon completion of your thread, run your dialog to get your user
# input
# 3. Run the second half of your process as a thread.
#
# In this module, we define the following base classes...
#
# A singleton ThreadingManager that tracks how many threads we have
# running, and allows a maximum number of threads to be run at any
# single time.
#
# A SuspendableThread base class for creating and running threaded
# processes.
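#
# For the user-input-in-the-middle case above, a minimal sketch (ask_user()
# is hypothetical; the classes and functions used are defined below):
#
#   t = FirstHalfThread()  # some SuspendableThread subclass
#   t.connect('completed',
#             lambda thr: get_thread_manager().add_thread(
#                 SecondHalfThread(ask_user())))
#   get_thread_manager().add_thread(t)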
import threading
import time
import traceback
import webbrowser
from gettext import ngettext
from typing import Any
from gi.repository import GLib, GObject, Gtk, Pango
from gourmet.gtk_extras.dialog_extras import show_message
from gourmet.i18n import _
# _IdleObject etc. based on example John Stowers
# <[email protected]>
class _IdleObject(GObject.GObject):
"""
Override GObject.GObject to always emit signals in the main thread
by emitting on an idle handler
"""
def __init__(self):
GObject.GObject.__init__(self)
def emit(self, *args):
if args[0] != 'progress':
print('emit', *args)
GLib.idle_add(GObject.GObject.emit, self, *args)
class Terminated (Exception):
def __init__ (self, value):
self.value=value
def __str__(self):
return repr(self.value)
class SuspendableThread (threading.Thread, _IdleObject):
"""A class for long-running processes that shouldn't interrupt the
GUI.
"""
__gsignals__ = {
'completed' : (GObject.SignalFlags.RUN_LAST, None, []),
'progress' : (GObject.SignalFlags.RUN_LAST, None,
[GObject.TYPE_FLOAT, GObject.TYPE_STRING]), #percent complete, progress bar text
'error' : (GObject.SignalFlags.RUN_LAST, None, [GObject.TYPE_INT, # error number
GObject.TYPE_STRING, # error name
GObject.TYPE_STRING # stack trace
]),
'stopped': (GObject.SignalFlags.RUN_LAST, None, []), # emitted when we are stopped
'pause': (GObject.SignalFlags.RUN_LAST, None, []), # emitted when we pause
'resume': (GObject.SignalFlags.RUN_LAST, None, []), # emitted when we resume
'done': (GObject.SignalFlags.RUN_LAST, None, []), # emitted when/however we finish
}
def __init__(self, name=None):
self.initialized = False
#self.name = name
self.suspended = False
self.terminated = False
self.done = False
_IdleObject.__init__(self)
threading.Thread.__init__(self, name=name)
def initialize_thread (self):
self.initialized = True
self.start()
def connect_subthread (self, subthread):
'''For subthread subthread, connect to error and pause signals and
and emit as if they were our own.'''
subthread.connect('error',lambda st,enum,ename,strace: self.emit('error',enum,ename,strace))
subthread.connect('stopped',lambda st: self.emit('stopped'))
subthread.connect('pause',lambda st: self.emit('pause'))
subthread.connect('resume',lambda st: self.emit('resume'))
def run (self):
try:
self.do_run()
except Terminated:
self.emit('stopped')
except:
self.emit('error', 1, f'Error during {self.name}',
traceback.format_exc())
else:
self.emit('completed')
self.done = True
self.emit('done')
def do_run (self):
# Note that sub-classes need to call check_for_sleep
# periodically, otherwise pausing & cancelling won't work
raise NotImplementedError
def suspend (self):
self.suspended = True
def resume (self):
self.suspended = False
def terminate (self):
self.terminated = True
self.emit('stopped')
def check_for_sleep (self):
"""Check whether we have been suspended or terminated.
"""
emit_resume = False
if self.terminated:
raise Terminated('%s terminated'%self.name)
if self.suspended:
self.emit('pause')
emit_resume = True
while self.suspended:
if self.terminated:
raise Terminated('%s terminated'%self.name)
time.sleep(1)
if emit_resume:
self.emit('resume')
def __repr__ (self):
try:
return threading.Thread.__repr__(self)
except AssertionError:
return '<SuspendableThread %s - uninitialized>'%self.name
class NotThreadSafe:
"""Subclasses of this do things that are not thread safe. An error
will be raised if an object that is an instance of this class is
added to a thread manager.
"""
pass
class ThreadManager:
__single = None
@classmethod
def instance(cls):
if ThreadManager.__single is None:
ThreadManager.__single = cls()
return ThreadManager.__single
def __init__ (self, max_concurrent_threads = 2):
self.max_concurrent_threads = max_concurrent_threads
self.thread_queue = []
self.count = 0
self.active_count = 0
self.threads = []
def add_thread (self, thread):
try:
assert(isinstance(thread,SuspendableThread))
except AssertionError:
print('Class',thread,type(thread),'is not a SuspendableThread')
raise
if isinstance(thread,NotThreadSafe):
raise TypeError("Thread %s is NotThreadSafe"%thread)
self.threads.append(thread)
thread.connect('pause',self.register_thread_paused)
thread.connect('resume',self.register_thread_resume)
thread.connect('done',self.register_thread_done)
if self.active_count < self.max_concurrent_threads:
self.active_count += 1
thread.initialize_thread()
else:
self.thread_queue.append(thread)
def register_thread_done (self, thread):
if thread in self.threads:
self.threads.remove(thread)
self.active_count -= 1
self.start_queued_threads()
def register_thread_paused (self, thread):
self.active_count -= 1
self.start_queued_threads()
def register_thread_resume (self, thread):
self.active_count += 1
def resume_thread (self, thread):
if self.active_count < self.max_concurrent_threads:
thread.resume()
self.active_count += 1
else:
self.thread_queue.append(thread)
def start_queued_threads (self):
while self.active_count < self.max_concurrent_threads and self.thread_queue:
thread_to_add = self.thread_queue.pop()
self.active_count += 1
if thread_to_add.initialized:
thread_to_add.resume()
else:
thread_to_add.initialize_thread()
def get_thread_manager ():
return ThreadManager.instance()
class ThreadManagerGui:
__single = None
paused_text = ' (' + _('Paused') + ')'
PAUSE = 10
@classmethod
def instance(cls):
if ThreadManagerGui.__single is None:
ThreadManagerGui.__single = cls()
return ThreadManagerGui.__single
def __init__ (self, messagebox=None):
self.tm = get_thread_manager()
self.threads = {}
if messagebox is None:
# import done here to avoid cycling imports
from gourmet.GourmetRecipeManager import RecGui
self.messagebox = RecGui.instance().messagebox
else:
self.messagebox = messagebox
self.to_remove = [] # a list of widgets to remove when we close...
def response (self, dialog, response):
if response==Gtk.ResponseType.CLOSE:
self.close()
def importer_thread_done(self, thread):
# The following logic allows different messages to be displayed
# depending on if a recipe was actually imported or if the user
# cancelled the request.
if (len(thread.added_recs) > 0):
done_message = ngettext("Recipe successfully imported",
"Recipes successfully imported",
len(thread.added_recs))
elif (len(thread.added_recs) == 0):
done_message = _("Import Unsuccessful")
self.notification_thread_done(thread, done_message)
def notification_thread_done(self, thread, message):
infobox = Gtk.InfoBar()
infobox.set_message_type(Gtk.MessageType.INFO)
infobox.add_button(Gtk.STOCK_CLOSE, Gtk.ResponseType.CLOSE)
infobox.connect('response', lambda ib, response_id: ib.hide())
infobox.show_all()
self.messagebox.pack_start(infobox, True, True, 0)
label = Gtk.Label()
label.set_markup(message)
label.connect('activate-link', lambda lbl, uri: webbrowser.open(uri))
label.show()
infobox.get_content_area().add(label)
self.messagebox.show()
def register_thread_with_dialog (self, description, thread):
threadbox = Gtk.InfoBar()
threadbox.set_message_type(Gtk.MessageType.INFO)
pb = Gtk.ProgressBar()
pb.set_ellipsize(Pango.EllipsizeMode.MIDDLE)
pause_button = Gtk.ToggleButton(label=_('Pause'))
threadbox.add_action_widget(pause_button, self.PAUSE)
dlab = Gtk.Label(label=description)
dlab.set_ellipsize(Pango.EllipsizeMode.MIDDLE)
cancel_button = threadbox.add_button(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL)
vbox = Gtk.VBox()
vbox.pack_start(dlab, expand=True, fill=True, padding=0)
vbox.pack_start(pb, expand=True, fill=True, padding=0)
threadbox.get_content_area().add(vbox)
threadbox.show_all()
self.messagebox.pack_start(threadbox, True, True, 0)
self.messagebox.show()
#for b in threadbox.buttons: b.show()
thread.connect('completed',self.thread_done,threadbox)
thread.connect('error',self.thread_error,threadbox)
thread.connect('stopped',self.thread_stopped,threadbox)
thread.connect('pause',self.thread_pause,threadbox)
thread.connect('resume',self.thread_resume,threadbox)
thread.connect('progress',self.progress_update,pb)
pause_button.connect('clicked',self.pause_cb,thread)
cancel_button.connect('clicked',self.cancel_cb,thread)
def pause_cb (self, b, thread):
if b.get_active():
thread.suspend()
else:
self.tm.resume_thread(thread)
def cancel_cb (self, b, thread):
thread.terminate()
def thread_done (self, thread, threadbox):
for b in threadbox.get_action_area().get_children(): b.hide()
threadbox.add_button(Gtk.STOCK_CLOSE, Gtk.ResponseType.CLOSE)
threadbox.connect('response', lambda ib, response_id: ib.hide())
self.to_remove.append(threadbox)
pb = threadbox.get_content_area().get_children()[0].get_children()[1]
txt = pb.get_text()
if txt:
pb.set_text(txt + ' ('+_('Done')+')')
else:
pb.set_text('Done')
pb.set_fraction(.01)
        for widget in threadbox.get_content_area().get_children()[0].get_children():
widget.hide()
threadbox.hide()
def progress_update (self, thread, perc, txt, pb):
if perc >= 0.0:
pb.set_fraction(perc)
else:
pb.pulse()
pb.set_text(txt)
def thread_error(self, thread: Any, errno: int,
errname: str, trace: str, threadbox: Gtk.InfoBar):
threadbox.get_action_area().get_children()[1].hide() # Pause button
pb = threadbox.get_content_area().get_children()[0].get_children()[1]
pb.set_text(_('Error: %s') % errname)
b = threadbox.add_button(_('Details'), 11)
b.connect('clicked', self.show_traceback, errno, errname, trace)
b.show()
self.to_remove.append(threadbox)
def thread_stopped(self, thread: Any, threadbox: Gtk.InfoBar):
threadbox.hide()
def thread_pause (self, thread: Any, threadbox: Gtk.InfoBar):
pb = threadbox.get_content_area().get_children()[0].get_children()[1]
txt = pb.get_text()
txt += self.paused_text
pb.set_text(txt)
def thread_resume (self, thread: Any, threadbox: Gtk.InfoBar):
pb = threadbox.get_content_area().get_children()[0].get_children()[1]
txt = pb.get_text()
        if txt.endswith(self.paused_text):
txt = txt[:-len(self.paused_text)]
pb.set_text(txt)
def show (self, *args):
self.messagebox.show()
def delete_event_cb (self, *args):
self.messagebox.hide()
return True
def close (self, *args):
while self.to_remove:
box_to_remove = self.to_remove.pop()
            box_to_remove.hide()
            self.messagebox.remove(box_to_remove)
self.messagebox.hide()
def show_traceback (self, button: Gtk.Button, errno: int,
errname: str, traceback: str):
show_message(label=_('Error'),
sublabel=_('Error %s: %s') % (errno,errname),
expander=(_('Traceback'), traceback))
def get_thread_manager_gui ():
return ThreadManagerGui.instance()
if __name__ == '__main__':
from gi.repository import Gtk
class TestThread (SuspendableThread):
def do_run (self):
for n in range(1000):
time.sleep(0.01)
self.emit('progress',n/1000.0,'%s of 1000'%n)
self.check_for_sleep()
class TestError (SuspendableThread):
def do_run (self):
for n in range(1000):
time.sleep(0.01)
if n==100: raise AttributeError("This is a phony error")
self.emit('progress',n/1000.0,'%s of 1000'%n)
self.check_for_sleep()
class TestInterminable (SuspendableThread):
def do_run (self):
while 1:
time.sleep(0.1)
self.emit('progress',-1,'Working interminably')
self.check_for_sleep()
tm = get_thread_manager()
tmg = get_thread_manager_gui()
for desc,thread in [
('Interminable 1',TestInterminable()),
('Linear 1',TestThread()),
('Linear 2',TestThread()),
('Interminable 2',TestInterminable()),
('Error 3',TestError())
]:
tm.add_thread(thread)
tmg.register_thread_with_dialog(desc,thread)
def quit (*args): Gtk.main_quit()
tmg.dialog.connect('delete-event',quit)
tmg.show()
Gtk.main()
| gpl-2.0 | -3,014,952,524,836,481,500 | 33.861048 | 101 | 0.59821 | false |
ilismal/luhnCompliance | luhn.py | 1 | 1535 | # Luhn algorithm check
# From https://en.wikipedia.org/wiki/Luhn_algorithm
def luhn_checksum(card_number):
def digits_of(n):
return [int(d) for d in str(n)]
digits = digits_of(card_number)
odd_digits = digits[-1::-2]
even_digits = digits[-2::-2]
checksum = 0
checksum += sum(odd_digits)
for d in even_digits:
checksum += sum(digits_of(d*2))
return checksum % 10
def is_luhn_valid(card_number):
return luhn_checksum(card_number) == 0
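# Quick sanity check (illustrative): the classic test number 4111111111111111
# is Luhn-valid, so is_luhn_valid(4111111111111111) should return True.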
def readPAN():
# There's no do-while in python, lazy workaround
while True:
# Read the input
# Check that's a number with 16 digits
try:
pan=int(raw_input('PAN: '))
if (len(str(pan)) != 16):
print "PAN must be 16 chars long"
else:
break
except ValueError:
print("Not a number")
return pan
print "Please input the first PAN in range"
firstValue = readPAN()
print "Please input the last PAN in range"
lastValue = readPAN()
# Swap variables if the first value is higher than the last
if (firstValue > lastValue):
firstValue,lastValue = lastValue,firstValue
print "Valid card numbers in range {0}/{1}".format(firstValue,lastValue)
totalValid = 0
# Check if the values in the range are luhn compliant
for ccc in range(firstValue, lastValue + 1):
if is_luhn_valid(ccc):
print "\t" + str(ccc)
totalValid += 1
print "Total: {0} valid cards in range".format(totalValid)
| unlicense | -5,032,799,650,072,255,000 | 29.7 | 72 | 0.637785 | false |
abzaloid/maps | django-project/lib/python2.7/site-packages/django/contrib/sessions/middleware.py | 25 | 2648 | import time
from importlib import import_module
from django.conf import settings
from django.utils.cache import patch_vary_headers
from django.utils.http import cookie_date
class SessionMiddleware(object):
def __init__(self):
engine = import_module(settings.SESSION_ENGINE)
self.SessionStore = engine.SessionStore
def process_request(self, request):
session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME, None)
request.session = self.SessionStore(session_key)
def process_response(self, request, response):
"""
If request.session was modified, or if the configuration is to save the
session every time, save the changes and set a session cookie or delete
the session cookie if the session has been emptied.
"""
try:
accessed = request.session.accessed
modified = request.session.modified
empty = request.session.is_empty()
except AttributeError:
pass
else:
# First check if we need to delete this cookie.
# The session should be deleted only if the session is entirely empty
if settings.SESSION_COOKIE_NAME in request.COOKIES and empty:
response.delete_cookie(settings.SESSION_COOKIE_NAME,
domain=settings.SESSION_COOKIE_DOMAIN)
else:
if accessed:
patch_vary_headers(response, ('Cookie',))
if modified or settings.SESSION_SAVE_EVERY_REQUEST:
if request.session.get_expire_at_browser_close():
max_age = None
expires = None
else:
max_age = request.session.get_expiry_age()
expires_time = time.time() + max_age
expires = cookie_date(expires_time)
# Save the session data and refresh the client cookie.
# Skip session save for 500 responses, refs #3881.
if response.status_code != 500:
request.session.save()
response.set_cookie(settings.SESSION_COOKIE_NAME,
request.session.session_key, max_age=max_age,
expires=expires, domain=settings.SESSION_COOKIE_DOMAIN,
path=settings.SESSION_COOKIE_PATH,
secure=settings.SESSION_COOKIE_SECURE or None,
httponly=settings.SESSION_COOKIE_HTTPONLY or None)
return response
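# Usage note: this middleware is activated by listing
# 'django.contrib.sessions.middleware.SessionMiddleware' in the project's
# middleware setting; it must appear earlier in the list than middleware that
# relies on request.session, such as AuthenticationMiddleware.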
| mit | 7,820,951,604,695,608,000 | 45.45614 | 87 | 0.575151 | false |
hjtabisola/final-project | appengine_config.py | 36 | 3078 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Edit the code below to add you own hooks and modify tailbone's behavior
## Base Tailbone overrides and hooks
## Set the global default namespace
# def namespace_manager_default_namespace_for_request():
# return "my_custom_namespace"
## Use JSONP for all apis
# tailbone_JSONP = False
# Use CORS for all apis
tailbone_CORS = True
tailbone_CORS_RESTRICTED_DOMAINS = ["http://localhost"]
## modify the below functions to change how users are identified
# tailbone_is_current_user_admin =
# tailbone_get_current_user =
# tailbone_create_login_url =
# tailbone_create_logout_url =
## Use cloud store instead of blobstore
# tailboneFiles_CLOUDSTORE = False
## Store counts for restful models accessible in HEAD query
# tailboneRestful_METADATA = False
## If specified, a dict mapping names to tailbone.restful.ScopedModel subclasses; these will be the only models allowed.
## This is a next level step of model restriction to your db, this replaces validation.json
# from google.appengine.ext import ndb
# from tailbone.restful import ScopedModel
# class MyModel(ScopedModel):
# stuff = ndb.IntegerProperty()
# tailboneRestful_DEFINED_MODELS = {"mymodel": MyModel}
# tailboneRestful_RESTRICT_TO_DEFINED_MODELS = False
## Protected model names get overridden by RESTRICTED_MODELS
# tailboneRestful_PROTECTED_MODEL_NAMES = ["(?i)tailbone.*", "custom", "(?i)users"]
## Proxy can only be used for the restricted domains if specified
# tailboneProxy_RESTRICTED_DOMAINS = ["google.com"]
## Cloud store bucket to use default is your application id
# tailboneCloudstore_BUCKET = "mybucketname"
# tailboneTurn_RESTIRCTED_DOMAINS = ["localhost"]
# tailboneTurn_SECRET = "notasecret"
# tailboneMesh_ENABLE_TURN = True
# tailboneMesh_ENABLE_WEBSOCKET = True
## Seconds until room expires
# tailboneMesh_ROOM_EXPIRATION = 86400
## Protected site
# tailboneStaticProtected_PASSWORD = "mypassword"
## the base path for the protected site can change to deploy or something else defaults to app
# tailboneStaticProtected_BASE_PATH = "app"
## Custom load balanced compute engine instance
# tailboneCustomCE_STARTUP_SCRIPT = """
# apt-get install build-essential
# curl -O http://nodejs.org/dist/v0.10.15/node-v0.10.15.tar.gz
# tar xvfz node-v0.10.15.tar.gz
# cd node-v0.10.15
# ./configure
# make
# make install
# cd ..
# rm -rf node-v0.10.15
# rm -f node-v0.10.15.tar.gz
# cat >server.js <<EOL
# %s
# EOL
# npm install ws
# node server.js
# """ % (open("client/mywebsocketserver.js").read(),) | apache-2.0 | 4,617,293,784,267,909,000 | 30.742268 | 102 | 0.749188 | false |
MaizerGomes/youtube-dl | test/test_InfoExtractor.py | 104 | 2243 | #!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import FakeYDL
from youtube_dl.extractor.common import InfoExtractor
from youtube_dl.extractor import YoutubeIE, get_info_extractor
class TestIE(InfoExtractor):
pass
class TestInfoExtractor(unittest.TestCase):
def setUp(self):
self.ie = TestIE(FakeYDL())
def test_ie_key(self):
self.assertEqual(get_info_extractor(YoutubeIE.ie_key()), YoutubeIE)
def test_html_search_regex(self):
html = '<p id="foo">Watch this <a href="http://www.youtube.com/watch?v=BaW_jenozKc">video</a></p>'
search = lambda re, *args: self.ie._html_search_regex(re, html, *args)
self.assertEqual(search(r'<p id="foo">(.+?)</p>', 'foo'), 'Watch this video')
def test_opengraph(self):
ie = self.ie
html = '''
<meta name="og:title" content='Foo'/>
<meta content="Some video's description " name="og:description"/>
<meta property='og:image' content='http://domain.com/pic.jpg?key1=val1&key2=val2'/>
'''
self.assertEqual(ie._og_search_title(html), 'Foo')
self.assertEqual(ie._og_search_description(html), 'Some video\'s description ')
self.assertEqual(ie._og_search_thumbnail(html), 'http://domain.com/pic.jpg?key1=val1&key2=val2')
def test_html_search_meta(self):
ie = self.ie
html = '''
<meta name="a" content="1" />
<meta name='b' content='2'>
<meta name="c" content='3'>
<meta name=d content='4'>
<meta property="e" content='5' >
<meta content="6" name="f">
'''
self.assertEqual(ie._html_search_meta('a', html), '1')
self.assertEqual(ie._html_search_meta('b', html), '2')
self.assertEqual(ie._html_search_meta('c', html), '3')
self.assertEqual(ie._html_search_meta('d', html), '4')
self.assertEqual(ie._html_search_meta('e', html), '5')
self.assertEqual(ie._html_search_meta('f', html), '6')
if __name__ == '__main__':
unittest.main()
| unlicense | -9,194,507,983,609,257,000 | 35.177419 | 106 | 0.606777 | false |
sdklite/gyp | test/win/win-tool/gyptest-win-tool-handles-readonly-files.py | 164 | 1699 | #!/usr/bin/env python
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure overwriting read-only files works as expected (via win-tool).
"""
import TestGyp
import filecmp
import os
import stat
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['ninja'])
# First, create the source files.
os.makedirs('subdir')
read_only_files = ['read-only-file', 'subdir/A', 'subdir/B', 'subdir/C']
for f in read_only_files:
test.write(f, 'source_contents')
test.chmod(f, stat.S_IREAD)
if os.access(f, os.W_OK):
test.fail_test()
# Second, create the read-only destination files. Note that we are creating
# them where the ninja and win-tool will try to copy them to, in order to test
# that copies overwrite the files.
os.makedirs(test.built_file_path('dest/subdir'))
for f in read_only_files:
f = os.path.join('dest', f)
test.write(test.built_file_path(f), 'SHOULD BE OVERWRITTEN')
test.chmod(test.built_file_path(f), stat.S_IREAD)
# Ensure not writable.
if os.access(test.built_file_path(f), os.W_OK):
test.fail_test()
test.run_gyp('copies_readonly_files.gyp')
test.build('copies_readonly_files.gyp')
# Check the destination files were overwritten by ninja.
for f in read_only_files:
f = os.path.join('dest', f)
test.must_contain(test.built_file_path(f), 'source_contents')
# This will fail if the files are not the same mode or contents.
for f in read_only_files:
if not filecmp.cmp(f, test.built_file_path(os.path.join('dest', f))):
test.fail_test()
test.pass_test()
| bsd-3-clause | -5,699,337,702,925,966,000 | 29.890909 | 80 | 0.683932 | false |
jeandet/meson | mesonbuild/modules/gnome.py | 1 | 77105 | # Copyright 2015-2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''This module provides helper functions for Gnome/GLib related
functionality such as gobject-introspection, gresources and gtk-doc'''
import os
import copy
import subprocess
from .. import build
from .. import mlog
from .. import mesonlib
from .. import compilers
from .. import interpreter
from . import GResourceTarget, GResourceHeaderTarget, GirTarget, TypelibTarget, VapiTarget
from . import get_include_args
from . import ExtensionModule
from . import ModuleReturnValue
from ..mesonlib import MesonException, OrderedSet, Popen_safe, extract_as_list
from ..dependencies import Dependency, PkgConfigDependency, InternalDependency
from ..interpreterbase import noKwargs, permittedKwargs, FeatureNew, FeatureNewKwargs
# gresource compilation is broken due to the way
# the resource compiler and Ninja clash about it
#
# https://github.com/ninja-build/ninja/issues/1184
# https://bugzilla.gnome.org/show_bug.cgi?id=774368
gresource_dep_needed_version = '>= 2.51.1'
native_glib_version = None
girwarning_printed = False
gdbuswarning_printed = False
gresource_warning_printed = False
_gir_has_extra_lib_arg = None
def gir_has_extra_lib_arg(intr_obj):
global _gir_has_extra_lib_arg
if _gir_has_extra_lib_arg is not None:
return _gir_has_extra_lib_arg
_gir_has_extra_lib_arg = False
try:
g_ir_scanner = intr_obj.find_program_impl('g-ir-scanner').get_command()
opts = Popen_safe(g_ir_scanner + ['--help'], stderr=subprocess.STDOUT)[1]
_gir_has_extra_lib_arg = '--extra-library' in opts
except (MesonException, FileNotFoundError, subprocess.CalledProcessError):
pass
return _gir_has_extra_lib_arg
class GnomeModule(ExtensionModule):
gir_dep = None
@staticmethod
def _get_native_glib_version(state):
global native_glib_version
if native_glib_version is None:
glib_dep = PkgConfigDependency('glib-2.0', state.environment,
{'native': True, 'required': False})
if glib_dep.found():
native_glib_version = glib_dep.get_version()
else:
mlog.warning('Could not detect glib version, assuming 2.54. '
'You may get build errors if your glib is older.')
native_glib_version = '2.54'
return native_glib_version
def __print_gresources_warning(self, state):
global gresource_warning_printed
if not gresource_warning_printed:
if not mesonlib.version_compare(self._get_native_glib_version(state), gresource_dep_needed_version):
mlog.warning('GLib compiled dependencies do not work reliably with \n'
'the current version of GLib. See the following upstream issue:',
mlog.bold('https://bugzilla.gnome.org/show_bug.cgi?id=774368'))
gresource_warning_printed = True
return []
@staticmethod
def _print_gdbus_warning():
global gdbuswarning_printed
if not gdbuswarning_printed:
mlog.warning('Code generated with gdbus_codegen() requires the root directory be added to\n'
' include_directories of targets with GLib < 2.51.3:',
mlog.bold('https://github.com/mesonbuild/meson/issues/1387'))
gdbuswarning_printed = True
@FeatureNewKwargs('gnome.compile_resources', '0.37.0', ['gresource_bundle', 'export', 'install_header'])
@permittedKwargs({'source_dir', 'c_name', 'dependencies', 'export', 'gresource_bundle', 'install_header',
'install', 'install_dir', 'extra_args', 'build_by_default'})
def compile_resources(self, state, args, kwargs):
self.__print_gresources_warning(state)
glib_version = self._get_native_glib_version(state)
cmd = ['glib-compile-resources', '@INPUT@']
source_dirs, dependencies = mesonlib.extract_as_list(kwargs, 'source_dir', 'dependencies', pop=True)
if len(args) < 2:
raise MesonException('Not enough arguments; the name of the resource '
'and the path to the XML file are required')
# Validate dependencies
for (ii, dep) in enumerate(dependencies):
if hasattr(dep, 'held_object'):
dependencies[ii] = dep = dep.held_object
if not isinstance(dep, (mesonlib.File, build.CustomTarget, build.CustomTargetIndex)):
m = 'Unexpected dependency type {!r} for gnome.compile_resources() ' \
'"dependencies" argument.\nPlease pass the return value of ' \
'custom_target() or configure_file()'
raise MesonException(m.format(dep))
if isinstance(dep, (build.CustomTarget, build.CustomTargetIndex)):
if not mesonlib.version_compare(glib_version, gresource_dep_needed_version):
m = 'The "dependencies" argument of gnome.compile_resources() can not\n' \
'be used with the current version of glib-compile-resources due to\n' \
'<https://bugzilla.gnome.org/show_bug.cgi?id=774368>'
raise MesonException(m)
ifile = args[1]
if isinstance(ifile, mesonlib.File):
# glib-compile-resources will be run inside the source dir,
# so we need either 'src_to_build' or the absolute path.
# Absolute path is the easiest choice.
if ifile.is_built:
ifile = os.path.join(state.environment.get_build_dir(), ifile.subdir, ifile.fname)
else:
ifile = os.path.join(ifile.subdir, ifile.fname)
elif isinstance(ifile, str):
ifile = os.path.join(state.subdir, ifile)
elif isinstance(ifile, (interpreter.CustomTargetHolder,
interpreter.CustomTargetIndexHolder,
interpreter.GeneratedObjectsHolder)):
m = 'Resource xml files generated at build-time cannot be used ' \
'with gnome.compile_resources() because we need to scan ' \
'the xml for dependencies. Use configure_file() instead ' \
'to generate it at configure-time.'
raise MesonException(m)
else:
raise MesonException('Invalid file argument: {!r}'.format(ifile))
depend_files, depends, subdirs = self._get_gresource_dependencies(
state, ifile, source_dirs, dependencies)
# Make source dirs relative to build dir now
source_dirs = [os.path.join(state.build_to_src, state.subdir, d) for d in source_dirs]
# Always include current directory, but after paths set by user
source_dirs.append(os.path.join(state.build_to_src, state.subdir))
# Ensure build directories of generated deps are included
source_dirs += subdirs
for source_dir in OrderedSet(source_dirs):
cmd += ['--sourcedir', source_dir]
if 'c_name' in kwargs:
cmd += ['--c-name', kwargs.pop('c_name')]
export = kwargs.pop('export', False)
if not export:
cmd += ['--internal']
cmd += ['--generate', '--target', '@OUTPUT@']
cmd += mesonlib.stringlistify(kwargs.pop('extra_args', []))
gresource = kwargs.pop('gresource_bundle', False)
if gresource:
output = args[0] + '.gresource'
name = args[0] + '_gresource'
else:
output = args[0] + '.c'
name = args[0] + '_c'
if kwargs.get('install', False) and not gresource:
raise MesonException('The install kwarg only applies to gresource bundles, see install_header')
install_header = kwargs.pop('install_header', False)
if install_header and gresource:
raise MesonException('The install_header kwarg does not apply to gresource bundles')
if install_header and not export:
raise MesonException('GResource header is installed yet export is not enabled')
kwargs['input'] = args[1]
kwargs['output'] = output
kwargs['depends'] = depends
if not mesonlib.version_compare(glib_version, gresource_dep_needed_version):
# This will eventually go out of sync if dependencies are added
kwargs['depend_files'] = depend_files
kwargs['command'] = cmd
else:
depfile = kwargs['output'] + '.d'
kwargs['depfile'] = depfile
kwargs['command'] = copy.copy(cmd) + ['--dependency-file', '@DEPFILE@']
target_c = GResourceTarget(name, state.subdir, state.subproject, kwargs)
if gresource: # Only one target for .gresource files
return ModuleReturnValue(target_c, [target_c])
h_kwargs = {
'command': cmd,
'input': args[1],
'output': args[0] + '.h',
            # The header doesn't actually care about the files, yet it errors out if they are missing
'depends': depends
}
if 'build_by_default' in kwargs:
h_kwargs['build_by_default'] = kwargs['build_by_default']
if install_header:
h_kwargs['install'] = install_header
h_kwargs['install_dir'] = kwargs.get('install_dir',
state.environment.coredata.get_builtin_option('includedir'))
target_h = GResourceHeaderTarget(args[0] + '_h', state.subdir, state.subproject, h_kwargs)
rv = [target_c, target_h]
return ModuleReturnValue(rv, rv)
def _get_gresource_dependencies(self, state, input_file, source_dirs, dependencies):
cmd = ['glib-compile-resources',
input_file,
'--generate-dependencies']
# Prefer generated files over source files
cmd += ['--sourcedir', state.subdir] # Current build dir
for source_dir in source_dirs:
cmd += ['--sourcedir', os.path.join(state.subdir, source_dir)]
pc, stdout, stderr = Popen_safe(cmd, cwd=state.environment.get_source_dir())
if pc.returncode != 0:
m = 'glib-compile-resources failed to get dependencies for {}:\n{}'
mlog.warning(m.format(cmd[1], stderr))
raise subprocess.CalledProcessError(pc.returncode, cmd)
dep_files = stdout.split('\n')[:-1]
depends = []
subdirs = []
for resfile in dep_files[:]:
resbasename = os.path.basename(resfile)
for dep in dependencies:
if hasattr(dep, 'held_object'):
dep = dep.held_object
if isinstance(dep, mesonlib.File):
if dep.fname != resbasename:
continue
dep_files.remove(resfile)
dep_files.append(dep)
subdirs.append(dep.subdir)
break
elif isinstance(dep, (build.CustomTarget, build.CustomTargetIndex)):
fname = None
outputs = {(o, os.path.basename(o)) for o in dep.get_outputs()}
for o, baseo in outputs:
if baseo == resbasename:
fname = o
break
if fname is not None:
dep_files.remove(resfile)
depends.append(dep)
subdirs.append(dep.get_subdir())
break
else:
# In generate-dependencies mode, glib-compile-resources doesn't raise
# an error for missing resources but instead prints whatever filename
# was listed in the input file. That's good because it means we can
# handle resource files that get generated as part of the build, as
# follows.
#
# If there are multiple generated resource files with the same basename
# then this code will get confused.
try:
f = mesonlib.File.from_source_file(state.environment.get_source_dir(),
".", resfile)
except MesonException:
raise MesonException(
'Resource "%s" listed in "%s" was not found. If this is a '
'generated file, pass the target that generates it to '
'gnome.compile_resources() using the "dependencies" '
'keyword argument.' % (resfile, input_file))
dep_files.remove(resfile)
dep_files.append(f)
return dep_files, depends, subdirs
def _get_link_args(self, state, lib, depends, include_rpath=False,
use_gir_args=False):
link_command = []
# Construct link args
if isinstance(lib, build.SharedLibrary):
libdir = os.path.join(state.environment.get_build_dir(), state.backend.get_target_dir(lib))
link_command.append('-L' + libdir)
# Needed for the following binutils bug:
# https://github.com/mesonbuild/meson/issues/1911
# However, g-ir-scanner does not understand -Wl,-rpath
# so we need to use -L instead
for d in state.backend.determine_rpath_dirs(lib):
d = os.path.join(state.environment.get_build_dir(), d)
link_command.append('-L' + d)
if include_rpath:
link_command.append('-Wl,-rpath,' + d)
if include_rpath:
link_command.append('-Wl,-rpath,' + libdir)
depends.append(lib)
if gir_has_extra_lib_arg(self.interpreter) and use_gir_args:
link_command.append('--extra-library=' + lib.name)
else:
link_command.append('-l' + lib.name)
return link_command
def _get_dependencies_flags(self, deps, state, depends, include_rpath=False,
use_gir_args=False, separate_nodedup=False):
cflags = OrderedSet()
internal_ldflags = OrderedSet()
external_ldflags = OrderedSet()
# External linker flags that can't be de-duped reliably because they
# require two args in order, such as -framework AVFoundation
external_ldflags_nodedup = []
gi_includes = OrderedSet()
deps = mesonlib.listify(deps, unholder=True)
for dep in deps:
if isinstance(dep, InternalDependency):
cflags.update(get_include_args(dep.include_directories))
for lib in dep.libraries:
if hasattr(lib, 'held_object'):
lib = lib.held_object
internal_ldflags.update(self._get_link_args(state, lib, depends, include_rpath))
libdepflags = self._get_dependencies_flags(lib.get_external_deps(), state, depends, include_rpath,
use_gir_args, True)
cflags.update(libdepflags[0])
internal_ldflags.update(libdepflags[1])
external_ldflags.update(libdepflags[2])
external_ldflags_nodedup += libdepflags[3]
gi_includes.update(libdepflags[4])
extdepflags = self._get_dependencies_flags(dep.ext_deps, state, depends, include_rpath,
use_gir_args, True)
cflags.update(extdepflags[0])
internal_ldflags.update(extdepflags[1])
external_ldflags.update(extdepflags[2])
external_ldflags_nodedup += extdepflags[3]
gi_includes.update(extdepflags[4])
for source in dep.sources:
if hasattr(source, 'held_object'):
source = source.held_object
if isinstance(source, GirTarget):
gi_includes.update([os.path.join(state.environment.get_build_dir(),
source.get_subdir())])
# This should be any dependency other than an internal one.
elif isinstance(dep, Dependency):
cflags.update(dep.get_compile_args())
ldflags = iter(dep.get_link_args(raw=True))
for lib in ldflags:
if (os.path.isabs(lib) and
# For PkgConfigDependency only:
getattr(dep, 'is_libtool', False)):
lib_dir = os.path.dirname(lib)
external_ldflags.update(["-L%s" % lib_dir])
if include_rpath:
external_ldflags.update(['-Wl,-rpath {}'.format(lib_dir)])
libname = os.path.basename(lib)
if libname.startswith("lib"):
libname = libname[3:]
libname = libname.split(".so")[0]
lib = "-l%s" % libname
# FIXME: Hack to avoid passing some compiler options in
if lib.startswith("-W"):
continue
# If it's a framework arg, slurp the framework name too
# to preserve the order of arguments
if lib == '-framework':
external_ldflags_nodedup += [lib, next(ldflags)]
else:
external_ldflags.update([lib])
if isinstance(dep, PkgConfigDependency):
girdir = dep.get_pkgconfig_variable("girdir", {'default': ''})
if girdir:
gi_includes.update([girdir])
elif isinstance(dep, (build.StaticLibrary, build.SharedLibrary)):
cflags.update(get_include_args(dep.get_include_dirs()))
depends.append(dep)
else:
mlog.log('dependency {!r} not handled to build gir files'.format(dep))
continue
if gir_has_extra_lib_arg(self.interpreter) and use_gir_args:
def fix_ldflags(ldflags):
fixed_ldflags = OrderedSet()
for ldflag in ldflags:
if ldflag.startswith("-l"):
ldflag = ldflag.replace('-l', '--extra-library=', 1)
fixed_ldflags.add(ldflag)
return fixed_ldflags
internal_ldflags = fix_ldflags(internal_ldflags)
external_ldflags = fix_ldflags(external_ldflags)
if not separate_nodedup:
external_ldflags.update(external_ldflags_nodedup)
return cflags, internal_ldflags, external_ldflags, gi_includes
else:
return cflags, internal_ldflags, external_ldflags, external_ldflags_nodedup, gi_includes
def _unwrap_gir_target(self, girtarget):
while hasattr(girtarget, 'held_object'):
girtarget = girtarget.held_object
if not isinstance(girtarget, (build.Executable, build.SharedLibrary)):
raise MesonException('Gir target must be an executable or shared library')
return girtarget
def _get_gir_dep(self, state):
try:
gir_dep = self.gir_dep or PkgConfigDependency('gobject-introspection-1.0',
state.environment,
{'native': True})
pkgargs = gir_dep.get_compile_args()
except Exception:
raise MesonException('gobject-introspection dependency was not found, gir cannot be generated.')
return gir_dep, pkgargs
def _scan_header(self, kwargs):
ret = []
header = kwargs.pop('header', None)
if header:
if not isinstance(header, str):
raise MesonException('header must be a string')
ret = ['--c-include=' + header]
return ret
def _scan_extra_args(self, kwargs):
return mesonlib.stringlistify(kwargs.pop('extra_args', []))
def _scan_link_withs(self, state, depends, kwargs):
ret = []
if 'link_with' in kwargs:
link_with = mesonlib.extract_as_list(kwargs, 'link_with', pop = True)
for link in link_with:
ret += self._get_link_args(state, link.held_object, depends,
use_gir_args=True)
return ret
# May mutate depends and gir_inc_dirs
def _scan_include(self, state, depends, gir_inc_dirs, kwargs):
ret = []
if 'includes' in kwargs:
includes = mesonlib.extract_as_list(kwargs, 'includes', pop = True)
for inc in includes:
if hasattr(inc, 'held_object'):
inc = inc.held_object
if isinstance(inc, str):
ret += ['--include=%s' % (inc, )]
elif isinstance(inc, GirTarget):
gir_inc_dirs += [
os.path.join(state.environment.get_build_dir(),
inc.get_subdir()),
]
ret += [
"--include-uninstalled=%s" % (os.path.join(inc.get_subdir(), inc.get_basename()), )
]
depends += [inc]
else:
raise MesonException(
'Gir includes must be str, GirTarget, or list of them')
return ret
def _scan_symbol_prefix(self, kwargs):
ret = []
if 'symbol_prefix' in kwargs:
sym_prefixes = mesonlib.stringlistify(kwargs.pop('symbol_prefix', []))
ret += ['--symbol-prefix=%s' % sym_prefix for sym_prefix in sym_prefixes]
return ret
def _scan_identifier_prefix(self, kwargs):
ret = []
if 'identifier_prefix' in kwargs:
identifier_prefix = kwargs.pop('identifier_prefix')
if not isinstance(identifier_prefix, str):
raise MesonException('Gir identifier prefix must be str')
ret += ['--identifier-prefix=%s' % identifier_prefix]
return ret
def _scan_export_packages(self, kwargs):
ret = []
if 'export_packages' in kwargs:
pkgs = kwargs.pop('export_packages')
if isinstance(pkgs, str):
ret += ['--pkg-export=%s' % pkgs]
elif isinstance(pkgs, list):
ret += ['--pkg-export=%s' % pkg for pkg in pkgs]
else:
raise MesonException('Gir export packages must be str or list')
return ret
def _scan_inc_dirs(self, kwargs):
ret = mesonlib.extract_as_list(kwargs, 'include_directories', pop = True)
for incd in ret:
if not isinstance(incd.held_object, (str, build.IncludeDirs)):
raise MesonException(
'Gir include dirs should be include_directories().')
return ret
def _scan_langs(self, state, langs):
ret = []
for lang in langs:
for link_arg in state.environment.coredata.get_external_link_args(lang):
if link_arg.startswith('-L'):
ret.append(link_arg)
return ret
def _scan_gir_targets(self, state, girtargets):
ret = []
for girtarget in girtargets:
if isinstance(girtarget, build.Executable):
ret += ['--program', girtarget]
elif isinstance(girtarget, build.SharedLibrary):
libname = girtarget.get_basename()
# Needed for the following binutils bug:
# https://github.com/mesonbuild/meson/issues/1911
# However, g-ir-scanner does not understand -Wl,-rpath
# so we need to use -L instead
for d in state.backend.determine_rpath_dirs(girtarget):
d = os.path.join(state.environment.get_build_dir(), d)
ret.append('-L' + d)
ret += ['--library', libname]
# need to put our output directory first as we need to use the
# generated libraries instead of any possibly installed system/prefix
# ones.
ret += ["-L@PRIVATE_OUTDIR_ABS_%s@" % girtarget.get_id()]
return ret
def _get_girtargets_langs_compilers(self, girtargets):
ret = []
for girtarget in girtargets:
for lang, compiler in girtarget.compilers.items():
# XXX: Can you use g-i with any other language?
if lang in ('c', 'cpp', 'objc', 'objcpp', 'd'):
ret.append((lang, compiler))
break
return ret
def _get_gir_targets_deps(self, girtargets):
ret = []
for girtarget in girtargets:
ret += girtarget.get_all_link_deps()
ret += girtarget.get_external_deps()
return ret
def _get_gir_targets_inc_dirs(self, girtargets):
ret = []
for girtarget in girtargets:
ret += girtarget.get_include_dirs()
return ret
def _get_langs_compilers_flags(self, state, langs_compilers):
cflags = []
internal_ldflags = []
external_ldflags = []
for lang, compiler in langs_compilers:
if state.global_args.get(lang):
cflags += state.global_args[lang]
if state.project_args.get(lang):
cflags += state.project_args[lang]
if 'b_sanitize' in compiler.base_options:
sanitize = state.environment.coredata.base_options['b_sanitize'].value
cflags += compilers.sanitizer_compile_args(sanitize)
if 'address' in sanitize.split(','):
internal_ldflags += ['-lasan'] # This must be first in ldflags
# FIXME: Linking directly to libasan is not recommended but g-ir-scanner
# does not understand -f LDFLAGS. https://bugzilla.gnome.org/show_bug.cgi?id=783892
# ldflags += compilers.sanitizer_link_args(sanitize)
return cflags, internal_ldflags, external_ldflags
def _make_gir_filelist(self, state, srcdir, ns, nsversion, girtargets, libsources):
gir_filelist_dir = state.backend.get_target_private_dir_abs(girtargets[0])
if not os.path.isdir(gir_filelist_dir):
os.mkdir(gir_filelist_dir)
gir_filelist_filename = os.path.join(gir_filelist_dir, '%s_%s_gir_filelist' % (ns, nsversion))
with open(gir_filelist_filename, 'w', encoding='utf-8') as gir_filelist:
for s in libsources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, (build.CustomTarget, build.CustomTargetIndex)):
for custom_output in s.get_outputs():
gir_filelist.write(os.path.join(state.environment.get_build_dir(),
state.backend.get_target_dir(s),
custom_output) + '\n')
elif isinstance(s, mesonlib.File):
gir_filelist.write(s.rel_to_builddir(state.build_to_src) + '\n')
elif isinstance(s, build.GeneratedList):
for gen_src in s.get_outputs():
gir_filelist.write(os.path.join(srcdir, gen_src) + '\n')
else:
gir_filelist.write(os.path.join(srcdir, s) + '\n')
return gir_filelist_filename
def _make_gir_target(self, state, girfile, scan_command, depends, kwargs):
scankwargs = {'output': girfile,
'command': scan_command,
'depends': depends}
if 'install' in kwargs:
scankwargs['install'] = kwargs['install']
scankwargs['install_dir'] = kwargs.get('install_dir_gir',
os.path.join(state.environment.get_datadir(), 'gir-1.0'))
if 'build_by_default' in kwargs:
scankwargs['build_by_default'] = kwargs['build_by_default']
return GirTarget(girfile, state.subdir, state.subproject, scankwargs)
def _make_typelib_target(self, state, typelib_output, typelib_cmd, kwargs):
typelib_kwargs = {
'output': typelib_output,
'command': typelib_cmd,
}
if 'install' in kwargs:
typelib_kwargs['install'] = kwargs['install']
typelib_kwargs['install_dir'] = kwargs.get('install_dir_typelib',
os.path.join(state.environment.get_libdir(), 'girepository-1.0'))
if 'build_by_default' in kwargs:
typelib_kwargs['build_by_default'] = kwargs['build_by_default']
return TypelibTarget(typelib_output, state.subdir, state.subproject, typelib_kwargs)
# May mutate depends
def _gather_typelib_includes_and_update_depends(self, state, deps, depends):
# Need to recursively add deps on GirTarget sources from our
# dependencies and also find the include directories needed for the
# typelib generation custom target below.
typelib_includes = []
for dep in deps:
if hasattr(dep, 'held_object'):
dep = dep.held_object
# Add a dependency on each GirTarget listed in dependencies and add
# the directory where it will be generated to the typelib includes
if isinstance(dep, InternalDependency):
for source in dep.sources:
if hasattr(source, 'held_object'):
source = source.held_object
if isinstance(source, GirTarget) and source not in depends:
depends.append(source)
subdir = os.path.join(state.environment.get_build_dir(),
source.get_subdir())
if subdir not in typelib_includes:
typelib_includes.append(subdir)
# Do the same, but for dependencies of dependencies. These are
# stored in the list of generated sources for each link dep (from
# girtarget.get_all_link_deps() above).
# FIXME: Store this in the original form from declare_dependency()
# so it can be used here directly.
elif isinstance(dep, build.SharedLibrary):
for source in dep.generated:
if isinstance(source, GirTarget):
subdir = os.path.join(state.environment.get_build_dir(),
source.get_subdir())
if subdir not in typelib_includes:
typelib_includes.append(subdir)
elif isinstance(dep, PkgConfigDependency):
girdir = dep.get_pkgconfig_variable("girdir", {'default': ''})
if girdir and girdir not in typelib_includes:
typelib_includes.append(girdir)
return typelib_includes
def _get_external_args_for_langs(self, state, langs):
ret = []
for lang in langs:
ret += state.environment.coredata.get_external_args(lang)
return ret
@staticmethod
def _get_scanner_cflags(cflags):
'g-ir-scanner only accepts -I/-D/-U; must ignore all other flags'
for f in cflags:
if f.startswith(('-D', '-U', '-I')):
yield f
@staticmethod
def _get_scanner_ldflags(ldflags):
'g-ir-scanner only accepts -L/-l; must ignore -F and other linker flags'
for f in ldflags:
if f.startswith(('-L', '-l', '--extra-library')):
yield f
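    # Illustrative behaviour of the two filters above, on hypothetical inputs:
    #   list(_get_scanner_cflags(['-I/usr/include/glib-2.0', '-Wall', '-DDEBUG']))
    #       -> ['-I/usr/include/glib-2.0', '-DDEBUG']
    #   list(_get_scanner_ldflags(['-L/opt/lib', '-framework', 'Foo', '-lglib-2.0']))
    #       -> ['-L/opt/lib', '-lglib-2.0']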
@FeatureNewKwargs('build target', '0.40.0', ['build_by_default'])
@permittedKwargs({'sources', 'nsversion', 'namespace', 'symbol_prefix', 'identifier_prefix',
'export_packages', 'includes', 'dependencies', 'link_with', 'include_directories',
'install', 'install_dir_gir', 'install_dir_typelib', 'extra_args',
'packages', 'header', 'build_by_default'})
def generate_gir(self, state, args, kwargs):
if not args:
raise MesonException('generate_gir takes at least one argument')
if kwargs.get('install_dir'):
raise MesonException('install_dir is not supported with generate_gir(), see "install_dir_gir" and "install_dir_typelib"')
giscanner = self.interpreter.find_program_impl('g-ir-scanner')
gicompiler = self.interpreter.find_program_impl('g-ir-compiler')
girtargets = [self._unwrap_gir_target(arg) for arg in args]
if len(girtargets) > 1 and any([isinstance(el, build.Executable) for el in girtargets]):
raise MesonException('generate_gir only accepts a single argument when one of the arguments is an executable')
self.gir_dep, pkgargs = self._get_gir_dep(state)
ns = kwargs.pop('namespace')
nsversion = kwargs.pop('nsversion')
libsources = mesonlib.extract_as_list(kwargs, 'sources', pop=True)
girfile = '%s-%s.gir' % (ns, nsversion)
srcdir = os.path.join(state.environment.get_source_dir(), state.subdir)
builddir = os.path.join(state.environment.get_build_dir(), state.subdir)
depends = [] + girtargets
gir_inc_dirs = []
langs_compilers = self._get_girtargets_langs_compilers(girtargets)
cflags, internal_ldflags, external_ldflags = self._get_langs_compilers_flags(state, langs_compilers)
deps = self._get_gir_targets_deps(girtargets)
deps += extract_as_list(kwargs, 'dependencies', pop=True, unholder=True)
typelib_includes = self._gather_typelib_includes_and_update_depends(state, deps, depends)
        # ldflags will be misinterpreted by the gir scanner (showing
        # spurious dependencies), but building GStreamer fails if they
        # are not used here.
dep_cflags, dep_internal_ldflags, dep_external_ldflags, gi_includes = \
self._get_dependencies_flags(deps, state, depends, use_gir_args=True)
cflags += list(self._get_scanner_cflags(dep_cflags))
cflags += list(self._get_scanner_cflags(self._get_external_args_for_langs(state, [lc[0] for lc in langs_compilers])))
internal_ldflags += list(self._get_scanner_ldflags(dep_internal_ldflags))
external_ldflags += list(self._get_scanner_ldflags(dep_external_ldflags))
girtargets_inc_dirs = self._get_gir_targets_inc_dirs(girtargets)
inc_dirs = self._scan_inc_dirs(kwargs)
scan_command = [giscanner]
scan_command += pkgargs
scan_command += ['--no-libtool']
scan_command += ['--namespace=' + ns, '--nsversion=' + nsversion]
scan_command += ['--warn-all']
scan_command += ['--output', '@OUTPUT@']
scan_command += self._scan_header(kwargs)
scan_command += self._scan_extra_args(kwargs)
scan_command += ['-I' + srcdir, '-I' + builddir]
scan_command += get_include_args(girtargets_inc_dirs)
scan_command += ['--filelist=' + self._make_gir_filelist(state, srcdir, ns, nsversion, girtargets, libsources)]
scan_command += self._scan_link_withs(state, depends, kwargs)
scan_command += self._scan_include(state, depends, gir_inc_dirs, kwargs)
scan_command += self._scan_symbol_prefix(kwargs)
scan_command += self._scan_identifier_prefix(kwargs)
scan_command += self._scan_export_packages(kwargs)
scan_command += ['--cflags-begin']
scan_command += cflags
scan_command += ['--cflags-end']
scan_command += get_include_args(inc_dirs)
scan_command += get_include_args(list(gi_includes) + gir_inc_dirs + inc_dirs, prefix='--add-include-path=')
scan_command += list(internal_ldflags)
scan_command += self._scan_gir_targets(state, girtargets)
scan_command += self._scan_langs(state, [lc[0] for lc in langs_compilers])
scan_command += list(external_ldflags)
scan_target = self._make_gir_target(state, girfile, scan_command, depends, kwargs)
typelib_output = '%s-%s.typelib' % (ns, nsversion)
typelib_cmd = [gicompiler, scan_target, '--output', '@OUTPUT@']
typelib_cmd += get_include_args(gir_inc_dirs, prefix='--includedir=')
for incdir in typelib_includes:
typelib_cmd += ["--includedir=" + incdir]
typelib_target = self._make_typelib_target(state, typelib_output, typelib_cmd, kwargs)
rv = [scan_target, typelib_target]
return ModuleReturnValue(rv, rv)
@FeatureNewKwargs('build target', '0.40.0', ['build_by_default'])
@permittedKwargs({'build_by_default', 'depend_files'})
def compile_schemas(self, state, args, kwargs):
if args:
raise MesonException('Compile_schemas does not take positional arguments.')
srcdir = os.path.join(state.build_to_src, state.subdir)
outdir = state.subdir
cmd = [self.interpreter.find_program_impl('glib-compile-schemas')]
cmd += ['--targetdir', outdir, srcdir]
kwargs['command'] = cmd
kwargs['input'] = []
kwargs['output'] = 'gschemas.compiled'
if state.subdir == '':
targetname = 'gsettings-compile'
else:
targetname = 'gsettings-compile-' + state.subdir.replace('/', '_')
target_g = build.CustomTarget(targetname, state.subdir, state.subproject, kwargs)
return ModuleReturnValue(target_g, [target_g])
@permittedKwargs({'sources', 'media', 'symlink_media', 'languages'})
def yelp(self, state, args, kwargs):
if len(args) < 1:
raise MesonException('Yelp requires a project id')
project_id = args[0]
sources = mesonlib.stringlistify(kwargs.pop('sources', []))
if not sources:
if len(args) > 1:
sources = mesonlib.stringlistify(args[1:])
if not sources:
raise MesonException('Yelp requires a list of sources')
source_str = '@@'.join(sources)
langs = mesonlib.stringlistify(kwargs.pop('languages', []))
if langs:
mlog.deprecation('''The "languages" argument of gnome.yelp() is deprecated.
Use a LINGUAS file in the sources directory instead.
This will become a hard error in the future.''')
media = mesonlib.stringlistify(kwargs.pop('media', []))
symlinks = kwargs.pop('symlink_media', True)
if not isinstance(symlinks, bool):
raise MesonException('symlink_media must be a boolean')
if kwargs:
raise MesonException('Unknown arguments passed: {}'.format(', '.join(kwargs.keys())))
script = state.environment.get_build_command()
args = ['--internal',
'yelphelper',
'install',
'--subdir=' + state.subdir,
'--id=' + project_id,
'--installdir=' + os.path.join(state.environment.get_datadir(), 'help'),
'--sources=' + source_str]
if symlinks:
args.append('--symlinks=true')
if media:
args.append('--media=' + '@@'.join(media))
if langs:
args.append('--langs=' + '@@'.join(langs))
inscript = build.RunScript(script, args)
potargs = state.environment.get_build_command() + [
'--internal', 'yelphelper', 'pot',
'--subdir=' + state.subdir,
'--id=' + project_id,
'--sources=' + source_str,
]
pottarget = build.RunTarget('help-' + project_id + '-pot', potargs[0],
potargs[1:], [], state.subdir, state.subproject)
poargs = state.environment.get_build_command() + [
'--internal', 'yelphelper', 'update-po',
'--subdir=' + state.subdir,
'--id=' + project_id,
'--sources=' + source_str,
'--langs=' + '@@'.join(langs),
]
potarget = build.RunTarget('help-' + project_id + '-update-po', poargs[0],
poargs[1:], [], state.subdir, state.subproject)
rv = [inscript, pottarget, potarget]
return ModuleReturnValue(None, rv)
@FeatureNewKwargs('gnome.gtkdoc', '0.37.0', ['namespace', 'mode'])
@permittedKwargs({'main_xml', 'main_sgml', 'src_dir', 'dependencies', 'install',
'install_dir', 'scan_args', 'scanobjs_args', 'gobject_typesfile',
'fixxref_args', 'html_args', 'html_assets', 'content_files',
'mkdb_args', 'ignore_headers', 'include_directories',
'namespace', 'mode', 'expand_content_files'})
def gtkdoc(self, state, args, kwargs):
if len(args) != 1:
raise MesonException('Gtkdoc must have one positional argument.')
modulename = args[0]
if not isinstance(modulename, str):
raise MesonException('Gtkdoc arg must be string.')
if 'src_dir' not in kwargs:
raise MesonException('Keyword argument src_dir missing.')
main_file = kwargs.get('main_sgml', '')
if not isinstance(main_file, str):
raise MesonException('Main sgml keyword argument must be a string.')
main_xml = kwargs.get('main_xml', '')
if not isinstance(main_xml, str):
raise MesonException('Main xml keyword argument must be a string.')
if main_xml != '':
if main_file != '':
raise MesonException('You can only specify main_xml or main_sgml, not both.')
main_file = main_xml
targetname = modulename + '-doc'
command = state.environment.get_build_command()
namespace = kwargs.get('namespace', '')
mode = kwargs.get('mode', 'auto')
VALID_MODES = ('xml', 'sgml', 'none', 'auto')
if mode not in VALID_MODES:
raise MesonException('gtkdoc: Mode {} is not a valid mode: {}'.format(mode, VALID_MODES))
src_dirs = mesonlib.extract_as_list(kwargs, 'src_dir')
header_dirs = []
for src_dir in src_dirs:
if hasattr(src_dir, 'held_object'):
src_dir = src_dir.held_object
if not isinstance(src_dir, build.IncludeDirs):
raise MesonException('Invalid keyword argument for src_dir.')
for inc_dir in src_dir.get_incdirs():
header_dirs.append(os.path.join(state.environment.get_source_dir(),
src_dir.get_curdir(), inc_dir))
header_dirs.append(os.path.join(state.environment.get_build_dir(),
src_dir.get_curdir(), inc_dir))
else:
header_dirs.append(src_dir)
args = ['--internal', 'gtkdoc',
'--sourcedir=' + state.environment.get_source_dir(),
'--builddir=' + state.environment.get_build_dir(),
'--subdir=' + state.subdir,
'--headerdirs=' + '@@'.join(header_dirs),
'--mainfile=' + main_file,
'--modulename=' + modulename,
'--mode=' + mode]
if namespace:
args.append('--namespace=' + namespace)
args += self._unpack_args('--htmlargs=', 'html_args', kwargs)
args += self._unpack_args('--scanargs=', 'scan_args', kwargs)
args += self._unpack_args('--scanobjsargs=', 'scanobjs_args', kwargs)
args += self._unpack_args('--gobjects-types-file=', 'gobject_typesfile', kwargs, state)
args += self._unpack_args('--fixxrefargs=', 'fixxref_args', kwargs)
args += self._unpack_args('--mkdbargs=', 'mkdb_args', kwargs)
args += self._unpack_args('--html-assets=', 'html_assets', kwargs, state)
depends = []
content_files = []
for s in mesonlib.extract_as_list(kwargs, 'content_files'):
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, (build.CustomTarget, build.CustomTargetIndex)):
depends.append(s)
for o in s.get_outputs():
content_files.append(os.path.join(state.environment.get_build_dir(),
state.backend.get_target_dir(s),
o))
elif isinstance(s, mesonlib.File):
content_files.append(s.absolute_path(state.environment.get_source_dir(),
state.environment.get_build_dir()))
elif isinstance(s, build.GeneratedList):
depends.append(s)
for gen_src in s.get_outputs():
content_files.append(os.path.join(state.environment.get_source_dir(),
state.subdir,
gen_src))
elif isinstance(s, str):
content_files.append(os.path.join(state.environment.get_source_dir(),
state.subdir,
s))
else:
raise MesonException(
'Invalid object type: {!r}'.format(s.__class__.__name__))
args += ['--content-files=' + '@@'.join(content_files)]
args += self._unpack_args('--expand-content-files=', 'expand_content_files', kwargs, state)
args += self._unpack_args('--ignore-headers=', 'ignore_headers', kwargs)
args += self._unpack_args('--installdir=', 'install_dir', kwargs)
args += self._get_build_args(kwargs, state, depends)
res = [build.RunTarget(targetname, command[0], command[1:] + args, depends, state.subdir, state.subproject)]
if kwargs.get('install', True):
res.append(build.RunScript(command, args))
return ModuleReturnValue(None, res)
def _get_build_args(self, kwargs, state, depends):
args = []
deps = extract_as_list(kwargs, 'dependencies', unholder=True)
cflags, internal_ldflags, external_ldflags, gi_includes = \
self._get_dependencies_flags(deps, state, depends, include_rpath=True)
inc_dirs = mesonlib.extract_as_list(kwargs, 'include_directories')
for incd in inc_dirs:
if not isinstance(incd.held_object, (str, build.IncludeDirs)):
raise MesonException(
'Gir include dirs should be include_directories().')
cflags.update(get_include_args(inc_dirs))
ldflags = OrderedSet()
ldflags.update(internal_ldflags)
ldflags.update(external_ldflags)
if state.environment.is_cross_build():
compiler = state.environment.coredata.cross_compilers.get('c')
else:
cflags.update(state.environment.coredata.get_external_args('c'))
ldflags.update(state.environment.coredata.get_external_link_args('c'))
compiler = state.environment.coredata.compilers.get('c')
if compiler:
args += ['--cc=%s' % ' '.join(compiler.get_exelist())]
args += ['--ld=%s' % ' '.join(compiler.get_linker_exelist())]
if cflags:
args += ['--cflags=%s' % ' '.join(cflags)]
if ldflags:
args += ['--ldflags=%s' % ' '.join(ldflags)]
return args
@noKwargs
def gtkdoc_html_dir(self, state, args, kwargs):
if len(args) != 1:
raise MesonException('Must have exactly one argument.')
modulename = args[0]
if not isinstance(modulename, str):
raise MesonException('Argument must be a string')
return ModuleReturnValue(os.path.join('share/gtk-doc/html', modulename), [])
@staticmethod
def _unpack_args(arg, kwarg_name, kwargs, expend_file_state=None):
if kwarg_name not in kwargs:
return []
new_args = mesonlib.extract_as_list(kwargs, kwarg_name)
args = []
for i in new_args:
if expend_file_state and isinstance(i, mesonlib.File):
i = i.absolute_path(expend_file_state.environment.get_source_dir(), expend_file_state.environment.get_build_dir())
elif expend_file_state and isinstance(i, str):
i = os.path.join(expend_file_state.environment.get_source_dir(), expend_file_state.subdir, i)
elif not isinstance(i, str):
raise MesonException(kwarg_name + ' values must be strings.')
args.append(i)
if args:
return [arg + '@@'.join(args)]
return []
def _get_autocleanup_args(self, kwargs, glib_version):
if not mesonlib.version_compare(glib_version, '>= 2.49.1'):
# Warn if requested, silently disable if not
if 'autocleanup' in kwargs:
mlog.warning('Glib version ({}) is too old to support the \'autocleanup\' '
'kwarg, need 2.49.1 or newer'.format(glib_version))
return []
autocleanup = kwargs.pop('autocleanup', 'all')
values = ('none', 'objects', 'all')
if autocleanup not in values:
raise MesonException('gdbus_codegen does not support {!r} as an autocleanup value, '
'must be one of: {!r}'.format(autocleanup, ', '.join(values)))
return ['--c-generate-autocleanup', autocleanup]
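    # Illustrative mapping (hypothetical meson-side call): passing
    # autocleanup: 'objects' to gdbus_codegen() yields
    # ['--c-generate-autocleanup', 'objects'] here; valid values are
    # 'none', 'objects' and 'all'.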
@FeatureNewKwargs('build target', '0.46.0', ['install_header', 'install_dir', 'sources'])
@FeatureNewKwargs('build target', '0.40.0', ['build_by_default'])
@FeatureNewKwargs('build target', '0.47.0', ['extra_args', 'autocleanup'])
@permittedKwargs({'interface_prefix', 'namespace', 'extra_args', 'autocleanup', 'object_manager', 'build_by_default',
'annotations', 'docbook', 'install_header', 'install_dir', 'sources'})
def gdbus_codegen(self, state, args, kwargs):
if len(args) not in (1, 2):
raise MesonException('gdbus_codegen takes at most two arguments, name and xml file.')
namebase = args[0]
xml_files = args[1:]
cmd = [self.interpreter.find_program_impl('gdbus-codegen')]
extra_args = mesonlib.stringlistify(kwargs.pop('extra_args', []))
cmd += extra_args
# Autocleanup supported?
glib_version = self._get_native_glib_version(state)
cmd += self._get_autocleanup_args(kwargs, glib_version)
if 'interface_prefix' in kwargs:
cmd += ['--interface-prefix', kwargs.pop('interface_prefix')]
if 'namespace' in kwargs:
cmd += ['--c-namespace', kwargs.pop('namespace')]
if kwargs.get('object_manager', False):
cmd += ['--c-generate-object-manager']
if 'sources' in kwargs:
xml_files += mesonlib.listify(kwargs.pop('sources'))
build_by_default = kwargs.get('build_by_default', False)
# Annotations are a bit ugly in that they are a list of lists of strings...
annotations = kwargs.pop('annotations', [])
if not isinstance(annotations, list):
raise MesonException('annotations takes a list')
if annotations and isinstance(annotations, list) and not isinstance(annotations[0], list):
annotations = [annotations]
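        # Illustrative annotation shapes (hypothetical values): either a single
        # [ELEMENT, KEY, VALUE] triple such as
        #   ['com.example.Frobber', 'org.freedesktop.DBus.Deprecated', 'true']
        # or a list of such triples; the single-triple form was wrapped above.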
for annotation in annotations:
if len(annotation) != 3 or not all(isinstance(i, str) for i in annotation):
raise MesonException('Annotations must be made up of 3 strings for ELEMENT, KEY, and VALUE')
cmd += ['--annotate'] + annotation
targets = []
install_header = kwargs.get('install_header', False)
install_dir = kwargs.get('install_dir', state.environment.coredata.get_builtin_option('includedir'))
output = namebase + '.c'
# Added in https://gitlab.gnome.org/GNOME/glib/commit/e4d68c7b3e8b01ab1a4231bf6da21d045cb5a816 (2.55.2)
# Fixed in https://gitlab.gnome.org/GNOME/glib/commit/cd1f82d8fc741a2203582c12cc21b4dacf7e1872 (2.56.2)
if mesonlib.version_compare(glib_version, '>= 2.56.2'):
custom_kwargs = {'input': xml_files,
'output': output,
'command': cmd + ['--body', '--output', '@OUTPUT@', '@INPUT@'],
'build_by_default': build_by_default
}
else:
if 'docbook' in kwargs:
docbook = kwargs['docbook']
if not isinstance(docbook, str):
raise MesonException('docbook value must be a string.')
cmd += ['--generate-docbook', docbook]
# https://git.gnome.org/browse/glib/commit/?id=ee09bb704fe9ccb24d92dd86696a0e6bb8f0dc1a
if mesonlib.version_compare(glib_version, '>= 2.51.3'):
cmd += ['--output-directory', '@OUTDIR@', '--generate-c-code', namebase, '@INPUT@']
else:
self._print_gdbus_warning()
cmd += ['--generate-c-code', '@OUTDIR@/' + namebase, '@INPUT@']
custom_kwargs = {'input': xml_files,
'output': output,
'command': cmd,
'build_by_default': build_by_default
}
cfile_custom_target = build.CustomTarget(output, state.subdir, state.subproject, custom_kwargs)
targets.append(cfile_custom_target)
output = namebase + '.h'
if mesonlib.version_compare(glib_version, '>= 2.56.2'):
custom_kwargs = {'input': xml_files,
'output': output,
'command': cmd + ['--header', '--output', '@OUTPUT@', '@INPUT@'],
'build_by_default': build_by_default,
'install': install_header,
'install_dir': install_dir
}
else:
custom_kwargs = {'input': xml_files,
'output': output,
'command': cmd,
'build_by_default': build_by_default,
'install': install_header,
'install_dir': install_dir,
'depends': cfile_custom_target
}
hfile_custom_target = build.CustomTarget(output, state.subdir, state.subproject, custom_kwargs)
targets.append(hfile_custom_target)
if 'docbook' in kwargs:
docbook = kwargs['docbook']
if not isinstance(docbook, str):
raise MesonException('docbook value must be a string.')
docbook_cmd = cmd + ['--output-directory', '@OUTDIR@', '--generate-docbook', docbook, '@INPUT@']
# The docbook output is always ${docbook}-${name_of_xml_file}
output = namebase + '-docbook'
outputs = []
for f in xml_files:
outputs.append('{}-{}'.format(docbook, os.path.basename(str(f))))
if mesonlib.version_compare(glib_version, '>= 2.56.2'):
custom_kwargs = {'input': xml_files,
'output': outputs,
'command': docbook_cmd,
'build_by_default': build_by_default
}
else:
custom_kwargs = {'input': xml_files,
'output': outputs,
'command': cmd,
'build_by_default': build_by_default,
'depends': cfile_custom_target
}
docbook_custom_target = build.CustomTarget(output, state.subdir, state.subproject, custom_kwargs)
targets.append(docbook_custom_target)
return ModuleReturnValue(targets, targets)
@permittedKwargs({'sources', 'c_template', 'h_template', 'install_header', 'install_dir',
'comments', 'identifier_prefix', 'symbol_prefix', 'eprod', 'vprod',
'fhead', 'fprod', 'ftail', 'vhead', 'vtail', 'depends'})
def mkenums(self, state, args, kwargs):
if len(args) != 1:
raise MesonException('Mkenums requires one positional argument.')
basename = args[0]
if 'sources' not in kwargs:
raise MesonException('Missing keyword argument "sources".')
sources = kwargs.pop('sources')
if isinstance(sources, str):
sources = [sources]
elif not isinstance(sources, list):
raise MesonException(
'Sources keyword argument must be a string or array.')
cmd = []
known_kwargs = ['comments', 'eprod', 'fhead', 'fprod', 'ftail',
'identifier_prefix', 'symbol_prefix', 'template',
'vhead', 'vprod', 'vtail']
known_custom_target_kwargs = ['install_dir', 'build_always',
'depends', 'depend_files']
c_template = h_template = None
install_header = False
for arg, value in kwargs.items():
if arg == 'sources':
raise AssertionError("sources should've already been handled")
elif arg == 'c_template':
c_template = value
if isinstance(c_template, mesonlib.File):
c_template = c_template.absolute_path(state.environment.source_dir, state.environment.build_dir)
if 'template' in kwargs:
raise MesonException('Mkenums does not accept both '
'c_template and template keyword '
'arguments at the same time.')
elif arg == 'h_template':
h_template = value
if isinstance(h_template, mesonlib.File):
h_template = h_template.absolute_path(state.environment.source_dir, state.environment.build_dir)
if 'template' in kwargs:
raise MesonException('Mkenums does not accept both '
'h_template and template keyword '
'arguments at the same time.')
elif arg == 'install_header':
install_header = value
elif arg in known_kwargs:
cmd += ['--' + arg.replace('_', '-'), value]
elif arg not in known_custom_target_kwargs:
raise MesonException(
'Mkenums does not take a %s keyword argument.' % (arg, ))
cmd = [self.interpreter.find_program_impl(['glib-mkenums', 'mkenums'])] + cmd
custom_kwargs = {}
for arg in known_custom_target_kwargs:
if arg in kwargs:
custom_kwargs[arg] = kwargs[arg]
targets = []
if h_template is not None:
h_output = os.path.basename(os.path.splitext(h_template)[0])
# We always set template as the first element in the source array
# so --template consumes it.
h_cmd = cmd + ['--template', '@INPUT@']
h_sources = [h_template] + sources
custom_kwargs['install'] = install_header
if 'install_dir' not in custom_kwargs:
custom_kwargs['install_dir'] = \
state.environment.coredata.get_builtin_option('includedir')
h_target = self._make_mkenum_custom_target(state, h_sources,
h_output, h_cmd,
custom_kwargs)
targets.append(h_target)
if c_template is not None:
c_output = os.path.basename(os.path.splitext(c_template)[0])
# We always set template as the first element in the source array
# so --template consumes it.
c_cmd = cmd + ['--template', '@INPUT@']
c_sources = [c_template] + sources
# Never install the C file. Complain on bug tracker if you need it.
custom_kwargs['install'] = False
if h_template is not None:
if 'depends' in custom_kwargs:
custom_kwargs['depends'] += [h_target]
else:
custom_kwargs['depends'] = h_target
c_target = self._make_mkenum_custom_target(state, c_sources,
c_output, c_cmd,
custom_kwargs)
targets.insert(0, c_target)
if c_template is None and h_template is None:
generic_cmd = cmd + ['@INPUT@']
custom_kwargs['install'] = install_header
if 'install_dir' not in custom_kwargs:
custom_kwargs['install_dir'] = \
state.environment.coredata.get_builtin_option('includedir')
target = self._make_mkenum_custom_target(state, sources, basename,
generic_cmd, custom_kwargs)
return ModuleReturnValue(target, [target])
elif len(targets) == 1:
return ModuleReturnValue(targets[0], [targets[0]])
else:
return ModuleReturnValue(targets, targets)
@FeatureNew('gnome.mkenums_simple', '0.42.0')
def mkenums_simple(self, state, args, kwargs):
hdr_filename = args[0] + '.h'
body_filename = args[0] + '.c'
# not really needed, just for sanity checking
forbidden_kwargs = ['c_template', 'h_template', 'eprod', 'fhead',
'fprod', 'ftail', 'vhead', 'vtail', 'comments']
for arg in forbidden_kwargs:
if arg in kwargs:
raise MesonException('mkenums_simple() does not take a %s keyword argument' % (arg, ))
# kwargs to pass as-is from mkenums_simple() to mkenums()
shared_kwargs = ['sources', 'install_header', 'install_dir',
'identifier_prefix', 'symbol_prefix']
mkenums_kwargs = {}
for arg in shared_kwargs:
if arg in kwargs:
mkenums_kwargs[arg] = kwargs[arg]
# .c file generation
c_file_kwargs = copy.deepcopy(mkenums_kwargs)
if 'sources' not in kwargs:
raise MesonException('Missing keyword argument "sources".')
sources = kwargs['sources']
if isinstance(sources, str):
sources = [sources]
elif not isinstance(sources, list):
raise MesonException(
'Sources keyword argument must be a string or array.')
# The `install_header` argument will be used by mkenums() when
# not using template files, so we need to forcibly unset it
# when generating the C source file, otherwise we will end up
# installing it
c_file_kwargs['install_header'] = False
header_prefix = kwargs.get('header_prefix', '')
decl_decorator = kwargs.get('decorator', '')
func_prefix = kwargs.get('function_prefix', '')
body_prefix = kwargs.get('body_prefix', '')
# Maybe we should write our own template files into the build dir
# instead, but that seems like much more work, nice as it would be.
fhead = ''
if body_prefix != '':
fhead += '%s\n' % body_prefix
fhead += '#include "%s"\n' % hdr_filename
for hdr in sources:
fhead += '#include "%s"\n' % os.path.basename(str(hdr))
fhead += '''
#define C_ENUM(v) ((gint) v)
#define C_FLAGS(v) ((guint) v)
'''
c_file_kwargs['fhead'] = fhead
c_file_kwargs['fprod'] = '''
/* enumerations from "@basename@" */
'''
c_file_kwargs['vhead'] = '''
GType
%s@enum_name@_get_type (void)
{
static volatile gsize gtype_id = 0;
static const G@Type@Value values[] = {''' % func_prefix
c_file_kwargs['vprod'] = ' { C_@TYPE@(@VALUENAME@), "@VALUENAME@", "@valuenick@" },'
c_file_kwargs['vtail'] = ''' { 0, NULL, NULL }
};
  if (g_once_init_enter (&gtype_id)) {
    GType new_type = g_@type@_register_static ("@EnumName@", values);
    g_once_init_leave (&gtype_id, new_type);
}
return (GType) gtype_id;
}'''
rv = self.mkenums(state, [body_filename], c_file_kwargs)
c_file = rv.return_value
# .h file generation
h_file_kwargs = copy.deepcopy(mkenums_kwargs)
h_file_kwargs['fhead'] = '''#pragma once
#include <glib-object.h>
{}
G_BEGIN_DECLS
'''.format(header_prefix)
h_file_kwargs['fprod'] = '''
/* enumerations from "@basename@" */
'''
h_file_kwargs['vhead'] = '''
{}
GType {}@enum_name@_get_type (void);
#define @ENUMPREFIX@_TYPE_@ENUMSHORT@ ({}@enum_name@_get_type())'''.format(decl_decorator, func_prefix, func_prefix)
h_file_kwargs['ftail'] = '''
G_END_DECLS'''
rv = self.mkenums(state, [hdr_filename], h_file_kwargs)
h_file = rv.return_value
return ModuleReturnValue([c_file, h_file], [c_file, h_file])
@staticmethod
def _make_mkenum_custom_target(state, sources, output, cmd, kwargs):
custom_kwargs = {
'input': sources,
'output': output,
'capture': True,
'command': cmd
}
custom_kwargs.update(kwargs)
return build.CustomTarget(output, state.subdir, state.subproject, custom_kwargs,
# https://github.com/mesonbuild/meson/issues/973
absolute_paths=True)
@permittedKwargs({'sources', 'prefix', 'install_header', 'install_dir', 'stdinc',
'nostdinc', 'internal', 'skip_source', 'valist_marshallers',
'extra_args'})
def genmarshal(self, state, args, kwargs):
if len(args) != 1:
raise MesonException(
'Genmarshal requires one positional argument.')
output = args[0]
if 'sources' not in kwargs:
raise MesonException('Missing keyword argument "sources".')
sources = kwargs.pop('sources')
if isinstance(sources, str):
sources = [sources]
elif not isinstance(sources, list):
raise MesonException(
'Sources keyword argument must be a string or array.')
new_genmarshal = mesonlib.version_compare(self._get_native_glib_version(state), '>= 2.53.3')
cmd = [self.interpreter.find_program_impl('glib-genmarshal')]
known_kwargs = ['internal', 'nostdinc', 'skip_source', 'stdinc',
'valist_marshallers', 'extra_args']
known_custom_target_kwargs = ['build_always', 'depends',
'depend_files', 'install_dir',
'install_header']
for arg, value in kwargs.items():
if arg == 'prefix':
cmd += ['--prefix', value]
elif arg == 'extra_args':
if new_genmarshal:
cmd += mesonlib.stringlistify(value)
else:
mlog.warning('The current version of GLib does not support extra arguments \n'
'for glib-genmarshal. You need at least GLib 2.53.3. See ',
mlog.bold('https://github.com/mesonbuild/meson/pull/2049'))
elif arg in known_kwargs and value:
cmd += ['--' + arg.replace('_', '-')]
elif arg not in known_custom_target_kwargs:
raise MesonException(
'Genmarshal does not take a %s keyword argument.' % (
arg, ))
install_header = kwargs.pop('install_header', False)
install_dir = kwargs.pop('install_dir', None)
custom_kwargs = {
'input': sources,
}
# https://github.com/GNOME/glib/commit/0fbc98097fac4d3e647684f344e508abae109fdf
if mesonlib.version_compare(self._get_native_glib_version(state), '>= 2.51.0'):
cmd += ['--output', '@OUTPUT@']
else:
custom_kwargs['capture'] = True
for arg in known_custom_target_kwargs:
if arg in kwargs:
custom_kwargs[arg] = kwargs[arg]
header_file = output + '.h'
custom_kwargs['command'] = cmd + ['--body', '@INPUT@']
if mesonlib.version_compare(self._get_native_glib_version(state), '>= 2.53.4'):
# Silence any warnings about missing prototypes
custom_kwargs['command'] += ['--include-header', header_file]
custom_kwargs['output'] = output + '.c'
body = build.CustomTarget(output + '_c', state.subdir, state.subproject, custom_kwargs)
custom_kwargs['install'] = install_header
if install_dir is not None:
custom_kwargs['install_dir'] = install_dir
if new_genmarshal:
cmd += ['--pragma-once']
custom_kwargs['command'] = cmd + ['--header', '@INPUT@']
custom_kwargs['output'] = header_file
header = build.CustomTarget(output + '_h', state.subdir, state.subproject, custom_kwargs)
rv = [body, header]
return ModuleReturnValue(rv, rv)
@staticmethod
def _vapi_args_to_command(prefix, variable, kwargs, accept_vapi=False):
arg_list = mesonlib.extract_as_list(kwargs, variable)
ret = []
for arg in arg_list:
if not isinstance(arg, str):
                types = 'strings' + (' or InternalDependencies' if accept_vapi else '')
raise MesonException('All {} must be {}'.format(variable, types))
ret.append(prefix + arg)
return ret
def _extract_vapi_packages(self, state, kwargs):
'''
Packages are special because we need to:
- Get a list of packages for the .deps file
- Get a list of depends for any VapiTargets
- Get package name from VapiTargets
- Add include dirs for any VapiTargets
'''
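        # Illustrative (hypothetical) outcome: for packages: ['glib-2.0', dep],
        # where dep carries a VapiTarget producing 'foo-1.0.vapi', this returns
        # roughly ['--vapidir=<builddir>', '--girdir=<builddir>', '--pkg=foo-1.0',
        # '--pkg=glib-2.0'] as args, the VapiTarget as a depend,
        # ['glib-2.0', 'foo-1.0'] as packages and the target's source dir as an
        # include directory.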
arg_list = kwargs.get('packages')
if not arg_list:
return [], [], [], []
arg_list = mesonlib.listify(arg_list)
vapi_depends = []
vapi_packages = []
vapi_includes = []
ret = []
remaining_args = []
for arg in arg_list:
if hasattr(arg, 'held_object'):
arg = arg.held_object
if isinstance(arg, InternalDependency):
targets = [t for t in arg.sources if isinstance(t, VapiTarget)]
for target in targets:
srcdir = os.path.join(state.environment.get_source_dir(),
target.get_subdir())
outdir = os.path.join(state.environment.get_build_dir(),
target.get_subdir())
outfile = target.get_outputs()[0][:-5] # Strip .vapi
ret.append('--vapidir=' + outdir)
ret.append('--girdir=' + outdir)
ret.append('--pkg=' + outfile)
vapi_depends.append(target)
vapi_packages.append(outfile)
vapi_includes.append(srcdir)
else:
vapi_packages.append(arg)
remaining_args.append(arg)
kwargs['packages'] = remaining_args
vapi_args = ret + self._vapi_args_to_command('--pkg=', 'packages', kwargs, accept_vapi=True)
return vapi_args, vapi_depends, vapi_packages, vapi_includes
def _generate_deps(self, state, library, packages, install_dir):
outdir = state.environment.scratch_dir
fname = os.path.join(outdir, library + '.deps')
with open(fname, 'w') as ofile:
for package in packages:
ofile.write(package + '\n')
return build.Data(mesonlib.File(True, outdir, fname), install_dir)
def _get_vapi_link_with(self, target):
link_with = []
for dep in target.get_target_dependencies():
if isinstance(dep, build.SharedLibrary):
link_with.append(dep)
elif isinstance(dep, GirTarget):
link_with += self._get_vapi_link_with(dep)
return link_with
@permittedKwargs({'sources', 'packages', 'metadata_dirs', 'gir_dirs',
'vapi_dirs', 'install', 'install_dir'})
def generate_vapi(self, state, args, kwargs):
if len(args) != 1:
raise MesonException('The library name is required')
if not isinstance(args[0], str):
raise MesonException('The first argument must be the name of the library')
created_values = []
library = args[0]
build_dir = os.path.join(state.environment.get_build_dir(), state.subdir)
source_dir = os.path.join(state.environment.get_source_dir(), state.subdir)
pkg_cmd, vapi_depends, vapi_packages, vapi_includes = self._extract_vapi_packages(state, kwargs)
if 'VAPIGEN' in os.environ:
cmd = [self.interpreter.find_program_impl(os.environ['VAPIGEN'])]
else:
cmd = [self.interpreter.find_program_impl('vapigen')]
cmd += ['--quiet', '--library=' + library, '--directory=' + build_dir]
cmd += self._vapi_args_to_command('--vapidir=', 'vapi_dirs', kwargs)
cmd += self._vapi_args_to_command('--metadatadir=', 'metadata_dirs', kwargs)
cmd += self._vapi_args_to_command('--girdir=', 'gir_dirs', kwargs)
cmd += pkg_cmd
cmd += ['--metadatadir=' + source_dir]
if 'sources' not in kwargs:
raise MesonException('sources are required to generate the vapi file')
inputs = mesonlib.extract_as_list(kwargs, 'sources')
link_with = []
for i in inputs:
if isinstance(i, str):
cmd.append(os.path.join(source_dir, i))
elif hasattr(i, 'held_object') and isinstance(i.held_object, GirTarget):
link_with += self._get_vapi_link_with(i.held_object)
subdir = os.path.join(state.environment.get_build_dir(),
i.held_object.get_subdir())
gir_file = os.path.join(subdir, i.held_object.get_outputs()[0])
cmd.append(gir_file)
else:
raise MesonException('Input must be a str or GirTarget')
vapi_output = library + '.vapi'
custom_kwargs = {
'command': cmd,
'input': inputs,
'output': vapi_output,
'depends': vapi_depends,
}
install_dir = kwargs.get('install_dir',
os.path.join(state.environment.coredata.get_builtin_option('datadir'),
'vala', 'vapi'))
if kwargs.get('install'):
custom_kwargs['install'] = kwargs['install']
custom_kwargs['install_dir'] = install_dir
# We shouldn't need this locally but we install it
deps_target = self._generate_deps(state, library, vapi_packages, install_dir)
created_values.append(deps_target)
vapi_target = VapiTarget(vapi_output, state.subdir, state.subproject, custom_kwargs)
# So to try our best to get this to just work we need:
        # - link with the correct library
# - include the vapi and dependent vapi files in sources
# - add relevant directories to include dirs
incs = [build.IncludeDirs(state.subdir, ['.'] + vapi_includes, False)]
sources = [vapi_target] + vapi_depends
rv = InternalDependency(None, incs, [], [], link_with, [], sources, [])
created_values.append(rv)
return ModuleReturnValue(rv, created_values)
def initialize(*args, **kwargs):
return GnomeModule(*args, **kwargs)
| apache-2.0 | -8,472,845,615,804,737,000 | 45.87234 | 133 | 0.555528 | false |
ContinuumIO/topik | topik/vectorizers/tfidf.py | 1 | 1139 | from math import log
from ._registry import register
from .vectorizer_output import VectorizerOutput
from .bag_of_words import _count_words_in_docs
def _count_document_occurrences(doc_counts, total_words):
return {word_id: sum(1 for doc in doc_counts.values() if word_id in doc)
for word_id in range(total_words)}
def _calculate_tfidf(tokenized_corpus, vectorizer_output):
tokens = list(tokenized_corpus)
doc_counts = _count_words_in_docs(tokens, vectorizer_output)
    document_occurrences = _count_document_occurrences(doc_counts, vectorizer_output.global_term_count)
    idf = {word_id: log(len(tokens) / float(document_occurrences[word_id]))
           for word_id in range(vectorizer_output.global_term_count)}
tf_idf = {}
# TODO: this is essentially a sparse matrix multiply and could be done much more efficiently
for id, doc in doc_counts.items():
tf_idf[id] = {}
for word_id, count in doc.items():
tf_idf[id].update({word_id: count*idf[word_id]})
return tf_idf
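# Illustrative numbers for the computation above (hypothetical ids): if word 5
# appears twice in document 0 and occurs in 2 of 10 documents overall, then
# idf[5] = log(10 / 2.0) and tf_idf[0][5] = 2 * log(10 / 2.0).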
@register
def tfidf(tokenized_corpus):
return VectorizerOutput(tokenized_corpus, _calculate_tfidf)
| bsd-3-clause | -7,258,414,536,797,177,000 | 38.275862 | 102 | 0.702371 | false |
tumbl3w33d/ansible | lib/ansible/modules/cloud/amazon/ec2_eni.py | 31 | 23840 | #!/usr/bin/python
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_eni
short_description: Create and optionally attach an Elastic Network Interface (ENI) to an instance
description:
- Create and optionally attach an Elastic Network Interface (ENI) to an instance. If an ENI ID or private_ip is
provided, the existing ENI (if any) will be modified. The 'attached' parameter controls the attachment status
of the network interface.
version_added: "2.0"
author: "Rob White (@wimnat)"
options:
eni_id:
description:
- The ID of the ENI (to modify).
      - If I(eni_id=None) and I(state=present), a new ENI will be created.
type: str
instance_id:
description:
- Instance ID that you wish to attach ENI to.
- Since version 2.2, use the I(attached) parameter to attach or detach an ENI. Prior to 2.2, to detach an ENI from an instance, use C(None).
type: str
private_ip_address:
description:
- Private IP address.
type: str
subnet_id:
description:
- ID of subnet in which to create the ENI.
type: str
description:
description:
- Optional description of the ENI.
type: str
security_groups:
description:
- List of security groups associated with the interface. Only used when I(state=present).
- Since version 2.2, you can specify security groups by ID or by name or a combination of both. Prior to 2.2, you can specify only by ID.
type: list
elements: str
state:
description:
- Create or delete ENI.
default: present
choices: [ 'present', 'absent' ]
type: str
device_index:
description:
- The index of the device for the network interface attachment on the instance.
default: 0
type: int
attached:
description:
- Specifies if network interface should be attached or detached from instance. If omitted, attachment status
won't change
version_added: 2.2
type: bool
force_detach:
description:
- Force detachment of the interface. This applies either when explicitly detaching the interface by setting I(instance_id=None)
or when deleting an interface with I(state=absent).
default: false
type: bool
delete_on_termination:
description:
- Delete the interface when the instance it is attached to is terminated. You can only specify this flag when the
interface is being modified, not on creation.
required: false
type: bool
source_dest_check:
description:
- By default, interfaces perform source/destination checks. NAT instances however need this check to be disabled.
You can only specify this flag when the interface is being modified, not on creation.
required: false
type: bool
secondary_private_ip_addresses:
description:
- A list of IP addresses to assign as secondary IP addresses to the network interface.
This option is mutually exclusive of I(secondary_private_ip_address_count)
required: false
version_added: 2.2
type: list
elements: str
purge_secondary_private_ip_addresses:
description:
- To be used with I(secondary_private_ip_addresses) to determine whether or not to remove any secondary IP addresses other than those specified.
- Set I(secondary_private_ip_addresses=[]) to purge all secondary addresses.
default: false
type: bool
version_added: 2.5
secondary_private_ip_address_count:
description:
- The number of secondary IP addresses to assign to the network interface. This option is mutually exclusive of I(secondary_private_ip_addresses)
required: false
version_added: 2.2
type: int
allow_reassignment:
description:
- Indicates whether to allow an IP address that is already assigned to another network interface or instance
to be reassigned to the specified network interface.
required: false
default: false
type: bool
version_added: 2.7
extends_documentation_fragment:
- aws
- ec2
notes:
    - This module identifies an ENI based on either the I(eni_id), a combination of I(private_ip_address) and I(subnet_id),
      or a combination of I(instance_id) and I(device_index). Any of these options will let you specify a particular ENI.
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create an ENI. As no security group is defined, ENI will be created in default security group
- ec2_eni:
private_ip_address: 172.31.0.20
subnet_id: subnet-xxxxxxxx
state: present
# Create an ENI and attach it to an instance
- ec2_eni:
instance_id: i-xxxxxxx
device_index: 1
private_ip_address: 172.31.0.20
subnet_id: subnet-xxxxxxxx
state: present
# Create an ENI with two secondary addresses
- ec2_eni:
subnet_id: subnet-xxxxxxxx
state: present
secondary_private_ip_address_count: 2
# Assign a secondary IP address to an existing ENI
# This will purge any existing IPs
- ec2_eni:
subnet_id: subnet-xxxxxxxx
eni_id: eni-yyyyyyyy
state: present
secondary_private_ip_addresses:
- 172.16.1.1
# Remove any secondary IP addresses from an existing ENI
- ec2_eni:
subnet_id: subnet-xxxxxxxx
eni_id: eni-yyyyyyyy
state: present
secondary_private_ip_address_count: 0
# Destroy an ENI, detaching it from any instance if necessary
- ec2_eni:
eni_id: eni-xxxxxxx
force_detach: true
state: absent
# Update an ENI
- ec2_eni:
eni_id: eni-xxxxxxx
description: "My new description"
state: present
# Update an ENI identifying it by private_ip_address and subnet_id
- ec2_eni:
subnet_id: subnet-xxxxxxx
private_ip_address: 172.16.1.1
description: "My new description"
# Detach an ENI from an instance
- ec2_eni:
eni_id: eni-xxxxxxx
instance_id: None
state: present
### Delete an interface on termination
# First create the interface
- ec2_eni:
instance_id: i-xxxxxxx
device_index: 1
private_ip_address: 172.31.0.20
subnet_id: subnet-xxxxxxxx
state: present
register: eni
# Modify the interface to enable the delete_on_termination flag
- ec2_eni:
eni_id: "{{ eni.interface.id }}"
delete_on_termination: true
'''
RETURN = '''
interface:
description: Network interface attributes
returned: when state != absent
type: complex
contains:
description:
description: interface description
type: str
sample: Firewall network interface
groups:
description: list of security groups
type: list
elements: dict
sample: [ { "sg-f8a8a9da": "default" } ]
id:
description: network interface id
type: str
sample: "eni-1d889198"
mac_address:
description: interface's physical address
type: str
sample: "00:00:5E:00:53:23"
owner_id:
description: aws account id
type: str
sample: 812381371
private_ip_address:
description: primary ip address of this interface
type: str
sample: 10.20.30.40
private_ip_addresses:
description: list of all private ip addresses associated to this interface
type: list
elements: dict
sample: [ { "primary_address": true, "private_ip_address": "10.20.30.40" } ]
source_dest_check:
description: value of source/dest check flag
type: bool
sample: True
status:
description: network interface status
type: str
sample: "pending"
subnet_id:
description: which vpc subnet the interface is bound
type: str
sample: subnet-b0a0393c
vpc_id:
description: which vpc this network interface is bound
type: str
sample: vpc-9a9a9da
'''
import time
import re
try:
import boto.ec2
import boto.vpc
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (AnsibleAWSError, connect_to_aws,
ec2_argument_spec, get_aws_connection_info,
get_ec2_security_group_ids_from_names)
def get_eni_info(interface):
# Private addresses
private_addresses = []
for ip in interface.private_ip_addresses:
private_addresses.append({'private_ip_address': ip.private_ip_address, 'primary_address': ip.primary})
interface_info = {'id': interface.id,
'subnet_id': interface.subnet_id,
'vpc_id': interface.vpc_id,
'description': interface.description,
'owner_id': interface.owner_id,
'status': interface.status,
'mac_address': interface.mac_address,
'private_ip_address': interface.private_ip_address,
'source_dest_check': interface.source_dest_check,
'groups': dict((group.id, group.name) for group in interface.groups),
'private_ip_addresses': private_addresses
}
if interface.attachment is not None:
interface_info['attachment'] = {'attachment_id': interface.attachment.id,
'instance_id': interface.attachment.instance_id,
'device_index': interface.attachment.device_index,
'status': interface.attachment.status,
'attach_time': interface.attachment.attach_time,
'delete_on_termination': interface.attachment.delete_on_termination,
}
return interface_info
def wait_for_eni(eni, status):
while True:
time.sleep(3)
eni.update()
# If the status is detached we just need attachment to disappear
if eni.attachment is None:
if status == "detached":
break
else:
if status == "attached" and eni.attachment.status == "attached":
break
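# Note: wait_for_eni polls every 3 seconds with no timeout; it returns only
# once EC2 reports the requested "attached" or "detached" state.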
def create_eni(connection, vpc_id, module):
instance_id = module.params.get("instance_id")
attached = module.params.get("attached")
if instance_id == 'None':
instance_id = None
device_index = module.params.get("device_index")
subnet_id = module.params.get('subnet_id')
private_ip_address = module.params.get('private_ip_address')
description = module.params.get('description')
security_groups = get_ec2_security_group_ids_from_names(module.params.get('security_groups'), connection, vpc_id=vpc_id, boto3=False)
secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses")
secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count")
changed = False
try:
eni = connection.create_network_interface(subnet_id, private_ip_address, description, security_groups)
if attached and instance_id is not None:
try:
eni.attach(instance_id, device_index)
except BotoServerError:
eni.delete()
raise
# Wait to allow creation / attachment to finish
wait_for_eni(eni, "attached")
eni.update()
if secondary_private_ip_address_count is not None:
try:
connection.assign_private_ip_addresses(network_interface_id=eni.id, secondary_private_ip_address_count=secondary_private_ip_address_count)
except BotoServerError:
eni.delete()
raise
if secondary_private_ip_addresses is not None:
try:
connection.assign_private_ip_addresses(network_interface_id=eni.id, private_ip_addresses=secondary_private_ip_addresses)
except BotoServerError:
eni.delete()
raise
changed = True
except BotoServerError as e:
module.fail_json(msg=e.message)
module.exit_json(changed=changed, interface=get_eni_info(eni))
def modify_eni(connection, vpc_id, module, eni):
instance_id = module.params.get("instance_id")
attached = module.params.get("attached")
do_detach = module.params.get('state') == 'detached'
device_index = module.params.get("device_index")
description = module.params.get('description')
security_groups = module.params.get('security_groups')
force_detach = module.params.get("force_detach")
source_dest_check = module.params.get("source_dest_check")
delete_on_termination = module.params.get("delete_on_termination")
secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses")
purge_secondary_private_ip_addresses = module.params.get("purge_secondary_private_ip_addresses")
secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count")
allow_reassignment = module.params.get("allow_reassignment")
changed = False
try:
if description is not None:
if eni.description != description:
connection.modify_network_interface_attribute(eni.id, "description", description)
changed = True
if len(security_groups) > 0:
groups = get_ec2_security_group_ids_from_names(security_groups, connection, vpc_id=vpc_id, boto3=False)
if sorted(get_sec_group_list(eni.groups)) != sorted(groups):
connection.modify_network_interface_attribute(eni.id, "groupSet", groups)
changed = True
if source_dest_check is not None:
if eni.source_dest_check != source_dest_check:
connection.modify_network_interface_attribute(eni.id, "sourceDestCheck", source_dest_check)
changed = True
if delete_on_termination is not None and eni.attachment is not None:
if eni.attachment.delete_on_termination is not delete_on_termination:
connection.modify_network_interface_attribute(eni.id, "deleteOnTermination", delete_on_termination, eni.attachment.id)
changed = True
current_secondary_addresses = [i.private_ip_address for i in eni.private_ip_addresses if not i.primary]
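        # Reconcile secondary IPs below: extra addresses are removed only when
        # purge_secondary_private_ip_addresses is set, then any requested
        # addresses not already present are assigned.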
if secondary_private_ip_addresses is not None:
secondary_addresses_to_remove = list(set(current_secondary_addresses) - set(secondary_private_ip_addresses))
if secondary_addresses_to_remove and purge_secondary_private_ip_addresses:
connection.unassign_private_ip_addresses(network_interface_id=eni.id,
private_ip_addresses=list(set(current_secondary_addresses) -
set(secondary_private_ip_addresses)),
dry_run=False)
changed = True
secondary_addresses_to_add = list(set(secondary_private_ip_addresses) - set(current_secondary_addresses))
if secondary_addresses_to_add:
connection.assign_private_ip_addresses(network_interface_id=eni.id,
private_ip_addresses=secondary_addresses_to_add,
secondary_private_ip_address_count=None,
allow_reassignment=allow_reassignment, dry_run=False)
changed = True
if secondary_private_ip_address_count is not None:
current_secondary_address_count = len(current_secondary_addresses)
if secondary_private_ip_address_count > current_secondary_address_count:
connection.assign_private_ip_addresses(network_interface_id=eni.id,
private_ip_addresses=None,
secondary_private_ip_address_count=(secondary_private_ip_address_count -
current_secondary_address_count),
allow_reassignment=allow_reassignment, dry_run=False)
changed = True
elif secondary_private_ip_address_count < current_secondary_address_count:
# How many of these addresses do we want to remove
secondary_addresses_to_remove_count = current_secondary_address_count - secondary_private_ip_address_count
connection.unassign_private_ip_addresses(network_interface_id=eni.id,
private_ip_addresses=current_secondary_addresses[:secondary_addresses_to_remove_count],
dry_run=False)
if attached is True:
if eni.attachment and eni.attachment.instance_id != instance_id:
detach_eni(eni, module)
eni.attach(instance_id, device_index)
wait_for_eni(eni, "attached")
changed = True
if eni.attachment is None:
eni.attach(instance_id, device_index)
wait_for_eni(eni, "attached")
changed = True
elif attached is False:
detach_eni(eni, module)
except BotoServerError as e:
module.fail_json(msg=e.message)
eni.update()
module.exit_json(changed=changed, interface=get_eni_info(eni))
def delete_eni(connection, module):
eni_id = module.params.get("eni_id")
force_detach = module.params.get("force_detach")
try:
eni_result_set = connection.get_all_network_interfaces(eni_id)
eni = eni_result_set[0]
if force_detach is True:
if eni.attachment is not None:
eni.detach(force_detach)
# Wait to allow detachment to finish
wait_for_eni(eni, "detached")
eni.update()
eni.delete()
changed = True
else:
eni.delete()
changed = True
module.exit_json(changed=changed)
except BotoServerError as e:
regex = re.compile('The networkInterface ID \'.*\' does not exist')
if regex.search(e.message) is not None:
module.exit_json(changed=False)
else:
module.fail_json(msg=e.message)
def detach_eni(eni, module):
attached = module.params.get("attached")
force_detach = module.params.get("force_detach")
if eni.attachment is not None:
eni.detach(force_detach)
wait_for_eni(eni, "detached")
if attached:
return
eni.update()
module.exit_json(changed=True, interface=get_eni_info(eni))
else:
module.exit_json(changed=False, interface=get_eni_info(eni))
def uniquely_find_eni(connection, module):
eni_id = module.params.get("eni_id")
private_ip_address = module.params.get('private_ip_address')
subnet_id = module.params.get('subnet_id')
instance_id = module.params.get('instance_id')
device_index = module.params.get('device_index')
attached = module.params.get('attached')
try:
filters = {}
        # proceed only if we're unambiguously specifying an ENI
if eni_id is None and private_ip_address is None and (instance_id is None and device_index is None):
return None
if private_ip_address and subnet_id:
filters['private-ip-address'] = private_ip_address
filters['subnet-id'] = subnet_id
if not attached and instance_id and device_index:
filters['attachment.instance-id'] = instance_id
filters['attachment.device-index'] = device_index
if eni_id is None and len(filters) == 0:
return None
eni_result = connection.get_all_network_interfaces(eni_id, filters=filters)
if len(eni_result) == 1:
return eni_result[0]
else:
return None
except BotoServerError as e:
module.fail_json(msg=e.message)
return None
def get_sec_group_list(groups):
# Build list of remote security groups
remote_security_groups = []
for group in groups:
remote_security_groups.append(group.id.encode())
return remote_security_groups
def _get_vpc_id(connection, module, subnet_id):
try:
return connection.get_all_subnets(subnet_ids=[subnet_id])[0].vpc_id
except BotoServerError as e:
module.fail_json(msg=e.message)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
eni_id=dict(default=None, type='str'),
instance_id=dict(default=None, type='str'),
private_ip_address=dict(type='str'),
subnet_id=dict(type='str'),
description=dict(type='str'),
security_groups=dict(default=[], type='list'),
device_index=dict(default=0, type='int'),
state=dict(default='present', choices=['present', 'absent']),
force_detach=dict(default='no', type='bool'),
source_dest_check=dict(default=None, type='bool'),
delete_on_termination=dict(default=None, type='bool'),
secondary_private_ip_addresses=dict(default=None, type='list'),
purge_secondary_private_ip_addresses=dict(default=False, type='bool'),
secondary_private_ip_address_count=dict(default=None, type='int'),
allow_reassignment=dict(default=False, type='bool'),
attached=dict(default=None, type='bool')
)
)
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=[
['secondary_private_ip_addresses', 'secondary_private_ip_address_count']
],
required_if=([
('state', 'absent', ['eni_id']),
('attached', True, ['instance_id']),
('purge_secondary_private_ip_addresses', True, ['secondary_private_ip_addresses'])
])
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
vpc_connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
state = module.params.get("state")
if state == 'present':
eni = uniquely_find_eni(connection, module)
if eni is None:
subnet_id = module.params.get("subnet_id")
if subnet_id is None:
module.fail_json(msg="subnet_id is required when creating a new ENI")
vpc_id = _get_vpc_id(vpc_connection, module, subnet_id)
create_eni(connection, vpc_id, module)
else:
vpc_id = eni.vpc_id
modify_eni(connection, vpc_id, module, eni)
elif state == 'absent':
delete_eni(connection, module)
if __name__ == '__main__':
main()
| gpl-3.0 | -9,060,336,838,755,929,000 | 36.661927 | 154 | 0.617534 | false |
NeCTAR-RC/nova | nova/virt/libvirt/storage/dmcrypt.py | 47 | 3448 | # Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import excutils
from nova.i18n import _LE
from nova.virt.libvirt import utils
LOG = logging.getLogger(__name__)
_dmcrypt_suffix = '-dmcrypt'
def volume_name(base):
"""Returns the suffixed dmcrypt volume name.
This is to avoid collisions with similarly named device mapper names for
LVM volumes
"""
return base + _dmcrypt_suffix
def is_encrypted(path):
"""Returns true if the path corresponds to an encrypted disk."""
if path.startswith('/dev/mapper'):
return path.rpartition('/')[2].endswith(_dmcrypt_suffix)
else:
return False
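# For example, is_encrypted('/dev/mapper/disk.local-dmcrypt') is True, while
# is_encrypted('/dev/instances/disk.local') is False (paths illustrative).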
def create_volume(target, device, cipher, key_size, key):
"""Sets up a dmcrypt mapping
:param target: device mapper logical device name
:param device: underlying block device
:param cipher: encryption cipher string digestible by cryptsetup
:param key_size: encryption key size
:param key: encryption key as an array of unsigned bytes
"""
cmd = ('cryptsetup',
'create',
target,
device,
'--cipher=' + cipher,
'--key-size=' + str(key_size),
'--key-file=-')
key = ''.join(map(lambda byte: "%02x" % byte, key))
try:
utils.execute(*cmd, process_input=key, run_as_root=True)
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Could not start encryption for disk %(device)s: "
"%(exception)s"), {'device': device, 'exception': e})
def delete_volume(target):
"""Deletes a dmcrypt mapping
:param target: name of the mapped logical device
"""
try:
utils.execute('cryptsetup', 'remove', target, run_as_root=True)
except processutils.ProcessExecutionError as e:
# cryptsetup returns 4 when attempting to destroy a non-existent
        # dm-crypt device, which indicates the device is invalid (i.e., it
        # has already been destroyed), so the error can safely be ignored.
if e.exit_code == 4:
LOG.debug("Ignoring exit code 4, volume already destroyed")
else:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Could not disconnect encrypted volume "
"%(volume)s. If dm-crypt device is still active "
"it will have to be destroyed manually for "
"cleanup to succeed."), {'volume': target})
def list_volumes():
"""Function enumerates encrypted volumes."""
return [dmdev for dmdev in os.listdir('/dev/mapper')
if dmdev.endswith('-dmcrypt')]
| apache-2.0 | -6,808,341,011,872,560,000 | 34.183673 | 79 | 0.645882 | false |
keyurpatel076/MissionPlannerGit | Lib/unittest/case.py | 41 | 42078 | """Test case implementation"""
import collections
import sys
import functools
import difflib
import pprint
import re
import warnings
from . import result
from .util import (
strclass, safe_repr, unorderable_list_difference,
_count_diff_all_purpose, _count_diff_hashable
)
__unittest = True
DIFF_OMITTED = ('\nDiff is %s characters long. '
'Set self.maxDiff to None to see it.')
class SkipTest(Exception):
"""
Raise this exception in a test to skip it.
Usually you can use TestResult.skip() or one of the skipping decorators
instead of raising this directly.
"""
pass
class _ExpectedFailure(Exception):
"""
Raise this when a test is expected to fail.
This is an implementation detail.
"""
def __init__(self, exc_info):
super(_ExpectedFailure, self).__init__()
self.exc_info = exc_info
class _UnexpectedSuccess(Exception):
"""
The test was supposed to fail, but it didn't!
"""
pass
def _id(obj):
return obj
def skip(reason):
"""
Unconditionally skip a test.
"""
def decorator(test_item):
if not (isinstance(test_item, type) and issubclass(test_item, TestCase)):
@functools.wraps(test_item)
def skip_wrapper(*args, **kwargs):
raise SkipTest(reason)
test_item = skip_wrapper
test_item.__unittest_skip__ = True
test_item.__unittest_skip_why__ = reason
return test_item
return decorator
def skipIf(condition, reason):
"""
Skip a test if the condition is true.
"""
if condition:
return skip(reason)
return _id
def skipUnless(condition, reason):
"""
Skip a test unless the condition is true.
"""
if not condition:
return skip(reason)
return _id
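# A hedged decorator usage sketch (condition and reason are illustrative):
#   @skipUnless(sys.platform.startswith("linux"), "requires Linux")
#   def test_something(self):
#       ...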
def expectedFailure(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception:
raise _ExpectedFailure(sys.exc_info())
raise _UnexpectedSuccess
return wrapper
class _AssertRaisesContext(object):
"""A context manager used to implement TestCase.assertRaises* methods."""
def __init__(self, expected, test_case, expected_regexp=None):
self.expected = expected
self.failureException = test_case.failureException
self.expected_regexp = expected_regexp
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is None:
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
raise self.failureException(
"{0} not raised".format(exc_name))
if not issubclass(exc_type, self.expected):
# let unexpected exceptions pass through
return False
self.exception = exc_value # store for later retrieval
if self.expected_regexp is None:
return True
expected_regexp = self.expected_regexp
if isinstance(expected_regexp, basestring):
expected_regexp = re.compile(expected_regexp)
if not expected_regexp.search(str(exc_value)):
raise self.failureException('"%s" does not match "%s"' %
(expected_regexp.pattern, str(exc_value)))
return True
class TestCase(object):
"""A class whose instances are single test cases.
By default, the test code itself should be placed in a method named
'runTest'.
If the fixture may be used for many test cases, create as
many test methods as are needed. When instantiating such a TestCase
subclass, specify in the constructor arguments the name of the test method
that the instance is to execute.
Test authors should subclass TestCase for their own tests. Construction
and deconstruction of the test's environment ('fixture') can be
implemented by overriding the 'setUp' and 'tearDown' methods respectively.
If it is necessary to override the __init__ method, the base class
__init__ method must always be called. It is important that subclasses
should not change the signature of their __init__ method, since instances
of the classes are instantiated automatically by parts of the framework
in order to be run.
"""
# This attribute determines which exception will be raised when
# the instance's assertion methods fail; test methods raising this
# exception will be deemed to have 'failed' rather than 'errored'
failureException = AssertionError
# This attribute determines whether long messages (including repr of
# objects used in assert methods) will be printed on failure in *addition*
# to any explicit message passed.
longMessage = False
# This attribute sets the maximum length of a diff in failure messages
# by assert methods using difflib. It is looked up as an instance attribute
# so can be configured by individual tests if required.
maxDiff = 80*8
# Attribute used by TestSuite for classSetUp
_classSetupFailed = False
def __init__(self, methodName='runTest'):
"""Create an instance of the class that will use the named test
method when executed. Raises a ValueError if the instance does
not have a method with the specified name.
"""
self._testMethodName = methodName
self._resultForDoCleanups = None
try:
testMethod = getattr(self, methodName)
except AttributeError:
raise ValueError("no such test method in %s: %s" %
(self.__class__, methodName))
self._testMethodDoc = testMethod.__doc__
self._cleanups = []
# Map types to custom assertEqual functions that will compare
# instances of said type in more detail to generate a more useful
# error message.
self._type_equality_funcs = {}
self.addTypeEqualityFunc(dict, self.assertDictEqual)
self.addTypeEqualityFunc(list, self.assertListEqual)
self.addTypeEqualityFunc(tuple, self.assertTupleEqual)
self.addTypeEqualityFunc(set, self.assertSetEqual)
self.addTypeEqualityFunc(frozenset, self.assertSetEqual)
self.addTypeEqualityFunc(unicode, self.assertMultiLineEqual)
def addTypeEqualityFunc(self, typeobj, function):
"""Add a type specific assertEqual style function to compare a type.
This method is for use by TestCase subclasses that need to register
their own type equality functions to provide nicer error messages.
Args:
typeobj: The data type to call this function on when both values
are of the same type in assertEqual().
function: The callable taking two arguments and an optional
msg= argument that raises self.failureException with a
useful error message when the two arguments are not equal.
"""
self._type_equality_funcs[typeobj] = function
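        # A hedged example: a subclass's __init__ could register
        #   self.addTypeEqualityFunc(Point, self.assertPointEqual)
        # where Point and assertPointEqual are hypothetical user-defined names.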
def addCleanup(self, function, *args, **kwargs):
"""Add a function, with arguments, to be called when the test is
completed. Functions added are called on a LIFO basis and are
called after tearDown on test failure or success.
Cleanup items are called even if setUp fails (unlike tearDown)."""
self._cleanups.append((function, args, kwargs))
def setUp(self):
"Hook method for setting up the test fixture before exercising it."
pass
def tearDown(self):
"Hook method for deconstructing the test fixture after testing it."
pass
@classmethod
def setUpClass(cls):
"Hook method for setting up class fixture before running tests in the class."
@classmethod
def tearDownClass(cls):
"Hook method for deconstructing the class fixture after running all tests in the class."
def countTestCases(self):
return 1
def defaultTestResult(self):
return result.TestResult()
def shortDescription(self):
"""Returns a one-line description of the test, or None if no
description has been provided.
The default implementation of this method returns the first line of
the specified test method's docstring.
"""
doc = self._testMethodDoc
return doc and doc.split("\n")[0].strip() or None
def id(self):
return "%s.%s" % (strclass(self.__class__), self._testMethodName)
def __eq__(self, other):
if type(self) is not type(other):
return NotImplemented
return self._testMethodName == other._testMethodName
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((type(self), self._testMethodName))
def __str__(self):
return "%s (%s)" % (self._testMethodName, strclass(self.__class__))
def __repr__(self):
return "<%s testMethod=%s>" % \
(strclass(self.__class__), self._testMethodName)
def _addSkip(self, result, reason):
addSkip = getattr(result, 'addSkip', None)
if addSkip is not None:
addSkip(self, reason)
else:
warnings.warn("TestResult has no addSkip method, skips not reported",
RuntimeWarning, 2)
result.addSuccess(self)
def run(self, result=None):
orig_result = result
if result is None:
result = self.defaultTestResult()
startTestRun = getattr(result, 'startTestRun', None)
if startTestRun is not None:
startTestRun()
self._resultForDoCleanups = result
result.startTest(self)
testMethod = getattr(self, self._testMethodName)
if (getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False)):
# If the class or method was skipped.
try:
skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
or getattr(testMethod, '__unittest_skip_why__', ''))
self._addSkip(result, skip_why)
finally:
result.stopTest(self)
return
try:
success = False
try:
self.setUp()
except SkipTest as e:
self._addSkip(result, str(e))
except KeyboardInterrupt:
raise
except:
result.addError(self, sys.exc_info())
else:
try:
testMethod()
except KeyboardInterrupt:
raise
except self.failureException:
result.addFailure(self, sys.exc_info())
except _ExpectedFailure as e:
addExpectedFailure = getattr(result, 'addExpectedFailure', None)
if addExpectedFailure is not None:
addExpectedFailure(self, e.exc_info)
else:
warnings.warn("TestResult has no addExpectedFailure method, reporting as passes",
RuntimeWarning)
result.addSuccess(self)
except _UnexpectedSuccess:
addUnexpectedSuccess = getattr(result, 'addUnexpectedSuccess', None)
if addUnexpectedSuccess is not None:
addUnexpectedSuccess(self)
else:
warnings.warn("TestResult has no addUnexpectedSuccess method, reporting as failures",
RuntimeWarning)
result.addFailure(self, sys.exc_info())
except SkipTest as e:
self._addSkip(result, str(e))
except:
result.addError(self, sys.exc_info())
else:
success = True
try:
self.tearDown()
except KeyboardInterrupt:
raise
except:
result.addError(self, sys.exc_info())
success = False
cleanUpSuccess = self.doCleanups()
success = success and cleanUpSuccess
if success:
result.addSuccess(self)
finally:
result.stopTest(self)
if orig_result is None:
stopTestRun = getattr(result, 'stopTestRun', None)
if stopTestRun is not None:
stopTestRun()
def doCleanups(self):
"""Execute all cleanup functions. Normally called for you after
tearDown."""
result = self._resultForDoCleanups
ok = True
while self._cleanups:
function, args, kwargs = self._cleanups.pop(-1)
try:
function(*args, **kwargs)
except KeyboardInterrupt:
raise
except:
ok = False
result.addError(self, sys.exc_info())
return ok
def __call__(self, *args, **kwds):
return self.run(*args, **kwds)
def debug(self):
"""Run the test without collecting errors in a TestResult"""
self.setUp()
getattr(self, self._testMethodName)()
self.tearDown()
while self._cleanups:
function, args, kwargs = self._cleanups.pop(-1)
function(*args, **kwargs)
def skipTest(self, reason):
"""Skip this test."""
raise SkipTest(reason)
def fail(self, msg=None):
"""Fail immediately, with the given message."""
raise self.failureException(msg)
def assertFalse(self, expr, msg=None):
"""Check that the expression is false."""
if expr:
msg = self._formatMessage(msg, "%s is not false" % safe_repr(expr))
raise self.failureException(msg)
def assertTrue(self, expr, msg=None):
"""Check that the expression is true."""
if not expr:
msg = self._formatMessage(msg, "%s is not true" % safe_repr(expr))
raise self.failureException(msg)
def _formatMessage(self, msg, standardMsg):
"""Honour the longMessage attribute when generating failure messages.
If longMessage is False this means:
* Use only an explicit message if it is provided
* Otherwise use the standard message for the assert
If longMessage is True:
* Use the standard message
* If an explicit message is provided, plus ' : ' and the explicit message
"""
if not self.longMessage:
return msg or standardMsg
if msg is None:
return standardMsg
try:
# don't switch to '{}' formatting in Python 2.X
# it changes the way unicode input is handled
return '%s : %s' % (standardMsg, msg)
except UnicodeDecodeError:
return '%s : %s' % (safe_repr(standardMsg), safe_repr(msg))
def assertRaises(self, excClass, callableObj=None, *args, **kwargs):
"""Fail unless an exception of class excClass is thrown
by callableObj when invoked with arguments args and keyword
arguments kwargs. If a different type of exception is
thrown, it will not be caught, and the test case will be
deemed to have suffered an error, exactly as for an
unexpected exception.
If called with callableObj omitted or None, will return a
context object used like this::
with self.assertRaises(SomeException):
do_something()
The context manager keeps a reference to the exception as
the 'exception' attribute. This allows you to inspect the
exception after the assertion::
with self.assertRaises(SomeException) as cm:
do_something()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, 3)
"""
context = _AssertRaisesContext(excClass, self)
if callableObj is None:
return context
with context:
callableObj(*args, **kwargs)
def _getAssertEqualityFunc(self, first, second):
"""Get a detailed comparison function for the types of the two args.
Returns: A callable accepting (first, second, msg=None) that will
raise a failure exception if first != second with a useful human
readable error message for those types.
"""
#
# NOTE(gregory.p.smith): I considered isinstance(first, type(second))
# and vice versa. I opted for the conservative approach in case
# subclasses are not intended to be compared in detail to their super
# class instances using a type equality func. This means testing
# subtypes won't automagically use the detailed comparison. Callers
# should use their type specific assertSpamEqual method to compare
# subclasses if the detailed comparison is desired and appropriate.
# See the discussion in http://bugs.python.org/issue2578.
#
if type(first) is type(second):
asserter = self._type_equality_funcs.get(type(first))
if asserter is not None:
return asserter
return self._baseAssertEqual
def _baseAssertEqual(self, first, second, msg=None):
"""The default assertEqual implementation, not type specific."""
if not first == second:
standardMsg = '%s != %s' % (safe_repr(first), safe_repr(second))
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertEqual(self, first, second, msg=None):
"""Fail if the two objects are unequal as determined by the '=='
operator.
"""
assertion_func = self._getAssertEqualityFunc(first, second)
assertion_func(first, second, msg=msg)
def assertNotEqual(self, first, second, msg=None):
"""Fail if the two objects are equal as determined by the '=='
operator.
"""
if not first != second:
msg = self._formatMessage(msg, '%s == %s' % (safe_repr(first),
safe_repr(second)))
raise self.failureException(msg)
def assertAlmostEqual(self, first, second, places=None, msg=None, delta=None):
"""Fail if the two objects are unequal as determined by their
difference rounded to the given number of decimal places
           (default 7) and comparing to zero, or by comparing that the
           difference between the two objects is more than the given delta.
Note that decimal places (from zero) are usually not the same
           as significant digits (measured from the most significant digit).
If the two objects compare equal then they will automatically
compare almost equal.
"""
if first == second:
# shortcut
return
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if delta is not None:
if abs(first - second) <= delta:
return
standardMsg = '%s != %s within %s delta' % (safe_repr(first),
safe_repr(second),
safe_repr(delta))
else:
if places is None:
places = 7
if round(abs(second-first), places) == 0:
return
standardMsg = '%s != %s within %r places' % (safe_repr(first),
safe_repr(second),
places)
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertNotAlmostEqual(self, first, second, places=None, msg=None, delta=None):
"""Fail if the two objects are equal as determined by their
difference rounded to the given number of decimal places
           (default 7) and comparing to zero, or by comparing that the
           difference between the two objects is less than the given delta.
Note that decimal places (from zero) are usually not the same
as significant digits (measured from the most signficant digit).
Objects that are equal automatically fail.
"""
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if delta is not None:
if not (first == second) and abs(first - second) > delta:
return
standardMsg = '%s == %s within %s delta' % (safe_repr(first),
safe_repr(second),
safe_repr(delta))
else:
if places is None:
places = 7
if not (first == second) and round(abs(second-first), places) != 0:
return
standardMsg = '%s == %s within %r places' % (safe_repr(first),
safe_repr(second),
places)
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
# Synonyms for assertion methods
# The plurals are undocumented. Keep them that way to discourage use.
# Do not add more. Do not remove.
# Going through a deprecation cycle on these would annoy many people.
assertEquals = assertEqual
assertNotEquals = assertNotEqual
assertAlmostEquals = assertAlmostEqual
assertNotAlmostEquals = assertNotAlmostEqual
assert_ = assertTrue
# These fail* assertion method names are pending deprecation and will
# be a DeprecationWarning in 3.2; http://bugs.python.org/issue2578
def _deprecate(original_func):
def deprecated_func(*args, **kwargs):
warnings.warn(
'Please use {0} instead.'.format(original_func.__name__),
PendingDeprecationWarning, 2)
return original_func(*args, **kwargs)
return deprecated_func
failUnlessEqual = _deprecate(assertEqual)
failIfEqual = _deprecate(assertNotEqual)
failUnlessAlmostEqual = _deprecate(assertAlmostEqual)
failIfAlmostEqual = _deprecate(assertNotAlmostEqual)
failUnless = _deprecate(assertTrue)
failUnlessRaises = _deprecate(assertRaises)
failIf = _deprecate(assertFalse)
def assertSequenceEqual(self, seq1, seq2, msg=None, seq_type=None):
"""An equality assertion for ordered sequences (like lists and tuples).
For the purposes of this function, a valid ordered sequence type is one
which can be indexed, has a length, and has an equality operator.
Args:
seq1: The first sequence to compare.
seq2: The second sequence to compare.
seq_type: The expected datatype of the sequences, or None if no
datatype should be enforced.
msg: Optional message to use on failure instead of a list of
differences.
"""
if seq_type is not None:
seq_type_name = seq_type.__name__
if not isinstance(seq1, seq_type):
raise self.failureException('First sequence is not a %s: %s'
% (seq_type_name, safe_repr(seq1)))
if not isinstance(seq2, seq_type):
raise self.failureException('Second sequence is not a %s: %s'
% (seq_type_name, safe_repr(seq2)))
else:
seq_type_name = "sequence"
differing = None
try:
len1 = len(seq1)
except (TypeError, NotImplementedError):
differing = 'First %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
try:
len2 = len(seq2)
except (TypeError, NotImplementedError):
differing = 'Second %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
if seq1 == seq2:
return
seq1_repr = safe_repr(seq1)
seq2_repr = safe_repr(seq2)
if len(seq1_repr) > 30:
seq1_repr = seq1_repr[:30] + '...'
if len(seq2_repr) > 30:
seq2_repr = seq2_repr[:30] + '...'
elements = (seq_type_name.capitalize(), seq1_repr, seq2_repr)
differing = '%ss differ: %s != %s\n' % elements
for i in xrange(min(len1, len2)):
try:
item1 = seq1[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of first %s\n' %
(i, seq_type_name))
break
try:
item2 = seq2[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of second %s\n' %
(i, seq_type_name))
break
if item1 != item2:
differing += ('\nFirst differing element %d:\n%s\n%s\n' %
(i, item1, item2))
break
else:
if (len1 == len2 and seq_type is None and
type(seq1) != type(seq2)):
# The sequences are the same, but have differing types.
return
if len1 > len2:
differing += ('\nFirst %s contains %d additional '
'elements.\n' % (seq_type_name, len1 - len2))
try:
differing += ('First extra element %d:\n%s\n' %
(len2, seq1[len2]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of first %s\n' % (len2, seq_type_name))
elif len1 < len2:
differing += ('\nSecond %s contains %d additional '
'elements.\n' % (seq_type_name, len2 - len1))
try:
differing += ('First extra element %d:\n%s\n' %
(len1, seq2[len1]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of second %s\n' % (len1, seq_type_name))
standardMsg = differing
diffMsg = '\n' + '\n'.join(
difflib.ndiff(pprint.pformat(seq1).splitlines(),
pprint.pformat(seq2).splitlines()))
standardMsg = self._truncateMessage(standardMsg, diffMsg)
msg = self._formatMessage(msg, standardMsg)
self.fail(msg)
def _truncateMessage(self, message, diff):
max_diff = self.maxDiff
if max_diff is None or len(diff) <= max_diff:
return message + diff
return message + (DIFF_OMITTED % len(diff))
def assertListEqual(self, list1, list2, msg=None):
"""A list-specific equality assertion.
Args:
list1: The first list to compare.
list2: The second list to compare.
msg: Optional message to use on failure instead of a list of
differences.
"""
self.assertSequenceEqual(list1, list2, msg, seq_type=list)
def assertTupleEqual(self, tuple1, tuple2, msg=None):
"""A tuple-specific equality assertion.
Args:
tuple1: The first tuple to compare.
tuple2: The second tuple to compare.
msg: Optional message to use on failure instead of a list of
differences.
"""
self.assertSequenceEqual(tuple1, tuple2, msg, seq_type=tuple)
def assertSetEqual(self, set1, set2, msg=None):
"""A set-specific equality assertion.
Args:
set1: The first set to compare.
set2: The second set to compare.
msg: Optional message to use on failure instead of a list of
differences.
assertSetEqual uses ducktyping to support different types of sets, and
is optimized for sets specifically (parameters must support a
difference method).
"""
try:
difference1 = set1.difference(set2)
except TypeError, e:
self.fail('invalid type when attempting set difference: %s' % e)
except AttributeError, e:
self.fail('first argument does not support set difference: %s' % e)
try:
difference2 = set2.difference(set1)
except TypeError, e:
self.fail('invalid type when attempting set difference: %s' % e)
except AttributeError, e:
self.fail('second argument does not support set difference: %s' % e)
if not (difference1 or difference2):
return
lines = []
if difference1:
lines.append('Items in the first set but not the second:')
for item in difference1:
lines.append(repr(item))
if difference2:
lines.append('Items in the second set but not the first:')
for item in difference2:
lines.append(repr(item))
standardMsg = '\n'.join(lines)
self.fail(self._formatMessage(msg, standardMsg))
def assertIn(self, member, container, msg=None):
"""Just like self.assertTrue(a in b), but with a nicer default message."""
if member not in container:
standardMsg = '%s not found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIn(self, member, container, msg=None):
"""Just like self.assertTrue(a not in b), but with a nicer default message."""
if member in container:
standardMsg = '%s unexpectedly found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertIs(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is b), but with a nicer default message."""
if expr1 is not expr2:
standardMsg = '%s is not %s' % (safe_repr(expr1),
safe_repr(expr2))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNot(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is not b), but with a nicer default message."""
if expr1 is expr2:
standardMsg = 'unexpectedly identical: %s' % (safe_repr(expr1),)
self.fail(self._formatMessage(msg, standardMsg))
def assertDictEqual(self, d1, d2, msg=None):
self.assertIsInstance(d1, dict, 'First argument is not a dictionary')
self.assertIsInstance(d2, dict, 'Second argument is not a dictionary')
if d1 != d2:
standardMsg = '%s != %s' % (safe_repr(d1, True), safe_repr(d2, True))
diff = ('\n' + '\n'.join(difflib.ndiff(
pprint.pformat(d1).splitlines(),
pprint.pformat(d2).splitlines())))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertDictContainsSubset(self, expected, actual, msg=None):
"""Checks whether actual is a superset of expected."""
missing = []
mismatched = []
for key, value in expected.iteritems():
if key not in actual:
missing.append(key)
elif value != actual[key]:
mismatched.append('%s, expected: %s, actual: %s' %
(safe_repr(key), safe_repr(value),
safe_repr(actual[key])))
if not (missing or mismatched):
return
standardMsg = ''
if missing:
standardMsg = 'Missing: %s' % ','.join(safe_repr(m) for m in
missing)
if mismatched:
if standardMsg:
standardMsg += '; '
standardMsg += 'Mismatched values: %s' % ','.join(mismatched)
self.fail(self._formatMessage(msg, standardMsg))
def assertItemsEqual(self, expected_seq, actual_seq, msg=None):
"""An unordered sequence specific comparison. It asserts that
actual_seq and expected_seq have the same element counts.
Equivalent to::
self.assertEqual(Counter(iter(actual_seq)),
Counter(iter(expected_seq)))
Asserts that each element has the same count in both sequences.
Example:
- [0, 1, 1] and [1, 0, 1] compare equal.
- [0, 0, 1] and [0, 1] compare unequal.
"""
first_seq, second_seq = list(actual_seq), list(expected_seq)
with warnings.catch_warnings():
if sys.py3kwarning:
# Silence Py3k warning raised during the sorting
for _msg in ["(code|dict|type) inequality comparisons",
"builtin_function_or_method order comparisons",
"comparing unequal types"]:
warnings.filterwarnings("ignore", _msg, DeprecationWarning)
try:
first = collections.Counter(first_seq)
second = collections.Counter(second_seq)
except TypeError:
# Handle case with unhashable elements
differences = _count_diff_all_purpose(first_seq, second_seq)
else:
if first == second:
return
differences = _count_diff_hashable(first_seq, second_seq)
if differences:
standardMsg = 'Element counts were not equal:\n'
lines = ['First has %d, Second has %d: %r' % diff for diff in differences]
diffMsg = '\n'.join(lines)
standardMsg = self._truncateMessage(standardMsg, diffMsg)
msg = self._formatMessage(msg, standardMsg)
self.fail(msg)
def assertMultiLineEqual(self, first, second, msg=None):
"""Assert that two multi-line strings are equal."""
self.assertIsInstance(first, basestring,
'First argument is not a string')
self.assertIsInstance(second, basestring,
'Second argument is not a string')
if first != second:
firstlines = first.splitlines(True)
secondlines = second.splitlines(True)
if len(firstlines) == 1 and first.strip('\r\n') == first:
firstlines = [first + '\n']
secondlines = [second + '\n']
standardMsg = '%s != %s' % (safe_repr(first, True),
safe_repr(second, True))
diff = '\n' + ''.join(difflib.ndiff(firstlines, secondlines))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertLess(self, a, b, msg=None):
"""Just like self.assertTrue(a < b), but with a nicer default message."""
if not a < b:
standardMsg = '%s not less than %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertLessEqual(self, a, b, msg=None):
"""Just like self.assertTrue(a <= b), but with a nicer default message."""
if not a <= b:
standardMsg = '%s not less than or equal to %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertGreater(self, a, b, msg=None):
"""Just like self.assertTrue(a > b), but with a nicer default message."""
if not a > b:
standardMsg = '%s not greater than %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertGreaterEqual(self, a, b, msg=None):
"""Just like self.assertTrue(a >= b), but with a nicer default message."""
if not a >= b:
standardMsg = '%s not greater than or equal to %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNone(self, obj, msg=None):
"""Same as self.assertTrue(obj is None), with a nicer default message."""
if obj is not None:
standardMsg = '%s is not None' % (safe_repr(obj),)
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNotNone(self, obj, msg=None):
"""Included for symmetry with assertIsNone."""
if obj is None:
standardMsg = 'unexpectedly None'
self.fail(self._formatMessage(msg, standardMsg))
def assertIsInstance(self, obj, cls, msg=None):
"""Same as self.assertTrue(isinstance(obj, cls)), with a nicer
default message."""
if not isinstance(obj, cls):
standardMsg = '%s is not an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIsInstance(self, obj, cls, msg=None):
"""Included for symmetry with assertIsInstance."""
if isinstance(obj, cls):
standardMsg = '%s is an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
def assertRaisesRegexp(self, expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Asserts that the message in a raised exception matches a regexp.
Args:
expected_exception: Exception class expected to be raised.
expected_regexp: Regexp (re pattern object or string) expected
to be found in error message.
callable_obj: Function to be called.
args: Extra args.
kwargs: Extra kwargs.
"""
context = _AssertRaisesContext(expected_exception, self, expected_regexp)
if callable_obj is None:
return context
with context:
callable_obj(*args, **kwargs)
def assertRegexpMatches(self, text, expected_regexp, msg=None):
"""Fail the test unless the text matches the regular expression."""
if isinstance(expected_regexp, basestring):
expected_regexp = re.compile(expected_regexp)
if not expected_regexp.search(text):
msg = msg or "Regexp didn't match"
msg = '%s: %r not found in %r' % (msg, expected_regexp.pattern, text)
raise self.failureException(msg)
def assertNotRegexpMatches(self, text, unexpected_regexp, msg=None):
"""Fail the test if the text matches the regular expression."""
if isinstance(unexpected_regexp, basestring):
unexpected_regexp = re.compile(unexpected_regexp)
match = unexpected_regexp.search(text)
if match:
msg = msg or "Regexp matched"
msg = '%s: %r matches %r in %r' % (msg,
text[match.start():match.end()],
unexpected_regexp.pattern,
text)
raise self.failureException(msg)
class FunctionTestCase(TestCase):
"""A test case that wraps a test function.
This is useful for slipping pre-existing test functions into the
unittest framework. Optionally, set-up and tidy-up functions can be
supplied. As with TestCase, the tidy-up ('tearDown') function will
always be called if the set-up ('setUp') function ran successfully.
"""
def __init__(self, testFunc, setUp=None, tearDown=None, description=None):
super(FunctionTestCase, self).__init__()
self._setUpFunc = setUp
self._tearDownFunc = tearDown
self._testFunc = testFunc
self._description = description
def setUp(self):
if self._setUpFunc is not None:
self._setUpFunc()
def tearDown(self):
if self._tearDownFunc is not None:
self._tearDownFunc()
def runTest(self):
self._testFunc()
def id(self):
return self._testFunc.__name__
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._setUpFunc == other._setUpFunc and \
self._tearDownFunc == other._tearDownFunc and \
self._testFunc == other._testFunc and \
self._description == other._description
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((type(self), self._setUpFunc, self._tearDownFunc,
self._testFunc, self._description))
def __str__(self):
return "%s (%s)" % (strclass(self.__class__),
self._testFunc.__name__)
def __repr__(self):
return "<%s tec=%s>" % (strclass(self.__class__),
self._testFunc)
def shortDescription(self):
if self._description is not None:
return self._description
doc = self._testFunc.__doc__
return doc and doc.split("\n")[0].strip() or None
| gpl-3.0 | 4,718,258,924,345,535,000 | 38.621469 | 109 | 0.571201 | false |
tensorflow/lucid | tests/misc/io/test_saving.py | 1 | 6630 | import time
import pytest
import numpy as np
from lucid.misc.io.saving import save, CaptureSaveContext, batch_save
from lucid.misc.io.loading import load
from lucid.misc.io.scoping import io_scope, current_io_scopes
from concurrent.futures import ThreadPoolExecutor
import os.path
import io
import tensorflow as tf
dictionary = {"key": "value"}
dictionary_json = """{
"key": "value"
}"""
array1 = np.eye(10, 10)
array2 = np.dstack([np.eye(10, 10, k=i - 1) for i in range(3)])
def _remove(path):
try:
os.remove(path)
except OSError:
pass
def test_save_json():
path = "./tests/fixtures/generated_outputs/dictionary.json"
_remove(path)
save(dictionary, path)
assert os.path.isfile(path)
content = io.open(path, "rt").read()
assert content == dictionary_json
def test_save_npy():
path = "./tests/fixtures/generated_outputs/array.npy"
_remove(path)
save(array1, path)
assert os.path.isfile(path)
re_read_array = np.load(path)
assert np.array_equal(array1, re_read_array)
def test_save_npz_array():
path = "./tests/fixtures/generated_outputs/arrays.npz"
_remove(path)
save([array1, array2], path)
assert os.path.isfile(path)
re_read_arrays = np.load(path)
assert all(arr in re_read_arrays for arr in ("arr_0", "arr_1"))
assert np.array_equal(array1, re_read_arrays["arr_0"])
assert np.array_equal(array2, re_read_arrays["arr_1"])
def test_save_npz_dict():
path = "./tests/fixtures/generated_outputs/arrays.npz"
_remove(path)
arrays = {"array1": array1, "array2": array2}
save(arrays, path)
assert os.path.isfile(path)
re_read_arrays = np.load(path)
assert all(arr in re_read_arrays for arr in list(arrays))
assert np.array_equal(arrays["array1"], re_read_arrays["array1"])
def test_save_image_png():
path = "./tests/fixtures/generated_outputs/rgbeye.png"
_remove(path)
save(array2, path)
assert os.path.isfile(path)
def test_save_image_jpg():
path = "./tests/fixtures/generated_outputs/rgbeye.jpg"
_remove(path)
save(array2, path)
assert os.path.isfile(path)
def test_save_array_txt():
path = "./tests/fixtures/generated_outputs/multiline.txt"
_remove(path)
stringarray = ["Line {:d}".format(i) for i in range(10)]
save(stringarray, path)
assert os.path.isfile(path)
def test_save_txt():
path = "./tests/fixtures/generated_outputs/multiline.txt"
_remove(path)
string = "".join(["Line {:d}\n".format(i) for i in range(10)])
save(string, path)
assert os.path.isfile(path)
def test_save_named_handle():
path = "./tests/fixtures/generated_outputs/rgbeye.jpg"
_remove(path)
with io.open(path, "wb") as handle:
save(array2, handle)
assert os.path.isfile(path)
def test_save_compressed_npy():
uncompressed_path = "./tests/fixtures/generated_outputs/array.npy"
_remove(uncompressed_path)
save(array2, uncompressed_path)
compressed_path = "./tests/fixtures/generated_outputs/array.npy.xz"
_remove(compressed_path)
save(array2, compressed_path)
assert os.path.isfile(uncompressed_path)
assert os.path.isfile(compressed_path)
re_read_array = load(compressed_path)
assert np.array_equal(array2, re_read_array)
uncompressed_size = os.path.getsize(uncompressed_path)
compressed_size = os.path.getsize(compressed_path)
assert compressed_size < uncompressed_size
def test_save_load_pickle():
path = "./tests/fixtures/generated_outputs/some_data.pickle"
data = {
'test': [1, 2, 3, "some string"],
'numpy_values': array2
}
_remove(path)
with io.open(path, "wb") as handle:
with pytest.raises(ValueError):
save(data, handle)
save(data, handle, allow_unsafe_formats=True)
assert os.path.isfile(path)
with pytest.raises(ValueError):
loaded_data = load(path)
loaded_data = load(path, allow_unsafe_formats=True)
assert loaded_data['test'] == data['test']
assert np.array_equal(loaded_data['numpy_values'], data['numpy_values'])
def test_unknown_extension():
with pytest.raises(ValueError):
save({}, "test.unknown")
def test_unknown_compressor():
with pytest.raises(ValueError):
save(array2, "test.npy.gz") # .gz is not currently supported, only xy
def test_save_protobuf():
path = "./tests/fixtures/generated_outputs/graphdef.pb"
_remove(path)
with tf.Graph().as_default():
a = tf.Variable(42)
graphdef = a.graph.as_graph_def()
save(graphdef, path)
assert os.path.isfile(path)
def test_write_scope_compatibility():
path = "./tests/fixtures/generated_outputs/write_scope_compatibility.txt"
_remove(path)
with io_scope("./tests/fixtures/generated_outputs"):
save("test content", 'write_scope_compatibility.txt')
assert os.path.isfile(path)
def test_capturing_saves():
path = "./tests/fixtures/generated_outputs/test_capturing_saves.txt"
_remove(path)
context = CaptureSaveContext()
with context, io_scope("./tests/fixtures/generated_outputs"):
save("test", "test_capturing_saves.txt")
captured = context.captured_saves
assert len(captured) == 1
assert "type" in captured[0]
assert captured[0]["type"] == "txt"
def test_threadlocal_io_scopes():
""" This tests that scopes are thread local and they don't clobber each other when different threads are competing"""
def _return_io_scope(io_scope_path):
with io_scope(io_scope_path):
time.sleep(np.random.uniform(0.05, 0.1))
return current_io_scopes()[-1]
n_tasks = 16
n_workers = 8
with ThreadPoolExecutor(max_workers=n_workers) as executor:
futures = {executor.submit(_return_io_scope, f'gs://test-{i}'): f'gs://test-{i}' for i in range(n_tasks)}
results = [f.result() for f in futures]
assert results == list(futures.values())
def test_batch_saves():
save_ops = [(str(i), f"write_batch_{i}.txt") for i in range(5)]
[_remove(f"./tests/fixtures/generated_outputs/write_batch_{i}.txt") for i in range(5)]
context = CaptureSaveContext()
with context, io_scope("./tests/fixtures/generated_outputs"):
results = batch_save(save_ops)
assert len(results) == 5
assert len(context.captured_saves) == 5
assert context.captured_saves[0]['type'] == 'txt'
print(context.captured_saves)
assert 'write_batch_' in context.captured_saves[0]['url']
assert all([os.path.isfile(f"./tests/fixtures/generated_outputs/write_batch_{i}.txt") for i in range(5)])
| apache-2.0 | 1,805,194,324,899,068,200 | 30.126761 | 121 | 0.66365 | false |
collinjackson/mojo | nacl_bindings_generator/interface_dsl.py | 5 | 4095 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class Interface(object):
def __init__(self):
self.functions = []
def Func(self, name, return_type):
f = Function(self, len(self.functions), name, return_type)
self.functions.append(f)
return f
def Finalize(self):
for f in self.functions:
f.Finalize()
class Function(object):
def __init__(self, parent, uid, name, return_type):
self.parent = parent
self.uid = uid
self.name = name
self.return_type = return_type
self.params = []
self.param_by_name = {}
self.result_param = None
self.broken_in_nacl = False
def Param(self, name, param_type=None):
p = Param(self, len(self.params), name, param_type)
self.params.append(p)
self.param_by_name[name] = p
return p
def ParamList(self):
return [param.param_type + ' ' + param.name for param in self.params]
def ParamDecl(self):
if self.params:
return ', '.join(self.ParamList())
else:
return 'void'
def IsBrokenInNaCl(self):
self.broken_in_nacl = True
def Finalize(self):
self.result_param = Param(self, len(self.params), 'result')
self.result_param.Out(self.return_type).AlwaysWritten()
class Param(object):
def __init__(self, parent, uid, name, param_type=None):
self.parent = parent
self.uid = uid
self.name = name
self.base_type = param_type
self.param_type = param_type
self.size = None
self.is_input = False
self.is_output = False
self.is_array = False
self.is_struct = False
self.is_extensible = False
self.is_optional = False
self.is_always_written = False
self.is_pointer = False
def GetSizeParam(self):
assert self.size
return self.parent.param_by_name[self.size]
def In(self, ty):
self.base_type = ty
self.param_type = ty
self.is_input = True
self.is_pointer = ty.endswith('*')
return self
def InArray(self, ty, size):
self.base_type = ty
self.param_type = 'const ' + ty + '*'
self.size = size
self.is_input = True
self.is_array = True
return self
# An "extensible" struct is one where we don't know the exact size - rather
# the first 4 bytes of the struct declare the length of the struct. This
# allows forwards and backwards compatibility with additive changes to the
# structure definition.
def InExtensibleStruct(self, ty):
self.base_type = ty
self.param_type = 'const struct ' + ty + '*'
self.is_input = True
self.is_struct = True
self.is_extensible = True
return self
def InOut(self, ty):
self.base_type = ty
self.param_type = ty + '*'
self.is_input = True
self.is_output = True
self.is_pointer = ty.endswith('*')
return self
def Out(self, ty):
self.base_type = ty
self.param_type = ty + '*'
self.is_output = True
self.is_pointer = ty.endswith('*')
return self
def OutArray(self, ty, size):
self.base_type = ty
self.param_type = ty + '*'
self.size = size
self.is_array = True
self.is_output = True
return self
# The size of the struct is fixed by the API, it cannot be extended.
def OutFixedStruct(self, ty):
self.base_type = ty
self.param_type = 'struct ' + ty + '*'
self.is_output = True
self.is_struct = True
self.is_extensible = False
return self
def OutFixedStructArray(self, ty, size):
self.base_type = ty
self.param_type = 'struct ' + ty + '*'
self.size = size
self.is_array = True
self.is_output = True
return self
# Declares that it is valid to pass a null pointer.
def Optional(self):
assert not self.IsPassedByValue()
self.is_optional = True
return self
def AlwaysWritten(self):
assert self.is_output, self
self.is_always_written = True
return self
def IsScalar(self):
return not self.is_array and not self.is_struct
def IsPassedByValue(self):
return not self.is_output and self.IsScalar()
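# Illustrative use of the DSL (a sketch; the function and parameter names
# below are made up, not real Mojo entry points):
#
#   interface = Interface()
#   func = interface.Func('ExampleReadData', 'MojoResult')
#   func.Param('handle').In('MojoHandle')
#   func.Param('num_bytes').InOut('uint32_t').Optional()
#   interface.Finalize()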
| bsd-3-clause | -2,292,047,989,961,554,200 | 25.590909 | 77 | 0.643712 | false |
idea4bsd/idea4bsd | plugins/hg4idea/testData/bin/mercurial/hgweb/hgweb_mod.py | 91 | 15218 | # hgweb/hgweb_mod.py - Web interface for a repository.
#
# Copyright 21 May 2005 - (c) 2005 Jake Edge <[email protected]>
# Copyright 2005-2007 Matt Mackall <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import os
from mercurial import ui, hg, hook, error, encoding, templater, util, repoview
from mercurial.templatefilters import websub
from mercurial.i18n import _
from common import get_stat, ErrorResponse, permhooks, caching
from common import HTTP_OK, HTTP_NOT_MODIFIED, HTTP_BAD_REQUEST
from common import HTTP_NOT_FOUND, HTTP_SERVER_ERROR
from request import wsgirequest
import webcommands, protocol, webutil, re
perms = {
'changegroup': 'pull',
'changegroupsubset': 'pull',
'getbundle': 'pull',
'stream_out': 'pull',
'listkeys': 'pull',
'unbundle': 'push',
'pushkey': 'push',
}
def makebreadcrumb(url, prefix=''):
'''Return a 'URL breadcrumb' list
A 'URL breadcrumb' is a list of URL-name pairs,
corresponding to each of the path items on a URL.
This can be used to create path navigation entries.
'''
if url.endswith('/'):
url = url[:-1]
if prefix:
url = '/' + prefix + url
relpath = url
if relpath.startswith('/'):
relpath = relpath[1:]
breadcrumb = []
urlel = url
pathitems = [''] + relpath.split('/')
for pathel in reversed(pathitems):
if not pathel or not urlel:
break
breadcrumb.append({'url': urlel, 'name': pathel})
urlel = os.path.dirname(urlel)
return reversed(breadcrumb)
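# For example (illustrative): makebreadcrumb('/hg/repo') yields, in order,
# {'url': '/hg', 'name': 'hg'} and then {'url': '/hg/repo', 'name': 'repo'}.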
class hgweb(object):
def __init__(self, repo, name=None, baseui=None):
if isinstance(repo, str):
if baseui:
u = baseui.copy()
else:
u = ui.ui()
self.repo = hg.repository(u, repo)
else:
self.repo = repo
self.repo = self._getview(self.repo)
self.repo.ui.setconfig('ui', 'report_untrusted', 'off')
self.repo.baseui.setconfig('ui', 'report_untrusted', 'off')
self.repo.ui.setconfig('ui', 'nontty', 'true')
self.repo.baseui.setconfig('ui', 'nontty', 'true')
hook.redirect(True)
self.mtime = -1
self.size = -1
self.reponame = name
self.archives = 'zip', 'gz', 'bz2'
self.stripecount = 1
# a repo owner may set web.templates in .hg/hgrc to get any file
# readable by the user running the CGI script
self.templatepath = self.config('web', 'templates')
self.websubtable = self.loadwebsub()
# The CGI scripts are often run by a user different from the repo owner.
# Trust the settings from the .hg/hgrc files by default.
def config(self, section, name, default=None, untrusted=True):
return self.repo.ui.config(section, name, default,
untrusted=untrusted)
def configbool(self, section, name, default=False, untrusted=True):
return self.repo.ui.configbool(section, name, default,
untrusted=untrusted)
def configlist(self, section, name, default=None, untrusted=True):
return self.repo.ui.configlist(section, name, default,
untrusted=untrusted)
def _getview(self, repo):
viewconfig = self.config('web', 'view', 'served')
if viewconfig == 'all':
return repo.unfiltered()
elif viewconfig in repoview.filtertable:
return repo.filtered(viewconfig)
else:
return repo.filtered('served')
def refresh(self, request=None):
st = get_stat(self.repo.spath)
# compare changelog size in addition to mtime to catch
# rollbacks made less than a second ago
if st.st_mtime != self.mtime or st.st_size != self.size:
self.mtime = st.st_mtime
self.size = st.st_size
r = hg.repository(self.repo.baseui, self.repo.root)
self.repo = self._getview(r)
self.maxchanges = int(self.config("web", "maxchanges", 10))
self.stripecount = int(self.config("web", "stripes", 1))
self.maxshortchanges = int(self.config("web", "maxshortchanges",
60))
self.maxfiles = int(self.config("web", "maxfiles", 10))
self.allowpull = self.configbool("web", "allowpull", True)
encoding.encoding = self.config("web", "encoding",
encoding.encoding)
if request:
self.repo.ui.environ = request.env
def run(self):
if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."):
raise RuntimeError("This function is only intended to be "
"called while running as a CGI script.")
import mercurial.hgweb.wsgicgi as wsgicgi
wsgicgi.launch(self)
def __call__(self, env, respond):
req = wsgirequest(env, respond)
return self.run_wsgi(req)
def run_wsgi(self, req):
self.refresh(req)
# work with CGI variables to create coherent structure
# use SCRIPT_NAME, PATH_INFO and QUERY_STRING as well as our REPO_NAME
req.url = req.env['SCRIPT_NAME']
if not req.url.endswith('/'):
req.url += '/'
if 'REPO_NAME' in req.env:
req.url += req.env['REPO_NAME'] + '/'
if 'PATH_INFO' in req.env:
parts = req.env['PATH_INFO'].strip('/').split('/')
repo_parts = req.env.get('REPO_NAME', '').split('/')
if parts[:len(repo_parts)] == repo_parts:
parts = parts[len(repo_parts):]
query = '/'.join(parts)
else:
query = req.env['QUERY_STRING'].split('&', 1)[0]
query = query.split(';', 1)[0]
# process this if it's a protocol request
# protocol bits don't need to create any URLs
# and the clients always use the old URL structure
cmd = req.form.get('cmd', [''])[0]
if protocol.iscmd(cmd):
try:
if query:
raise ErrorResponse(HTTP_NOT_FOUND)
if cmd in perms:
self.check_perm(req, perms[cmd])
return protocol.call(self.repo, req, cmd)
except ErrorResponse, inst:
# A client that sends unbundle without 100-continue will
# break if we respond early.
if (cmd == 'unbundle' and
(req.env.get('HTTP_EXPECT',
'').lower() != '100-continue') or
req.env.get('X-HgHttp2', '')):
req.drain()
req.respond(inst, protocol.HGTYPE,
body='0\n%s\n' % inst.message)
return ''
# translate user-visible url structure to internal structure
args = query.split('/', 2)
if 'cmd' not in req.form and args and args[0]:
cmd = args.pop(0)
style = cmd.rfind('-')
if style != -1:
req.form['style'] = [cmd[:style]]
cmd = cmd[style + 1:]
# avoid accepting e.g. style parameter as command
if util.safehasattr(webcommands, cmd):
req.form['cmd'] = [cmd]
else:
cmd = ''
if cmd == 'static':
req.form['file'] = ['/'.join(args)]
else:
if args and args[0]:
node = args.pop(0)
req.form['node'] = [node]
if args:
req.form['file'] = args
ua = req.env.get('HTTP_USER_AGENT', '')
if cmd == 'rev' and 'mercurial' in ua:
req.form['style'] = ['raw']
if cmd == 'archive':
fn = req.form['node'][0]
for type_, spec in self.archive_specs.iteritems():
ext = spec[2]
if fn.endswith(ext):
req.form['node'] = [fn[:-len(ext)]]
req.form['type'] = [type_]
# process the web interface request
try:
tmpl = self.templater(req)
ctype = tmpl('mimetype', encoding=encoding.encoding)
ctype = templater.stringify(ctype)
            # check read permissions for non-static content
if cmd != 'static':
self.check_perm(req, None)
if cmd == '':
req.form['cmd'] = [tmpl.cache['default']]
cmd = req.form['cmd'][0]
if self.configbool('web', 'cache', True):
caching(self, req) # sets ETag header or raises NOT_MODIFIED
if cmd not in webcommands.__all__:
msg = 'no such method: %s' % cmd
raise ErrorResponse(HTTP_BAD_REQUEST, msg)
elif cmd == 'file' and 'raw' in req.form.get('style', []):
self.ctype = ctype
content = webcommands.rawfile(self, req, tmpl)
else:
content = getattr(webcommands, cmd)(self, req, tmpl)
req.respond(HTTP_OK, ctype)
return content
except (error.LookupError, error.RepoLookupError), err:
req.respond(HTTP_NOT_FOUND, ctype)
msg = str(err)
if (util.safehasattr(err, 'name') and
not isinstance(err, error.ManifestLookupError)):
msg = 'revision not found: %s' % err.name
return tmpl('error', error=msg)
except (error.RepoError, error.RevlogError), inst:
req.respond(HTTP_SERVER_ERROR, ctype)
return tmpl('error', error=str(inst))
except ErrorResponse, inst:
req.respond(inst, ctype)
if inst.code == HTTP_NOT_MODIFIED:
# Not allowed to return a body on a 304
return ['']
return tmpl('error', error=inst.message)
def loadwebsub(self):
websubtable = []
websubdefs = self.repo.ui.configitems('websub')
# we must maintain interhg backwards compatibility
websubdefs += self.repo.ui.configitems('interhg')
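        # Each definition is a sed-style substitution; e.g. an hgrc entry such
        # as (illustrative, made-up URL):
        #   [websub]
        #   issues = s|issue(\d+)|<a href="https://bts.example/\1">issue\1</a>|i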
for key, pattern in websubdefs:
# grab the delimiter from the character after the "s"
unesc = pattern[1]
delim = re.escape(unesc)
# identify portions of the pattern, taking care to avoid escaped
# delimiters. the replace format and flags are optional, but
# delimiters are required.
match = re.match(
r'^s%s(.+)(?:(?<=\\\\)|(?<!\\))%s(.*)%s([ilmsux])*$'
% (delim, delim, delim), pattern)
if not match:
self.repo.ui.warn(_("websub: invalid pattern for %s: %s\n")
% (key, pattern))
continue
# we need to unescape the delimiter for regexp and format
delim_re = re.compile(r'(?<!\\)\\%s' % delim)
regexp = delim_re.sub(unesc, match.group(1))
format = delim_re.sub(unesc, match.group(2))
# the pattern allows for 6 regexp flags, so set them if necessary
flagin = match.group(3)
flags = 0
if flagin:
for flag in flagin.upper():
flags |= re.__dict__[flag]
try:
regexp = re.compile(regexp, flags)
websubtable.append((regexp, format))
except re.error:
self.repo.ui.warn(_("websub: invalid regexp for %s: %s\n")
% (key, regexp))
return websubtable
def templater(self, req):
# determine scheme, port and server name
# this is needed to create absolute urls
proto = req.env.get('wsgi.url_scheme')
if proto == 'https':
proto = 'https'
default_port = "443"
else:
proto = 'http'
default_port = "80"
port = req.env["SERVER_PORT"]
port = port != default_port and (":" + port) or ""
urlbase = '%s://%s%s' % (proto, req.env['SERVER_NAME'], port)
logourl = self.config("web", "logourl", "http://mercurial.selenic.com/")
logoimg = self.config("web", "logoimg", "hglogo.png")
staticurl = self.config("web", "staticurl") or req.url + 'static/'
if not staticurl.endswith('/'):
staticurl += '/'
# some functions for the templater
def header(**map):
yield tmpl('header', encoding=encoding.encoding, **map)
def footer(**map):
yield tmpl("footer", **map)
def motd(**map):
yield self.config("web", "motd", "")
# figure out which style to use
vars = {}
styles = (
req.form.get('style', [None])[0],
self.config('web', 'style'),
'paper',
)
style, mapfile = templater.stylemap(styles, self.templatepath)
if style == styles[0]:
vars['style'] = style
start = req.url[-1] == '?' and '&' or '?'
sessionvars = webutil.sessionvars(vars, start)
if not self.reponame:
self.reponame = (self.config("web", "name")
or req.env.get('REPO_NAME')
or req.url.strip('/') or self.repo.root)
def websubfilter(text):
return websub(text, self.websubtable)
# create the templater
tmpl = templater.templater(mapfile,
filters={"websub": websubfilter},
defaults={"url": req.url,
"logourl": logourl,
"logoimg": logoimg,
"staticurl": staticurl,
"urlbase": urlbase,
"repo": self.reponame,
"header": header,
"footer": footer,
"motd": motd,
"sessionvars": sessionvars,
"pathdef": makebreadcrumb(req.url),
})
return tmpl
def archivelist(self, nodeid):
allowed = self.configlist("web", "allow_archive")
for i, spec in self.archive_specs.iteritems():
if i in allowed or self.configbool("web", "allow" + i):
yield {"type" : i, "extension" : spec[2], "node" : nodeid}
archive_specs = {
'bz2': ('application/x-bzip2', 'tbz2', '.tar.bz2', None),
'gz': ('application/x-gzip', 'tgz', '.tar.gz', None),
'zip': ('application/zip', 'zip', '.zip', None),
}
def check_perm(self, req, op):
for hook in permhooks:
hook(self, req, op)
| apache-2.0 | -5,328,603,449,746,074,000 | 37.429293 | 80 | 0.512945 | false |
oconnor663/peru | peru/display.py | 1 | 9334 | import asyncio
import io
import re
import sys
# The display classes deal with output from subprocesses. The FancyDisplay
# gives a multi-line, real-time view of each running process that looks nice in
# the terminal. The VerboseDisplay collects output from each job and prints it
# all when the job is finished, in a way that's suitable for logs. The
# QuietDisplay prints nothing.
#
# All of the display types inherit from BaseDisplay and provide the same
# interface. Callers use get_handle() to get a display handle for each
# subprocess job that's going to run. The handle is used as a context manager
# (inside a with statement) to indicate when the job is starting and stopping,
# and all of the output from the subprocess is passed to the handle's write()
# method. There is also a print() method on the display, for output that's not
# tied to a particular job, which prints to the terminal in a way that won't
# get stomped on by FancyDisplay's redrawing.
#
# Like other errors, we handle job errors by throwing a PrintableError, which
# gets caught in main. So the displays don't need to do anything special to
# show errors.
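# Illustrative usage of the shared display interface (a sketch; the real
# callers live in peru's asyncio job runner, and 'fetch module-foo' is a
# made-up title):
#
#   display = FancyDisplay()
#   with display.get_handle('fetch module-foo') as handle:
#       handle.write('cloning...\n')   # forwarded subprocess output
#   display.print('done')              # printed above the live display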
ANSI_CURSOR_UP_ONE_LINE = '\x1b[1A'
ANSI_CLEAR_LINE = '\x1b[2K'
ANSI_DISABLE_LINE_WRAP = '\x1b[?7l'
ANSI_ENABLE_LINE_WRAP = '\x1b[?7h'
class BaseDisplay:
def __init__(self, output=None):
self.output = output or sys.stdout
# Every job/handle gets a unique id.
self._next_job_id = 0
# Output from each job is buffered.
self.buffers = {}
# Each job has a title, like the name of the module being fetched.
self.titles = {}
# We also keep track of any handles that haven't been entered yet, so
# that the FancyDisplay can know when to finally clean up.
self.outstanding_jobs = set()
def get_handle(self, title):
job_id = self._next_job_id
self._next_job_id += 1
self.titles[job_id] = title
self.buffers[job_id] = io.StringIO()
self.outstanding_jobs.add(job_id)
return _DisplayHandle(self, job_id)
# FancyDisplay overrides print() to avoid conflicting with redraws.
def print(self, *args, **kwargs):
print(*args, file=self.output, **kwargs)
# Callbacks that get overridden by subclasses.
def _job_started(self, job_id):
pass
def _job_written(self, job_id, string):
pass
def _job_finished(self, job_id):
pass
# Callbacks for handles.
def _handle_start(self, job_id):
self._job_started(job_id)
def _handle_write(self, job_id, string):
self.buffers[job_id].write(string)
self._job_written(job_id, string)
def _handle_finish(self, job_id):
self.outstanding_jobs.remove(job_id)
self._job_finished(job_id)
class QuietDisplay(BaseDisplay):
'''Prints nothing.'''
pass
class VerboseDisplay(BaseDisplay):
'''Waits until jobs are finished and then prints all of their output at
once, to make sure jobs don't get interleaved. We use '===' as a delimiter
to try to separate jobs from one another, and from other output.'''
def _job_started(self, job_id):
print('===', 'started', self.titles[job_id], '===', file=self.output)
def _job_finished(self, job_id):
print('===', 'finished', self.titles[job_id], '===', file=self.output)
outputstr = self.buffers[job_id].getvalue()
if outputstr:
self.output.write(outputstr)
print('===', file=self.output)
class FancyDisplay(BaseDisplay):
'''Prints a multi-line, real-time display of all the latest output lines
from each job.'''
def __init__(self, *args):
super().__init__(*args)
# Every time we draw we need to erase the lines that were printed
# before. This keeps track of that number. Note that we split output on
# newlines and use no-wrap control codes in the terminal, so we only
# need to count the number of jobs drawn.
self._lines_printed = 0
# This is the list of all active jobs. There's no guarantee that jobs
# start in any particular order, so this list also helps us keep the
# order stable.
self._job_slots = []
# The last line output from each job. This is what gets drawn.
self._output_lines = {}
# Lines that need to be printed above the display. This has to happen
# during the next draw, right after the display is cleared.
self._to_print = []
# To avoid flicker, we draw on a short timeout instead of every time we
# receive output. When this asyncio handle is set, it means a draw is
# already pending.
self._draw_later_handle = None
def print(self, *args, **kwargs):
output = io.StringIO()
print(*args, file=output, **kwargs)
self._to_print.append(output.getvalue())
# If we use _draw_later, the program might exit before the draw timer
# fires. Drawing right now ensures that output never gets dropped.
self._draw()
def _draw(self):
self._cancel_draw_later()
# Erase everything we printed before.
for i in range(self._lines_printed):
self.output.write(ANSI_CURSOR_UP_ONE_LINE)
self.output.write(ANSI_CLEAR_LINE)
self._lines_printed = 0
# If we have any lines from print(), print them now. They will end up
# above the display like regular output.
for string in self._to_print:
self.output.write(string)
self._to_print.clear()
# Redraw all the jobs.
self.output.write(ANSI_DISABLE_LINE_WRAP)
for slot, job_id in enumerate(self._job_slots):
# Fancy unicode box characters in the left column.
if slot == 0:
self.output.write('┌' if len(self._job_slots) > 1 else '╶')
elif slot < len(self._job_slots) - 1:
self.output.write('├')
else:
self.output.write('└')
self.output.write(' ')
self.output.write(self.titles[job_id])
self.output.write(': ')
self.output.write(self._output_lines[job_id])
# Some terminals keep overwriting the last character in no-wrap
# mode. Make the trailing character a space.
self.output.write(' ')
self.output.write('\n')
self._lines_printed += 1
self.output.write(ANSI_ENABLE_LINE_WRAP)
# Finally, flush output to the terminal. Hopefully everything gets
# painted in one frame.
self.output.flush()
def _draw_later(self):
if self._draw_later_handle:
# There is already a draw pending.
return
self._draw_later_handle = asyncio.get_event_loop().call_later(
0.1, self._draw)
def _cancel_draw_later(self):
if self._draw_later_handle:
self._draw_later_handle.cancel()
self._draw_later_handle = None
def _job_started(self, job_id):
self._job_slots.append(job_id)
self._output_lines[job_id] = ''
self._draw_later()
def _job_written(self, job_id, string):
# We need to split output on newlines. Some programs (git) also use
# carriage return to redraw a line, so we split on that too.
any_newlines = '(?:\n|\r)+' # (?: is non-capturing, for split()
lines = [line.strip() for line in re.split(any_newlines, string)]
# NB: We don't make any attempt here to join lines that might span
# multiple write() calls. `create_subprocess_with_handle()` reads
# output in 4096 byte chunks, so this isn't likely, but it's possible.
for line in lines:
# Ignore empty lines, both from the job and from re.split().
if line:
self._output_lines[job_id] = line
self._draw_later()
def _job_finished(self, job_id):
self._job_slots.remove(job_id)
if not self.outstanding_jobs:
# If the last job is finished, the event loop might be about to
# stop. Clear the terminal right now, because _draw_later might
# never run.
self._draw()
else:
# If there are pending jobs, don't clear the display immediately.
# This avoids flickering between jobs when only one job is running
# at a time (-j1).
self._draw_later()
class _DisplayHandle:
def __init__(self, display, job_id):
self._display = display
self._job_id = job_id
self._opened = False
self._closed = False
def write(self, string):
assert self._opened and not self._closed
self._display._handle_write(self._job_id, string)
# Context manager interface. We're extra careful to make sure that the
    # handle is only written to inside a with statement, and only used once.
def __enter__(self):
assert not self._opened and not self._closed
self._opened = True
self._display._handle_start(self._job_id)
return self
def __exit__(self, *args):
assert self._opened and not self._closed
self._display._handle_finish(self._job_id)
self._job_id = None
self._closed = True
| mit | 434,639,281,018,792,770 | 37.53719 | 79 | 0.619987 | false |
Cyberjusticelab/JusticeAI | src/ml_service/feature_extraction/post_processing/regex/regex_entity_extraction.py | 1 | 7500 | from feature_extraction.post_processing.regex.regex_lib import RegexLib
import re
import datetime
import time
import unicodedata
from util.log import Log
import math
class EntityExtraction:
regex_bin = None
    one_month = 86400 * 30  # seconds in one month (30 days)
month_dict = {
'janvier': 1,
'fevrier': 2,
'mars': 3,
'avril': 4,
'mai': 5,
'juin': 6,
'juillet': 7,
'aout': 8,
'septembre': 9,
"octobre": 10,
'novembre': 11,
'decembre': 12
}
def __init__(self):
pass
@staticmethod
def match_any_regex(text, regex_array, regex_type):
"""
1) Loads the regex binaries only once. If it is loaded then continue.
2) Iterate all the regex and search text
3) if regex finds a match then extract entity from this sub sentence
:param text: String representation of precedent
:param regex_array: List of regex
:param regex_type: Entity we look for in a particular regex match
:return: (Boolean, entity<int>)
"""
if EntityExtraction.regex_bin is None:
EntityExtraction.regex_bin = RegexLib.model
for regex in regex_array:
regex_result = regex.search(text)
if regex_result:
sentence = regex_result.group(0).lower()
return EntityExtraction.__extract_regex_entity(sentence, regex_type)
return False, 0
@staticmethod
def __extract_regex_entity(sentence, regex_type):
"""
Entity extraction from the text
1) If the type is BOOLEAN then simply return True, 1
2) If the type is MONEY_REGEX then extract the money value and format string so that it is
convertible to integer
        3) else return False, 0
:param sentence: sub sentence from text to apply regex
:param regex_type: type of information to extract
:return: (boolean, int)
"""
# removes accents
nfkd_form = unicodedata.normalize('NFKD', sentence)
sentence = u"".join([character for character in nfkd_form if not unicodedata.combining(character)])
if regex_type == 'BOOLEAN':
return True, 1
elif regex_type == 'MONEY_REGEX':
return EntityExtraction.__regex_money(regex_type, sentence)
elif regex_type == 'DATE_REGEX':
return EntityExtraction.get_fact_duration(sentence)
return False, 0
@staticmethod
def get_fact_duration(sentence):
"""
Tries to find date range within a sentence by trying to match it against regexes.
First regex looks for the following format: 1er decembre 20** [a|au ...] 30 mai 20**
Second regex looks for 1 or more months being stated
convert to unix.
1) unless specified, start date is assumes to be the first day of the month
2) unless specified, end date is assume to be the last day of the month. 28 is chosen because
every month have at least 28 days
The information captured be the regexes above allows us to get the time difference in days
:param sentence: sentence to extract entities
:return: boolean (date found), integer (months between dates)
"""
# Verify if the sentence is about non-payment
non_payment_regex = re.compile("pas paye", re.IGNORECASE)
if re.findall(non_payment_regex, sentence).__len__() == 0:
return False, 0
# First regex
start_end_date_regex = re.compile(RegexLib.DATE_RANGE_REGEX, re.IGNORECASE)
entities = re.findall(start_end_date_regex, sentence)
if entities.__len__() > 0:
entities = re.findall(start_end_date_regex, sentence).pop(0)
try:
start_day = int(entities[0])
except ValueError as error:
Log.write(str(error) + ": could not convert " + entities[0] + " to an int")
start_day = '1'
start_month = ''
try:
start_month = str(EntityExtraction.month_dict[entities[1]])
            except KeyError as error:
                Log.write(str(error) + ": " + str(entities[1]) + " is not a month or has a spelling mistake")
return False, 0
try:
start_year = int(entities[2])
except ValueError as error:
Log.write(str(error) + ": could not find start year")
                start_year = entities[5]  # fall back to the end year
try:
end_day = int(entities[3])
except ValueError as error:
Log.write(str(error) + ": could not convert " + entities[3] + " to an int")
end_day = '28'
end_month = ''
try:
end_month = str(EntityExtraction.month_dict[entities[4]])
            except KeyError as error:
                Log.write(str(error) + ": " + str(entities[4]) + " is not a month or has a spelling mistake")
return False, 0
end_year = entities[5]
start_unix = EntityExtraction.__date_to_unix([str(start_day), str(start_month), str(start_year)])
end_unix = EntityExtraction.__date_to_unix([str(end_day), str(end_month), str(end_year)])
return True, EntityExtraction.__get_time_interval_in_months(start_unix, end_unix)
# Second regex
month_regex = re.compile(RegexLib.DATE_REGEX, re.IGNORECASE)
entities = re.findall(month_regex, sentence)
if entities.__len__() > 0:
return True, entities.__len__() # total months found
return False, 0
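    # Illustrative behaviour (a sketch; the exact patterns live in RegexLib):
    # given "n'a pas paye le loyer du 1er janvier 2017 au 30 juin 2017", the
    # date-range branch should return (True, 6) -- roughly six months between
    # the two dates.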
@staticmethod
def __regex_money(regex_type, sentence):
"""
1) create the date regex --> re.compile(regex string)
2) Find the dollar amount in the sentence
3) filter the string by removing unecessary characters
4) return the entity
:param regex_type: str(MONEY_REGEX)
:param sentence: boolean, integer
:return:
"""
generic_regex = re.compile(EntityExtraction.regex_bin[regex_type])
entity = generic_regex.search(sentence).group(0)
# Functional but not sure about how optimal it is
entity = entity.replace("$", "")
entity = entity.replace(" ", "")
entity = entity.replace(",", ".")
if entity[-1] == '.':
entity = entity[:-1]
return True, entity
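    # For instance (illustrative, assuming MONEY_REGEX matches the amount):
    # a sentence containing "1 200,50 $" yields (True, '1200.50').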
@staticmethod
def __date_to_unix(date):
"""
Given a date list (ex: [30,12,2019]) this function gets the unix time that represents this date
:param date: date to convert into unix time
:return: unix time representing the input date
"""
date_string = " ".join(date)
try:
unix_time = time.mktime(datetime.datetime.strptime(date_string, '%d %m %Y').timetuple())
except (ValueError, OverflowError) as error:
Log.write(str(error) + ": " + str(date_string))
return None
return unix_time
@staticmethod
def __get_time_interval_in_months(first_date, second_date):
"""
Calculates the time difference between 2 dates
:param first_date: date in unix time
:param second_date: date in unix time
:return: time difference between 2 dates
"""
return math.ceil(abs(first_date - second_date) / EntityExtraction.one_month)
| mit | 8,145,534,708,138,904,000 | 35.945813 | 109 | 0.587067 | false |
mollstam/UnrealPy | UnrealPyEmbed/Source/Python/Lib/python27/lib2to3/main.py | 250 | 11605 | """
Main program for 2to3.
"""
from __future__ import with_statement
import sys
import os
import difflib
import logging
import shutil
import optparse
from . import refactor
def diff_texts(a, b, filename):
"""Return a unified diff of two strings."""
a = a.splitlines()
b = b.splitlines()
return difflib.unified_diff(a, b, filename, filename,
"(original)", "(refactored)",
lineterm="")
class StdoutRefactoringTool(refactor.MultiprocessRefactoringTool):
"""
A refactoring tool that can avoid overwriting its input files.
Prints output to stdout.
    Output files can optionally be written to a different directory and/or
have an extra file suffix appended to their name for use in situations
where you do not want to replace the input files.
"""
def __init__(self, fixers, options, explicit, nobackups, show_diffs,
input_base_dir='', output_dir='', append_suffix=''):
"""
Args:
fixers: A list of fixers to import.
options: A dict with RefactoringTool configuration.
explicit: A list of fixers to run even if they are explicit.
nobackups: If true no backup '.bak' files will be created for those
files that are being refactored.
show_diffs: Should diffs of the refactoring be printed to stdout?
input_base_dir: The base directory for all input files. This class
will strip this path prefix off of filenames before substituting
it with output_dir. Only meaningful if output_dir is supplied.
All files processed by refactor() must start with this path.
output_dir: If supplied, all converted files will be written into
this directory tree instead of input_base_dir.
append_suffix: If supplied, all files output by this tool will have
this appended to their filename. Useful for changing .py to
.py3 for example by passing append_suffix='3'.
"""
self.nobackups = nobackups
self.show_diffs = show_diffs
if input_base_dir and not input_base_dir.endswith(os.sep):
input_base_dir += os.sep
self._input_base_dir = input_base_dir
self._output_dir = output_dir
self._append_suffix = append_suffix
super(StdoutRefactoringTool, self).__init__(fixers, options, explicit)
def log_error(self, msg, *args, **kwargs):
self.errors.append((msg, args, kwargs))
self.logger.error(msg, *args, **kwargs)
def write_file(self, new_text, filename, old_text, encoding):
orig_filename = filename
if self._output_dir:
if filename.startswith(self._input_base_dir):
filename = os.path.join(self._output_dir,
filename[len(self._input_base_dir):])
else:
raise ValueError('filename %s does not start with the '
'input_base_dir %s' % (
filename, self._input_base_dir))
if self._append_suffix:
filename += self._append_suffix
if orig_filename != filename:
output_dir = os.path.dirname(filename)
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
self.log_message('Writing converted %s to %s.', orig_filename,
filename)
if not self.nobackups:
# Make backup
backup = filename + ".bak"
if os.path.lexists(backup):
try:
os.remove(backup)
except os.error, err:
self.log_message("Can't remove backup %s", backup)
try:
os.rename(filename, backup)
except os.error, err:
self.log_message("Can't rename %s to %s", filename, backup)
# Actually write the new file
write = super(StdoutRefactoringTool, self).write_file
write(new_text, filename, old_text, encoding)
if not self.nobackups:
shutil.copymode(backup, filename)
if orig_filename != filename:
# Preserve the file mode in the new output directory.
shutil.copymode(orig_filename, filename)
def print_output(self, old, new, filename, equal):
if equal:
self.log_message("No changes to %s", filename)
else:
self.log_message("Refactored %s", filename)
if self.show_diffs:
diff_lines = diff_texts(old, new, filename)
try:
if self.output_lock is not None:
with self.output_lock:
for line in diff_lines:
print line
sys.stdout.flush()
else:
for line in diff_lines:
print line
except UnicodeEncodeError:
warn("couldn't encode %s's diff for your terminal" %
(filename,))
return
def warn(msg):
print >> sys.stderr, "WARNING: %s" % (msg,)
def main(fixer_pkg, args=None):
"""Main program.
Args:
fixer_pkg: the name of a package where the fixers are located.
args: optional; a list of command line arguments. If omitted,
sys.argv[1:] is used.
Returns a suggested exit status (0, 1, 2).
"""
# Set up option parser
parser = optparse.OptionParser(usage="2to3 [options] file|dir ...")
parser.add_option("-d", "--doctests_only", action="store_true",
help="Fix up doctests only")
parser.add_option("-f", "--fix", action="append", default=[],
help="Each FIX specifies a transformation; default: all")
parser.add_option("-j", "--processes", action="store", default=1,
type="int", help="Run 2to3 concurrently")
parser.add_option("-x", "--nofix", action="append", default=[],
help="Prevent a transformation from being run")
parser.add_option("-l", "--list-fixes", action="store_true",
help="List available transformations")
parser.add_option("-p", "--print-function", action="store_true",
help="Modify the grammar so that print() is a function")
parser.add_option("-v", "--verbose", action="store_true",
help="More verbose logging")
parser.add_option("--no-diffs", action="store_true",
help="Don't show diffs of the refactoring")
parser.add_option("-w", "--write", action="store_true",
help="Write back modified files")
parser.add_option("-n", "--nobackups", action="store_true", default=False,
help="Don't write backups for modified files")
parser.add_option("-o", "--output-dir", action="store", type="str",
default="", help="Put output files in this directory "
"instead of overwriting the input files. Requires -n.")
parser.add_option("-W", "--write-unchanged-files", action="store_true",
help="Also write files even if no changes were required"
" (useful with --output-dir); implies -w.")
parser.add_option("--add-suffix", action="store", type="str", default="",
help="Append this string to all output filenames."
" Requires -n if non-empty. "
"ex: --add-suffix='3' will generate .py3 files.")
# Parse command line arguments
refactor_stdin = False
flags = {}
options, args = parser.parse_args(args)
if options.write_unchanged_files:
flags["write_unchanged_files"] = True
if not options.write:
warn("--write-unchanged-files/-W implies -w.")
options.write = True
# If we allowed these, the original files would be renamed to backup names
# but not replaced.
if options.output_dir and not options.nobackups:
parser.error("Can't use --output-dir/-o without -n.")
if options.add_suffix and not options.nobackups:
parser.error("Can't use --add-suffix without -n.")
if not options.write and options.no_diffs:
warn("not writing files and not printing diffs; that's not very useful")
if not options.write and options.nobackups:
parser.error("Can't use -n without -w")
if options.list_fixes:
print "Available transformations for the -f/--fix option:"
for fixname in refactor.get_all_fix_names(fixer_pkg):
print fixname
if not args:
return 0
if not args:
print >> sys.stderr, "At least one file or directory argument required."
print >> sys.stderr, "Use --help to show usage."
return 2
if "-" in args:
refactor_stdin = True
if options.write:
print >> sys.stderr, "Can't write to stdin."
return 2
if options.print_function:
flags["print_function"] = True
# Set up logging handler
level = logging.DEBUG if options.verbose else logging.INFO
logging.basicConfig(format='%(name)s: %(message)s', level=level)
logger = logging.getLogger('lib2to3.main')
# Initialize the refactoring tool
avail_fixes = set(refactor.get_fixers_from_package(fixer_pkg))
unwanted_fixes = set(fixer_pkg + ".fix_" + fix for fix in options.nofix)
explicit = set()
if options.fix:
all_present = False
for fix in options.fix:
if fix == "all":
all_present = True
else:
explicit.add(fixer_pkg + ".fix_" + fix)
requested = avail_fixes.union(explicit) if all_present else explicit
else:
requested = avail_fixes.union(explicit)
fixer_names = requested.difference(unwanted_fixes)
input_base_dir = os.path.commonprefix(args)
if (input_base_dir and not input_base_dir.endswith(os.sep)
and not os.path.isdir(input_base_dir)):
# One or more similar names were passed, their directory is the base.
# os.path.commonprefix() is ignorant of path elements, this corrects
# for that weird API.
input_base_dir = os.path.dirname(input_base_dir)
if options.output_dir:
input_base_dir = input_base_dir.rstrip(os.sep)
logger.info('Output in %r will mirror the input directory %r layout.',
options.output_dir, input_base_dir)
rt = StdoutRefactoringTool(
sorted(fixer_names), flags, sorted(explicit),
options.nobackups, not options.no_diffs,
input_base_dir=input_base_dir,
output_dir=options.output_dir,
append_suffix=options.add_suffix)
# Refactor all files and directories passed as arguments
if not rt.errors:
if refactor_stdin:
rt.refactor_stdin()
else:
try:
rt.refactor(args, options.write, options.doctests_only,
options.processes)
except refactor.MultiprocessingUnsupported:
assert options.processes > 1
print >> sys.stderr, "Sorry, -j isn't " \
"supported on this platform."
return 1
rt.summarize()
# Return error status (0 if rt.errors is zero)
return int(bool(rt.errors))
| mit | -4,991,081,407,455,078,000 | 42.141264 | 80 | 0.576993 | false |
jocelynmass/nrf51 | toolchain/arm_cm0_deprecated/arm-none-eabi/share/gdb/python/gdb/frames.py | 68 | 8031 | # Frame-filter commands.
# Copyright (C) 2013-2015 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Internal functions for working with frame-filters."""
import gdb
from gdb.FrameIterator import FrameIterator
from gdb.FrameDecorator import FrameDecorator
import itertools
import collections
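# A minimal frame filter, for illustration only (this module does not
# register any filters itself):
#
#   class PassthroughFrameFilter(object):
#       def __init__(self):
#           self.name = "passthrough"
#           self.priority = 100
#           self.enabled = True
#           gdb.frame_filters[self.name] = self
#       def filter(self, frame_iter):
#           return frame_iter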
def get_priority(filter_item):
""" Internal worker function to return the frame-filter's priority
from a frame filter object. This is a fail free function as it is
used in sorting and filtering. If a badly implemented frame
filter does not implement the priority attribute, return zero
(otherwise sorting/filtering will fail and prevent other frame
filters from executing).
Arguments:
filter_item: An object conforming to the frame filter
interface.
Returns:
The priority of the frame filter from the "priority"
attribute, or zero.
"""
# Do not fail here, as the sort will fail. If a filter has not
# (incorrectly) set a priority, set it to zero.
return getattr(filter_item, "priority", 0)
def set_priority(filter_item, priority):
""" Internal worker function to set the frame-filter's priority.
Arguments:
filter_item: An object conforming to the frame filter
interface.
priority: The priority to assign as an integer.
"""
filter_item.priority = priority
def get_enabled(filter_item):
""" Internal worker function to return a filter's enabled state
from a frame filter object. This is a fail free function as it is
used in sorting and filtering. If a badly implemented frame
filter does not implement the enabled attribute, return False
(otherwise sorting/filtering will fail and prevent other frame
filters from executing).
Arguments:
filter_item: An object conforming to the frame filter
interface.
Returns:
The enabled state of the frame filter from the "enabled"
attribute, or False.
"""
# If the filter class is badly implemented when called from the
# Python filter command, do not cease filter operations, just set
# enabled to False.
return getattr(filter_item, "enabled", False)
def set_enabled(filter_item, state):
""" Internal Worker function to set the frame-filter's enabled
state.
Arguments:
filter_item: An object conforming to the frame filter
interface.
state: True or False, depending on desired state.
"""
filter_item.enabled = state
def return_list(name):
""" Internal Worker function to return the frame filter
dictionary, depending on the name supplied as an argument. If the
name is not "all", "global" or "progspace", it is assumed to name
an object-file.
Arguments:
name: The name of the list, as specified by GDB user commands.
Returns:
A dictionary object for a single specified dictionary, or a
list containing all the items for "all"
Raises:
gdb.GdbError: A dictionary of that name cannot be found.
"""
# If all dictionaries are wanted in the case of "all" we
# cannot return a combined dictionary as keys() may clash in
# between different dictionaries. As we just want all the frame
# filters to enable/disable them all, just return the combined
# items() as a chained iterator of dictionary values.
if name == "all":
glob = gdb.frame_filters.values()
prog = gdb.current_progspace().frame_filters.values()
return_iter = itertools.chain(glob, prog)
for objfile in gdb.objfiles():
return_iter = itertools.chain(return_iter, objfile.frame_filters.values())
return return_iter
if name == "global":
return gdb.frame_filters
else:
if name == "progspace":
cp = gdb.current_progspace()
return cp.frame_filters
else:
for objfile in gdb.objfiles():
if name == objfile.filename:
return objfile.frame_filters
msg = "Cannot find frame-filter dictionary for '" + name + "'"
raise gdb.GdbError(msg)
def _sort_list():
""" Internal Worker function to merge all known frame-filter
lists, prune any filters with the state set to "disabled", and
sort the list on the frame-filter's "priority" attribute.
Returns:
sorted_list: A sorted, pruned list of frame filters to
execute.
"""
all_filters = return_list("all")
sorted_frame_filters = sorted(all_filters, key = get_priority,
reverse = True)
sorted_frame_filters = filter(get_enabled,
sorted_frame_filters)
return sorted_frame_filters
def execute_frame_filters(frame, frame_low, frame_high):
""" Internal function called from GDB that will execute the chain
of frame filters. Each filter is executed in priority order.
After the execution completes, slice the iterator to frame_low -
frame_high range.
Arguments:
frame: The initial frame.
frame_low: The low range of the slice. If this is a negative
integer then it indicates a backward slice (ie bt -4) which
counts backward from the last frame in the backtrace.
frame_high: The high range of the slice. If this is -1 then
it indicates all frames until the end of the stack from
frame_low.
Returns:
frame_iterator: The sliced iterator after all frame
filters have had a change to execute, or None if no frame
filters are registered.
"""
# Get a sorted list of frame filters.
sorted_list = list(_sort_list())
# Check to see if there are any frame-filters. If not, just
# return None and let default backtrace printing occur.
if len(sorted_list) == 0:
return None
frame_iterator = FrameIterator(frame)
# Apply a basic frame decorator to all gdb.Frames. This unifies
# the interface. Python 3.x moved the itertools.imap
# functionality to map(), so check if it is available.
if hasattr(itertools,"imap"):
frame_iterator = itertools.imap(FrameDecorator, frame_iterator)
else:
frame_iterator = map(FrameDecorator, frame_iterator)
for ff in sorted_list:
frame_iterator = ff.filter(frame_iterator)
# Slicing
# Is this a slice from the end of the backtrace, ie bt -2?
if frame_low < 0:
count = 0
slice_length = abs(frame_low)
# We cannot use MAXLEN argument for deque as it is 2.6 onwards
# and some GDB versions might be < 2.6.
sliced = collections.deque()
for frame_item in frame_iterator:
if count >= slice_length:
                sliced.popleft()
count = count + 1
sliced.append(frame_item)
return iter(sliced)
# -1 for frame_high means until the end of the backtrace. Set to
# None if that is the case, to indicate to itertools.islice to
# slice to the end of the iterator.
if frame_high == -1:
frame_high = None
else:
# As frames start from 0, add one to frame_high so islice
# correctly finds the end
        frame_high = frame_high + 1
sliced = itertools.islice(frame_iterator, frame_low, frame_high)
return sliced
| gpl-2.0 | 6,565,255,743,960,707,000 | 34.223684 | 86 | 0.661686 | false |
magne4000/festival | app.py | 1 | 1221 | import os
import re
from flask import Flask
from datetime import timedelta
def interval_to_timedelta(interval):
if isinstance(interval, int):
interval = "%ds" % interval
ratios = {
's': 'seconds',
'm': 'minutes',
'h': 'hours',
'd': 'days',
'w': 'weeks'
}
return timedelta(**{ratios[interval[-1:]]: int(interval[0:-1])})
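# Examples (illustrative):
#   interval_to_timedelta('30s') -> timedelta(seconds=30)
#   interval_to_timedelta('2h')  -> timedelta(hours=2)
#   interval_to_timedelta(45)    -> timedelta(seconds=45)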
def shape_config(myapp, args):
myapp.config.from_pyfile(args.config or os.path.join(os.path.dirname(__file__), 'settings.cfg'))
myapp.config['SCANNER_MODES'] = ['tags']
if myapp.config['SCANNER_FOLDER_PATTERNS'] is not None and len(myapp.config['SCANNER_FOLDER_PATTERNS']) > 0:
myapp.config['SCANNER_MODES'].append('folder')
for i, pattern in enumerate(myapp.config['SCANNER_FOLDER_PATTERNS']):
myapp.config['SCANNER_FOLDER_PATTERNS'][i] = re.compile(pattern)
myapp.config['COVERS_FETCH_ONLINE_INTERVAL'] = interval_to_timedelta(myapp.config['COVERS_FETCH_ONLINE_INTERVAL'])
myapp.config['SCANNER_REFRESH_INTERVAL'] = interval_to_timedelta(myapp.config['SCANNER_REFRESH_INTERVAL'])
return myapp
def get_app(args):
myapp = Flask(__name__)
return shape_config(myapp, args)
| mit | -1,065,424,582,196,639,200 | 34.911765 | 118 | 0.652744 | false |
anbasile/flask_sample | flask/lib/python2.7/site-packages/flask/testsuite/blueprints.py | 563 | 28089 | # -*- coding: utf-8 -*-
"""
flask.testsuite.blueprints
~~~~~~~~~~~~~~~~~~~~~~~~~~
Blueprints (and currently modules)
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import flask
import unittest
import warnings
from flask.testsuite import FlaskTestCase, emits_module_deprecation_warning
from flask._compat import text_type
from werkzeug.exceptions import NotFound
from werkzeug.http import parse_cache_control_header
from jinja2 import TemplateNotFound
# import moduleapp here because it uses deprecated features and we don't
# want to see the warnings
warnings.simplefilter('ignore', DeprecationWarning)
from moduleapp import app as moduleapp
warnings.simplefilter('default', DeprecationWarning)
class ModuleTestCase(FlaskTestCase):
@emits_module_deprecation_warning
def test_basic_module(self):
app = flask.Flask(__name__)
admin = flask.Module(__name__, 'admin', url_prefix='/admin')
@admin.route('/')
def admin_index():
return 'admin index'
@admin.route('/login')
def admin_login():
return 'admin login'
@admin.route('/logout')
def admin_logout():
return 'admin logout'
@app.route('/')
def index():
return 'the index'
app.register_module(admin)
c = app.test_client()
self.assert_equal(c.get('/').data, b'the index')
self.assert_equal(c.get('/admin/').data, b'admin index')
self.assert_equal(c.get('/admin/login').data, b'admin login')
self.assert_equal(c.get('/admin/logout').data, b'admin logout')
@emits_module_deprecation_warning
def test_default_endpoint_name(self):
app = flask.Flask(__name__)
mod = flask.Module(__name__, 'frontend')
def index():
return 'Awesome'
mod.add_url_rule('/', view_func=index)
app.register_module(mod)
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'Awesome')
with app.test_request_context():
self.assert_equal(flask.url_for('frontend.index'), '/')
@emits_module_deprecation_warning
def test_request_processing(self):
catched = []
app = flask.Flask(__name__)
admin = flask.Module(__name__, 'admin', url_prefix='/admin')
@admin.before_request
def before_admin_request():
catched.append('before-admin')
@admin.after_request
def after_admin_request(response):
catched.append('after-admin')
return response
@admin.route('/')
def admin_index():
return 'the admin'
@app.before_request
def before_request():
catched.append('before-app')
@app.after_request
def after_request(response):
catched.append('after-app')
return response
@app.route('/')
def index():
return 'the index'
app.register_module(admin)
c = app.test_client()
self.assert_equal(c.get('/').data, b'the index')
self.assert_equal(catched, ['before-app', 'after-app'])
del catched[:]
self.assert_equal(c.get('/admin/').data, b'the admin')
self.assert_equal(catched, ['before-app', 'before-admin',
'after-admin', 'after-app'])
@emits_module_deprecation_warning
def test_context_processors(self):
app = flask.Flask(__name__)
admin = flask.Module(__name__, 'admin', url_prefix='/admin')
@app.context_processor
def inject_all_regular():
return {'a': 1}
@admin.context_processor
def inject_admin():
return {'b': 2}
@admin.app_context_processor
def inject_all_module():
return {'c': 3}
@app.route('/')
def index():
return flask.render_template_string('{{ a }}{{ b }}{{ c }}')
@admin.route('/')
def admin_index():
return flask.render_template_string('{{ a }}{{ b }}{{ c }}')
app.register_module(admin)
c = app.test_client()
self.assert_equal(c.get('/').data, b'13')
self.assert_equal(c.get('/admin/').data, b'123')
@emits_module_deprecation_warning
def test_late_binding(self):
app = flask.Flask(__name__)
admin = flask.Module(__name__, 'admin')
@admin.route('/')
def index():
return '42'
app.register_module(admin, url_prefix='/admin')
self.assert_equal(app.test_client().get('/admin/').data, b'42')
@emits_module_deprecation_warning
def test_error_handling(self):
app = flask.Flask(__name__)
admin = flask.Module(__name__, 'admin')
@admin.app_errorhandler(404)
def not_found(e):
return 'not found', 404
@admin.app_errorhandler(500)
def internal_server_error(e):
return 'internal server error', 500
@admin.route('/')
def index():
flask.abort(404)
@admin.route('/error')
def error():
1 // 0
app.register_module(admin)
c = app.test_client()
rv = c.get('/')
self.assert_equal(rv.status_code, 404)
self.assert_equal(rv.data, b'not found')
rv = c.get('/error')
self.assert_equal(rv.status_code, 500)
self.assert_equal(b'internal server error', rv.data)
def test_templates_and_static(self):
app = moduleapp
app.testing = True
c = app.test_client()
rv = c.get('/')
self.assert_equal(rv.data, b'Hello from the Frontend')
rv = c.get('/admin/')
self.assert_equal(rv.data, b'Hello from the Admin')
rv = c.get('/admin/index2')
self.assert_equal(rv.data, b'Hello from the Admin')
rv = c.get('/admin/static/test.txt')
self.assert_equal(rv.data.strip(), b'Admin File')
rv.close()
rv = c.get('/admin/static/css/test.css')
self.assert_equal(rv.data.strip(), b'/* nested file */')
rv.close()
with app.test_request_context():
self.assert_equal(flask.url_for('admin.static', filename='test.txt'),
'/admin/static/test.txt')
with app.test_request_context():
try:
flask.render_template('missing.html')
except TemplateNotFound as e:
self.assert_equal(e.name, 'missing.html')
else:
self.assert_true(0, 'expected exception')
with flask.Flask(__name__).test_request_context():
self.assert_equal(flask.render_template('nested/nested.txt'), 'I\'m nested')
def test_safe_access(self):
app = moduleapp
with app.test_request_context():
f = app.view_functions['admin.static']
try:
f('/etc/passwd')
except NotFound:
pass
else:
self.assert_true(0, 'expected exception')
try:
f('../__init__.py')
except NotFound:
pass
else:
self.assert_true(0, 'expected exception')
            # Test case for a security issue that may exist on Windows systems.
import os
import ntpath
old_path = os.path
os.path = ntpath
try:
try:
f('..\\__init__.py')
except NotFound:
pass
else:
self.assert_true(0, 'expected exception')
finally:
os.path = old_path
@emits_module_deprecation_warning
def test_endpoint_decorator(self):
from werkzeug.routing import Submount, Rule
from flask import Module
app = flask.Flask(__name__)
app.testing = True
app.url_map.add(Submount('/foo', [
Rule('/bar', endpoint='bar'),
Rule('/', endpoint='index')
]))
module = Module(__name__, __name__)
@module.endpoint('bar')
def bar():
return 'bar'
@module.endpoint('index')
def index():
return 'index'
app.register_module(module)
c = app.test_client()
self.assert_equal(c.get('/foo/').data, b'index')
self.assert_equal(c.get('/foo/bar').data, b'bar')
class BlueprintTestCase(FlaskTestCase):
def test_blueprint_specific_error_handling(self):
frontend = flask.Blueprint('frontend', __name__)
backend = flask.Blueprint('backend', __name__)
sideend = flask.Blueprint('sideend', __name__)
@frontend.errorhandler(403)
def frontend_forbidden(e):
return 'frontend says no', 403
@frontend.route('/frontend-no')
def frontend_no():
flask.abort(403)
@backend.errorhandler(403)
def backend_forbidden(e):
return 'backend says no', 403
@backend.route('/backend-no')
def backend_no():
flask.abort(403)
@sideend.route('/what-is-a-sideend')
def sideend_no():
flask.abort(403)
app = flask.Flask(__name__)
app.register_blueprint(frontend)
app.register_blueprint(backend)
app.register_blueprint(sideend)
@app.errorhandler(403)
def app_forbidden(e):
return 'application itself says no', 403
c = app.test_client()
self.assert_equal(c.get('/frontend-no').data, b'frontend says no')
self.assert_equal(c.get('/backend-no').data, b'backend says no')
self.assert_equal(c.get('/what-is-a-sideend').data, b'application itself says no')
def test_blueprint_url_definitions(self):
bp = flask.Blueprint('test', __name__)
@bp.route('/foo', defaults={'baz': 42})
def foo(bar, baz):
return '%s/%d' % (bar, baz)
@bp.route('/bar')
def bar(bar):
return text_type(bar)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/1', url_defaults={'bar': 23})
app.register_blueprint(bp, url_prefix='/2', url_defaults={'bar': 19})
c = app.test_client()
self.assert_equal(c.get('/1/foo').data, b'23/42')
self.assert_equal(c.get('/2/foo').data, b'19/42')
self.assert_equal(c.get('/1/bar').data, b'23')
self.assert_equal(c.get('/2/bar').data, b'19')
def test_blueprint_url_processors(self):
bp = flask.Blueprint('frontend', __name__, url_prefix='/<lang_code>')
@bp.url_defaults
def add_language_code(endpoint, values):
values.setdefault('lang_code', flask.g.lang_code)
@bp.url_value_preprocessor
def pull_lang_code(endpoint, values):
flask.g.lang_code = values.pop('lang_code')
@bp.route('/')
def index():
return flask.url_for('.about')
@bp.route('/about')
def about():
return flask.url_for('.index')
app = flask.Flask(__name__)
app.register_blueprint(bp)
c = app.test_client()
self.assert_equal(c.get('/de/').data, b'/de/about')
self.assert_equal(c.get('/de/about').data, b'/de/')
def test_templates_and_static(self):
from blueprintapp import app
c = app.test_client()
rv = c.get('/')
self.assert_equal(rv.data, b'Hello from the Frontend')
rv = c.get('/admin/')
self.assert_equal(rv.data, b'Hello from the Admin')
rv = c.get('/admin/index2')
self.assert_equal(rv.data, b'Hello from the Admin')
rv = c.get('/admin/static/test.txt')
self.assert_equal(rv.data.strip(), b'Admin File')
rv.close()
rv = c.get('/admin/static/css/test.css')
self.assert_equal(rv.data.strip(), b'/* nested file */')
rv.close()
# try/finally, in case other tests use this app for Blueprint tests.
max_age_default = app.config['SEND_FILE_MAX_AGE_DEFAULT']
try:
expected_max_age = 3600
if app.config['SEND_FILE_MAX_AGE_DEFAULT'] == expected_max_age:
expected_max_age = 7200
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = expected_max_age
rv = c.get('/admin/static/css/test.css')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
self.assert_equal(cc.max_age, expected_max_age)
rv.close()
finally:
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = max_age_default
with app.test_request_context():
self.assert_equal(flask.url_for('admin.static', filename='test.txt'),
'/admin/static/test.txt')
with app.test_request_context():
try:
flask.render_template('missing.html')
except TemplateNotFound as e:
self.assert_equal(e.name, 'missing.html')
else:
self.assert_true(0, 'expected exception')
with flask.Flask(__name__).test_request_context():
self.assert_equal(flask.render_template('nested/nested.txt'), 'I\'m nested')
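    # Hedged sketch (assumption, mirroring the SEND_FILE_MAX_AGE_DEFAULT dance
    # in test_templates_and_static above): picking a max-age value that is
    # guaranteed to differ from the current config, so the Cache-Control
    # assertion cannot pass by accident.
    def _example_distinct_max_age(self, app, candidate=3600, fallback=7200):
        current = app.config['SEND_FILE_MAX_AGE_DEFAULT']
        return fallback if current == candidate else candidate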
def test_default_static_cache_timeout(self):
app = flask.Flask(__name__)
class MyBlueprint(flask.Blueprint):
def get_send_file_max_age(self, filename):
return 100
blueprint = MyBlueprint('blueprint', __name__, static_folder='static')
app.register_blueprint(blueprint)
# try/finally, in case other tests use this app for Blueprint tests.
max_age_default = app.config['SEND_FILE_MAX_AGE_DEFAULT']
try:
with app.test_request_context():
unexpected_max_age = 3600
if app.config['SEND_FILE_MAX_AGE_DEFAULT'] == unexpected_max_age:
unexpected_max_age = 7200
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = unexpected_max_age
rv = blueprint.send_static_file('index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
self.assert_equal(cc.max_age, 100)
rv.close()
finally:
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = max_age_default
def test_templates_list(self):
from blueprintapp import app
templates = sorted(app.jinja_env.list_templates())
self.assert_equal(templates, ['admin/index.html',
'frontend/index.html'])
def test_dotted_names(self):
frontend = flask.Blueprint('myapp.frontend', __name__)
backend = flask.Blueprint('myapp.backend', __name__)
@frontend.route('/fe')
def frontend_index():
return flask.url_for('myapp.backend.backend_index')
@frontend.route('/fe2')
def frontend_page2():
return flask.url_for('.frontend_index')
@backend.route('/be')
def backend_index():
return flask.url_for('myapp.frontend.frontend_index')
app = flask.Flask(__name__)
app.register_blueprint(frontend)
app.register_blueprint(backend)
c = app.test_client()
self.assert_equal(c.get('/fe').data.strip(), b'/be')
self.assert_equal(c.get('/fe2').data.strip(), b'/fe')
self.assert_equal(c.get('/be').data.strip(), b'/fe')
def test_dotted_names_from_app(self):
app = flask.Flask(__name__)
app.testing = True
test = flask.Blueprint('test', __name__)
@app.route('/')
def app_index():
return flask.url_for('test.index')
@test.route('/test/')
def index():
return flask.url_for('app_index')
app.register_blueprint(test)
with app.test_client() as c:
rv = c.get('/')
self.assert_equal(rv.data, b'/test/')
def test_empty_url_defaults(self):
bp = flask.Blueprint('bp', __name__)
@bp.route('/', defaults={'page': 1})
@bp.route('/page/<int:page>')
def something(page):
return str(page)
app = flask.Flask(__name__)
app.register_blueprint(bp)
c = app.test_client()
self.assert_equal(c.get('/').data, b'1')
self.assert_equal(c.get('/page/2').data, b'2')
def test_route_decorator_custom_endpoint(self):
bp = flask.Blueprint('bp', __name__)
@bp.route('/foo')
def foo():
return flask.request.endpoint
@bp.route('/bar', endpoint='bar')
def foo_bar():
return flask.request.endpoint
@bp.route('/bar/123', endpoint='123')
def foo_bar_foo():
return flask.request.endpoint
@bp.route('/bar/foo')
def bar_foo():
return flask.request.endpoint
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.request.endpoint
c = app.test_client()
self.assertEqual(c.get('/').data, b'index')
self.assertEqual(c.get('/py/foo').data, b'bp.foo')
self.assertEqual(c.get('/py/bar').data, b'bp.bar')
self.assertEqual(c.get('/py/bar/123').data, b'bp.123')
self.assertEqual(c.get('/py/bar/foo').data, b'bp.bar_foo')
def test_route_decorator_custom_endpoint_with_dots(self):
bp = flask.Blueprint('bp', __name__)
@bp.route('/foo')
def foo():
return flask.request.endpoint
try:
@bp.route('/bar', endpoint='bar.bar')
def foo_bar():
return flask.request.endpoint
except AssertionError:
pass
else:
raise AssertionError('expected AssertionError not raised')
try:
@bp.route('/bar/123', endpoint='bar.123')
def foo_bar_foo():
return flask.request.endpoint
except AssertionError:
pass
else:
raise AssertionError('expected AssertionError not raised')
def foo_foo_foo():
pass
self.assertRaises(
AssertionError,
lambda: bp.add_url_rule(
'/bar/123', endpoint='bar.123', view_func=foo_foo_foo
)
)
self.assertRaises(
AssertionError,
bp.route('/bar/123', endpoint='bar.123'),
lambda: None
)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
c = app.test_client()
self.assertEqual(c.get('/py/foo').data, b'bp.foo')
        # The rules didn't actually make it through.
rv = c.get('/py/bar')
assert rv.status_code == 404
rv = c.get('/py/bar/123')
assert rv.status_code == 404
def test_template_filter(self):
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter()
def my_reverse(s):
return s[::-1]
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
self.assert_in('my_reverse', app.jinja_env.filters.keys())
self.assert_equal(app.jinja_env.filters['my_reverse'], my_reverse)
self.assert_equal(app.jinja_env.filters['my_reverse']('abcd'), 'dcba')
def test_add_template_filter(self):
bp = flask.Blueprint('bp', __name__)
def my_reverse(s):
return s[::-1]
bp.add_app_template_filter(my_reverse)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
self.assert_in('my_reverse', app.jinja_env.filters.keys())
self.assert_equal(app.jinja_env.filters['my_reverse'], my_reverse)
self.assert_equal(app.jinja_env.filters['my_reverse']('abcd'), 'dcba')
def test_template_filter_with_name(self):
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter('strrev')
def my_reverse(s):
return s[::-1]
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
self.assert_in('strrev', app.jinja_env.filters.keys())
self.assert_equal(app.jinja_env.filters['strrev'], my_reverse)
self.assert_equal(app.jinja_env.filters['strrev']('abcd'), 'dcba')
def test_add_template_filter_with_name(self):
bp = flask.Blueprint('bp', __name__)
def my_reverse(s):
return s[::-1]
bp.add_app_template_filter(my_reverse, 'strrev')
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
self.assert_in('strrev', app.jinja_env.filters.keys())
self.assert_equal(app.jinja_env.filters['strrev'], my_reverse)
self.assert_equal(app.jinja_env.filters['strrev']('abcd'), 'dcba')
def test_template_filter_with_template(self):
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter()
def super_reverse(s):
return s[::-1]
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'dcba')
def test_template_filter_after_route_with_template(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter()
def super_reverse(s):
return s[::-1]
app.register_blueprint(bp, url_prefix='/py')
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'dcba')
def test_add_template_filter_with_template(self):
bp = flask.Blueprint('bp', __name__)
def super_reverse(s):
return s[::-1]
bp.add_app_template_filter(super_reverse)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'dcba')
def test_template_filter_with_name_and_template(self):
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter('super_reverse')
def my_reverse(s):
return s[::-1]
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'dcba')
def test_add_template_filter_with_name_and_template(self):
bp = flask.Blueprint('bp', __name__)
def my_reverse(s):
return s[::-1]
bp.add_app_template_filter(my_reverse, 'super_reverse')
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'dcba')
def test_template_test(self):
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test()
def is_boolean(value):
return isinstance(value, bool)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
self.assert_in('is_boolean', app.jinja_env.tests.keys())
self.assert_equal(app.jinja_env.tests['is_boolean'], is_boolean)
self.assert_true(app.jinja_env.tests['is_boolean'](False))
def test_add_template_test(self):
bp = flask.Blueprint('bp', __name__)
def is_boolean(value):
return isinstance(value, bool)
bp.add_app_template_test(is_boolean)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
self.assert_in('is_boolean', app.jinja_env.tests.keys())
self.assert_equal(app.jinja_env.tests['is_boolean'], is_boolean)
self.assert_true(app.jinja_env.tests['is_boolean'](False))
def test_template_test_with_name(self):
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test('boolean')
def is_boolean(value):
return isinstance(value, bool)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
self.assert_in('boolean', app.jinja_env.tests.keys())
self.assert_equal(app.jinja_env.tests['boolean'], is_boolean)
self.assert_true(app.jinja_env.tests['boolean'](False))
def test_add_template_test_with_name(self):
bp = flask.Blueprint('bp', __name__)
def is_boolean(value):
return isinstance(value, bool)
bp.add_app_template_test(is_boolean, 'boolean')
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
self.assert_in('boolean', app.jinja_env.tests.keys())
self.assert_equal(app.jinja_env.tests['boolean'], is_boolean)
self.assert_true(app.jinja_env.tests['boolean'](False))
def test_template_test_with_template(self):
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test()
def boolean(value):
return isinstance(value, bool)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
self.assert_in(b'Success!', rv.data)
def test_template_test_after_route_with_template(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test()
def boolean(value):
return isinstance(value, bool)
app.register_blueprint(bp, url_prefix='/py')
rv = app.test_client().get('/')
self.assert_in(b'Success!', rv.data)
def test_add_template_test_with_template(self):
bp = flask.Blueprint('bp', __name__)
def boolean(value):
return isinstance(value, bool)
bp.add_app_template_test(boolean)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
self.assert_in(b'Success!', rv.data)
def test_template_test_with_name_and_template(self):
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test('boolean')
def is_boolean(value):
return isinstance(value, bool)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
self.assert_in(b'Success!', rv.data)
def test_add_template_test_with_name_and_template(self):
bp = flask.Blueprint('bp', __name__)
def is_boolean(value):
return isinstance(value, bool)
bp.add_app_template_test(is_boolean, 'boolean')
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
self.assert_in(b'Success!', rv.data)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(BlueprintTestCase))
suite.addTest(unittest.makeSuite(ModuleTestCase))
return suite
| mit | 3,767,052,625,817,690,600 | 34.555696 | 90 | 0.559792 | false |
amir-qayyum-khan/edx-platform | lms/djangoapps/courseware/tests/test_self_paced_overrides.py | 23 | 6451 | """Tests for self-paced course due date overrides."""
# pylint: disable=missing-docstring
import datetime
import pytz
from django.test.utils import override_settings
from mock import patch
from courseware.tests.factories import BetaTesterFactory
from courseware.access import has_access
from lms.djangoapps.ccx.tests.test_overrides import inject_field_overrides
from lms.djangoapps.django_comment_client.utils import get_accessible_discussion_xblocks
from lms.djangoapps.courseware.field_overrides import OverrideFieldData, OverrideModulestoreFieldData
from openedx.core.djangoapps.self_paced.models import SelfPacedConfiguration
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
@override_settings(
XBLOCK_FIELD_DATA_WRAPPERS=['lms.djangoapps.courseware.field_overrides:OverrideModulestoreFieldData.wrap'],
MODULESTORE_FIELD_OVERRIDE_PROVIDERS=['courseware.self_paced_overrides.SelfPacedDateOverrideProvider'],
)
class SelfPacedDateOverrideTest(ModuleStoreTestCase):
"""
Tests for self-paced due date overrides.
"""
def setUp(self):
self.reset_setting_cache_variables()
super(SelfPacedDateOverrideTest, self).setUp()
SelfPacedConfiguration(enabled=True).save()
self.non_staff_user, __ = self.create_non_staff_user()
self.now = datetime.datetime.now(pytz.UTC).replace(microsecond=0)
self.future = self.now + datetime.timedelta(days=30)
def tearDown(self):
self.reset_setting_cache_variables()
super(SelfPacedDateOverrideTest, self).tearDown()
def reset_setting_cache_variables(self):
"""
The overridden settings for this class get cached on class variables.
Reset those to None before and after running the test to ensure clean
behavior.
"""
OverrideFieldData.provider_classes = None
OverrideModulestoreFieldData.provider_classes = None
def setup_course(self, **course_kwargs):
"""Set up a course with provided course attributes.
Creates a child block with a due date, and ensures that field
overrides are correctly applied for both blocks.
"""
course = CourseFactory.create(**course_kwargs)
section = ItemFactory.create(parent=course, due=self.now)
inject_field_overrides((course, section), course, self.user)
return (course, section)
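    # Hedged sketch (assumption, mirrors setup_course above): the due date
    # observed on the wrapped section is whatever the override provider
    # yields, so a direct attribute read is enough to probe its behaviour.
    def _example_due_date_probe(self, self_paced):
        __, section = self.setup_course(display_name="Probe", self_paced=self_paced)
        return section.due  # None when self-paced, self.now otherwise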
def create_discussion_xblocks(self, parent):
# Create a released discussion xblock
ItemFactory.create(
parent=parent,
category='discussion',
display_name='released',
start=self.now,
)
# Create a scheduled discussion xblock
ItemFactory.create(
parent=parent,
category='discussion',
display_name='scheduled',
start=self.future,
)
def test_instructor_paced_due_date(self):
__, ip_section = self.setup_course(display_name="Instructor Paced Course", self_paced=False)
self.assertEqual(ip_section.due, self.now)
def test_self_paced_due_date(self):
__, sp_section = self.setup_course(display_name="Self-Paced Course", self_paced=True)
self.assertIsNone(sp_section.due)
def test_self_paced_disabled_due_date(self):
SelfPacedConfiguration(enabled=False).save()
__, sp_section = self.setup_course(display_name="Self-Paced Course", self_paced=True)
self.assertEqual(sp_section.due, self.now)
@patch.dict('courseware.access.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_course_access_to_beta_users(self):
"""
Test that beta testers can access `self_paced` course prior to start date.
"""
now = datetime.datetime.now(pytz.UTC)
one_month_from_now = now + datetime.timedelta(days=30)
course_options = {
'days_early_for_beta': 100,
'self_paced': True,
'start': one_month_from_now,
}
# Create a `self_paced` course and add a beta tester in it
self_paced_course, self_paced_section = self.setup_course(**course_options)
beta_tester = BetaTesterFactory(course_key=self_paced_course.id)
# Verify course is `self_paced` and course has start date but not section.
self.assertTrue(self_paced_course.self_paced)
self.assertEqual(self_paced_course.start, one_month_from_now)
self.assertIsNone(self_paced_section.start)
# Verify that non-staff user do not have access to the course
self.assertFalse(has_access(self.non_staff_user, 'load', self_paced_course))
# Verify beta tester can access the course as well as the course sections
self.assertTrue(has_access(beta_tester, 'load', self_paced_course))
self.assertTrue(has_access(beta_tester, 'load', self_paced_section, self_paced_course.id))
@patch.dict('courseware.access.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_instructor_paced_discussion_xblock_visibility(self):
"""
Verify that discussion xblocks scheduled for release in the future are
not visible to students in an instructor-paced course.
"""
course, section = self.setup_course(start=self.now, self_paced=False)
self.create_discussion_xblocks(section)
# Only the released xblocks should be visible when the course is instructor-paced.
xblocks = get_accessible_discussion_xblocks(course, self.non_staff_user)
self.assertTrue(
all(xblock.display_name == 'released' for xblock in xblocks)
)
@patch.dict('courseware.access.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_self_paced_discussion_xblock_visibility(self):
"""
Regression test. Verify that discussion xblocks scheduled for release
in the future are visible to students in a self-paced course.
"""
course, section = self.setup_course(start=self.now, self_paced=True)
self.create_discussion_xblocks(section)
# The scheduled xblocks should be visible when the course is self-paced.
xblocks = get_accessible_discussion_xblocks(course, self.non_staff_user)
self.assertEqual(len(xblocks), 2)
self.assertTrue(
any(xblock.display_name == 'scheduled' for xblock in xblocks)
)
| agpl-3.0 | 5,706,769,903,066,313,000 | 42.295302 | 111 | 0.687335 | false |
jfpla/odoo | openerp/addons/base/ir/ir_model.py | 7 | 60967 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2014 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from collections import defaultdict
import logging
import re
import time
import types
import openerp
from openerp import SUPERUSER_ID
from openerp import models, tools, api
from openerp.modules.registry import RegistryManager
from openerp.osv import fields, osv
from openerp.osv.orm import BaseModel, Model, MAGIC_COLUMNS, except_orm
from openerp.tools import config
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
MODULE_UNINSTALL_FLAG = '_force_unlink'
def _get_fields_type(self, cr, uid, context=None):
    # Avoid too many nested `if`s below, as RedHat's Python 2.6
    # breaks on it. See bug 939653.
return sorted([(k,k) for k,v in fields.__dict__.iteritems()
if type(v) == types.TypeType and \
issubclass(v, fields._column) and \
v != fields._column and \
not v._deprecated and \
not issubclass(v, fields.function)])
def _in_modules(self, cr, uid, ids, field_name, arg, context=None):
#pseudo-method used by fields.function in ir.model/ir.model.fields
module_pool = self.pool["ir.module.module"]
installed_module_ids = module_pool.search(cr, uid, [('state','=','installed')])
installed_module_names = module_pool.read(cr, uid, installed_module_ids, ['name'], context=context)
installed_modules = set(x['name'] for x in installed_module_names)
result = {}
xml_ids = osv.osv._get_xml_ids(self, cr, uid, ids)
for k,v in xml_ids.iteritems():
result[k] = ', '.join(sorted(installed_modules & set(xml_id.split('.')[0] for xml_id in v)))
return result
class unknown(models.AbstractModel):
"""
Abstract model used as a substitute for relational fields with an unknown
comodel.
"""
_name = '_unknown'
class ir_model(osv.osv):
_name = 'ir.model'
_description = "Models"
_order = 'model'
def _is_osv_memory(self, cr, uid, ids, field_name, arg, context=None):
models = self.browse(cr, uid, ids, context=context)
res = dict.fromkeys(ids)
for model in models:
if model.model in self.pool:
res[model.id] = self.pool[model.model].is_transient()
else:
_logger.error('Missing model %s' % (model.model, ))
return res
def _search_osv_memory(self, cr, uid, model, name, domain, context=None):
if not domain:
return []
__, operator, value = domain[0]
if operator not in ['=', '!=']:
raise osv.except_osv(_("Invalid Search Criteria"), _('The osv_memory field can only be compared with = and != operator.'))
value = bool(value) if operator == '=' else not bool(value)
all_model_ids = self.search(cr, uid, [], context=context)
is_osv_mem = self._is_osv_memory(cr, uid, all_model_ids, 'osv_memory', arg=None, context=context)
return [('id', 'in', [id for id in is_osv_mem if bool(is_osv_mem[id]) == value])]
def _view_ids(self, cr, uid, ids, field_name, arg, context=None):
models = self.browse(cr, uid, ids)
res = {}
for model in models:
res[model.id] = self.pool["ir.ui.view"].search(cr, uid, [('model', '=', model.model)])
return res
def _inherited_models(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for model in self.browse(cr, uid, ids, context=context):
res[model.id] = []
inherited_models = [model_name for model_name in self.pool[model.model]._inherits]
if inherited_models:
res[model.id] = self.search(cr, uid, [('model', 'in', inherited_models)], context=context)
return res
_columns = {
'name': fields.char('Model Description', translate=True, required=True),
'model': fields.char('Model', required=True, select=1),
'info': fields.text('Information'),
'field_id': fields.one2many('ir.model.fields', 'model_id', 'Fields', required=True, copy=True),
'inherited_model_ids': fields.function(_inherited_models, type="many2many", obj="ir.model", string="Inherited models",
help="The list of models that extends the current model."),
'state': fields.selection([('manual','Custom Object'),('base','Base Object')],'Type', readonly=True),
'access_ids': fields.one2many('ir.model.access', 'model_id', 'Access'),
'osv_memory': fields.function(_is_osv_memory, string='Transient Model', type='boolean',
fnct_search=_search_osv_memory,
help="This field specifies whether the model is transient or not (i.e. if records are automatically deleted from the database or not)"),
'modules': fields.function(_in_modules, type='char', string='In Modules', help='List of modules in which the object is defined or inherited'),
'view_ids': fields.function(_view_ids, type='one2many', obj='ir.ui.view', string='Views'),
}
_defaults = {
'model': 'x_',
'state': 'manual',
}
def _check_model_name(self, cr, uid, ids, context=None):
for model in self.browse(cr, uid, ids, context=context):
if model.state=='manual':
if not model.model.startswith('x_'):
return False
if not re.match('^[a-z_A-Z0-9.]+$',model.model):
return False
return True
def _model_name_msg(self, cr, uid, ids, context=None):
return _('The Object name must start with x_ and not contain any special character !')
_constraints = [
(_check_model_name, _model_name_msg, ['model']),
]
_sql_constraints = [
('obj_name_uniq', 'unique (model)', 'Each model must be unique!'),
]
# overridden to allow searching both on model name (model field)
# and model description (name field)
def _name_search(self, cr, uid, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
if args is None:
args = []
domain = args + ['|', ('model', operator, name), ('name', operator, name)]
return self.name_get(cr, name_get_uid or uid,
super(ir_model, self).search(cr, uid, domain, limit=limit, context=context),
context=context)
def _drop_table(self, cr, uid, ids, context=None):
for model in self.browse(cr, uid, ids, context):
model_pool = self.pool[model.model]
cr.execute('select relkind from pg_class where relname=%s', (model_pool._table,))
result = cr.fetchone()
if result and result[0] == 'v':
cr.execute('DROP view %s' % (model_pool._table,))
elif result and result[0] == 'r':
cr.execute('DROP TABLE %s CASCADE' % (model_pool._table,))
return True
def unlink(self, cr, user, ids, context=None):
# Prevent manual deletion of module tables
if context is None: context = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not context.get(MODULE_UNINSTALL_FLAG):
for model in self.browse(cr, user, ids, context):
if model.state != 'manual':
raise except_orm(_('Error'), _("Model '%s' contains module data and cannot be removed!") % (model.name,))
self._drop_table(cr, user, ids, context)
res = super(ir_model, self).unlink(cr, user, ids, context)
if not context.get(MODULE_UNINSTALL_FLAG):
# only reload pool for normal unlink. For module uninstall the
# reload is done independently in openerp.modules.loading
cr.commit() # must be committed before reloading registry in new cursor
api.Environment.reset()
RegistryManager.new(cr.dbname)
RegistryManager.signal_registry_change(cr.dbname)
return res
def write(self, cr, user, ids, vals, context=None):
if context:
context = dict(context)
context.pop('__last_update', None)
# Filter out operations 4 link from field id, because openerp-web
# always write (4,id,False) even for non dirty items
if 'field_id' in vals:
vals['field_id'] = [op for op in vals['field_id'] if op[0] != 4]
return super(ir_model,self).write(cr, user, ids, vals, context)
def create(self, cr, user, vals, context=None):
if context is None:
context = {}
res = super(ir_model,self).create(cr, user, vals, context)
if vals.get('state','manual')=='manual':
# add model in registry
self.instanciate(cr, user, vals['model'], context)
self.pool.setup_models(cr, partial=(not self.pool.ready))
# update database schema
model = self.pool[vals['model']]
ctx = dict(context,
field_name=vals['name'],
field_state='manual',
select=vals.get('select_level', '0'),
update_custom_fields=True)
model._auto_init(cr, ctx)
model._auto_end(cr, ctx) # actually create FKs!
RegistryManager.signal_registry_change(cr.dbname)
return res
def instanciate(self, cr, user, model, context=None):
if isinstance(model, unicode):
model = model.encode('utf-8')
class CustomModel(models.Model):
_name = model
_module = False
_custom = True
CustomModel._build_model(self.pool, cr)
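# Hedged sketch (not part of the original file): creating a custom model
# through the ORM exercises create() and instanciate() above. The model name
# must carry the mandatory 'x_' prefix enforced by _check_model_name(); the
# field values below are purely illustrative.
def _example_create_custom_model(pool, cr, uid):
    # Returns the id of the new ir.model record; the registry is reloaded
    # as a side effect of ir_model.create().
    return pool['ir.model'].create(cr, uid, {
        'name': 'Library Book',
        'model': 'x_library_book',
        'state': 'manual',
    })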
class ir_model_fields(osv.osv):
_name = 'ir.model.fields'
_description = "Fields"
_rec_name = 'field_description'
_columns = {
'name': fields.char('Name', required=True, select=1),
'complete_name': fields.char('Complete Name', select=1),
'model': fields.char('Object Name', required=True, select=1,
help="The technical name of the model this field belongs to"),
'relation': fields.char('Object Relation',
help="For relationship fields, the technical name of the target model"),
'relation_field': fields.char('Relation Field',
help="For one2many fields, the field on the target model that implement the opposite many2one relationship"),
'model_id': fields.many2one('ir.model', 'Model', required=True, select=True, ondelete='cascade',
help="The model this field belongs to"),
'field_description': fields.char('Field Label', required=True),
'ttype': fields.selection(_get_fields_type, 'Field Type', required=True),
'selection': fields.char('Selection Options', help="List of options for a selection field, "
"specified as a Python expression defining a list of (key, label) pairs. "
"For example: [('blue','Blue'),('yellow','Yellow')]"),
'required': fields.boolean('Required'),
'readonly': fields.boolean('Readonly'),
'select_level': fields.selection([('0','Not Searchable'),('1','Always Searchable'),('2','Advanced Search (deprecated)')],'Searchable', required=True),
'translate': fields.boolean('Translatable', help="Whether values for this field can be translated (enables the translation mechanism for that field)"),
'size': fields.integer('Size'),
'state': fields.selection([('manual','Custom Field'),('base','Base Field')],'Type', required=True, readonly=True, select=1),
'on_delete': fields.selection([('cascade', 'Cascade'), ('set null', 'Set NULL'), ('restrict', 'Restrict')],
'On Delete', help='On delete property for many2one fields'),
'domain': fields.char('Domain', help="The optional domain to restrict possible values for relationship fields, "
"specified as a Python expression defining a list of triplets. "
"For example: [('color','=','red')]"),
'groups': fields.many2many('res.groups', 'ir_model_fields_group_rel', 'field_id', 'group_id', 'Groups'),
'selectable': fields.boolean('Selectable'),
'modules': fields.function(_in_modules, type='char', string='In Modules', help='List of modules in which the field is defined'),
'serialization_field_id': fields.many2one('ir.model.fields', 'Serialization Field', domain = "[('ttype','=','serialized')]",
ondelete='cascade', help="If set, this field will be stored in the sparse "
"structure of the serialization field, instead "
"of having its own database column. This cannot be "
"changed after creation."),
}
_defaults = {
'selection': "",
'domain': "[]",
'name': 'x_',
'state': 'manual',
'on_delete': 'set null',
'select_level': '0',
'field_description': '',
'selectable': 1,
}
_order = "name"
def _check_selection(self, cr, uid, selection, context=None):
try:
selection_list = eval(selection)
except Exception:
_logger.warning('Invalid selection list definition for fields.selection', exc_info=True)
raise except_orm(_('Error'),
_("The Selection Options expression is not a valid Pythonic expression."
"Please provide an expression in the [('key','Label'), ...] format."))
check = True
if not (isinstance(selection_list, list) and selection_list):
check = False
else:
for item in selection_list:
if not (isinstance(item, (tuple,list)) and len(item) == 2):
check = False
break
if not check:
raise except_orm(_('Error'),
_("The Selection Options expression is must be in the [('key','Label'), ...] format!"))
return True
def _size_gt_zero_msg(self, cr, user, ids, context=None):
return _('Size of the field can never be less than 0 !')
_sql_constraints = [
('size_gt_zero', 'CHECK (size>=0)',_size_gt_zero_msg ),
]
def _drop_column(self, cr, uid, ids, context=None):
for field in self.browse(cr, uid, ids, context):
if field.name in MAGIC_COLUMNS:
continue
model = self.pool[field.model]
cr.execute('select relkind from pg_class where relname=%s', (model._table,))
result = cr.fetchone()
cr.execute("SELECT column_name FROM information_schema.columns WHERE table_name ='%s' and column_name='%s'" %(model._table, field.name))
column_name = cr.fetchone()
if column_name and (result and result[0] == 'r'):
cr.execute('ALTER table "%s" DROP column "%s" cascade' % (model._table, field.name))
# remove m2m relation table for custom fields
# we consider the m2m relation is only one way as it's not possible
# to specify the relation table in the interface for custom fields
# TODO master: maybe use ir.model.relations for custom fields
if field.state == 'manual' and field.ttype == 'many2many':
rel_name = model._fields[field.name].relation
cr.execute('DROP table "%s"' % (rel_name))
model._pop_field(field.name)
return True
def unlink(self, cr, user, ids, context=None):
# Prevent manual deletion of module columns
if context is None: context = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not context.get(MODULE_UNINSTALL_FLAG) and \
any(field.state != 'manual' for field in self.browse(cr, user, ids, context)):
raise except_orm(_('Error'), _("This column contains module data and cannot be removed!"))
self._drop_column(cr, user, ids, context)
res = super(ir_model_fields, self).unlink(cr, user, ids, context)
if not context.get(MODULE_UNINSTALL_FLAG):
# The field we just deleted might have be inherited, and registry is
# inconsistent in this case; therefore we reload the registry.
cr.commit()
api.Environment.reset()
RegistryManager.new(cr.dbname)
RegistryManager.signal_registry_change(cr.dbname)
return res
def create(self, cr, user, vals, context=None):
if 'model_id' in vals:
model_data = self.pool['ir.model'].browse(cr, user, vals['model_id'])
vals['model'] = model_data.model
if context is None:
context = {}
if vals.get('ttype', False) == 'selection':
if not vals.get('selection',False):
raise except_orm(_('Error'), _('For selection fields, the Selection Options must be given!'))
self._check_selection(cr, user, vals['selection'], context=context)
res = super(ir_model_fields,self).create(cr, user, vals, context)
if vals.get('state','manual') == 'manual':
if not vals['name'].startswith('x_'):
raise except_orm(_('Error'), _("Custom fields must have a name that starts with 'x_' !"))
if vals.get('relation',False) and not self.pool['ir.model'].search(cr, user, [('model','=',vals['relation'])]):
raise except_orm(_('Error'), _("Model %s does not exist!") % vals['relation'])
self.pool.clear_manual_fields()
if vals['model'] in self.pool:
model = self.pool[vals['model']]
if vals['model'].startswith('x_') and vals['name'] == 'x_name':
model._rec_name = 'x_name'
# re-initialize model in registry
model.__init__(self.pool, cr)
self.pool.setup_models(cr, partial=(not self.pool.ready))
# update database schema
model = self.pool[vals['model']]
ctx = dict(context,
field_name=vals['name'],
field_state='manual',
select=vals.get('select_level', '0'),
update_custom_fields=True)
model._auto_init(cr, ctx)
model._auto_end(cr, ctx) # actually create FKs!
RegistryManager.signal_registry_change(cr.dbname)
return res
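    # Hedged sketch (assumption, mirrors create() above): adding a custom
    # field to an existing custom model. Field names must start with 'x_',
    # and 'model_id' is resolved to the technical model name automatically;
    # the field definition below is purely illustrative.
    def _example_add_custom_char_field(self, cr, uid, model_id, context=None):
        return self.create(cr, uid, {
            'model_id': model_id,
            'name': 'x_isbn',
            'field_description': 'ISBN',
            'ttype': 'char',
            'state': 'manual',
        }, context=context)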
def write(self, cr, user, ids, vals, context=None):
if context is None:
context = {}
        # For the moment, renaming a sparse field or changing the storing system is not allowed. This may be done later.
if 'serialization_field_id' in vals or 'name' in vals:
for field in self.browse(cr, user, ids, context=context):
if 'serialization_field_id' in vals and field.serialization_field_id.id != vals['serialization_field_id']:
raise except_orm(_('Error!'), _('Changing the storing system for field "%s" is not allowed.')%field.name)
if field.serialization_field_id and (field.name != vals['name']):
raise except_orm(_('Error!'), _('Renaming sparse field "%s" is not allowed')%field.name)
# if set, *one* column can be renamed here
column_rename = None
# names of the models to patch
patched_models = set()
if vals and ids:
checked_selection = False # need only check it once, so defer
for item in self.browse(cr, user, ids, context=context):
obj = self.pool.get(item.model)
field = getattr(obj, '_fields', {}).get(item.name)
if item.state != 'manual':
raise except_orm(_('Error!'),
_('Properties of base fields cannot be altered in this manner! '
'Please modify them through Python code, '
'preferably through a custom addon!'))
if item.ttype == 'selection' and 'selection' in vals \
and not checked_selection:
self._check_selection(cr, user, vals['selection'], context=context)
checked_selection = True
final_name = item.name
if 'name' in vals and vals['name'] != item.name:
# We need to rename the column
if column_rename:
raise except_orm(_('Error!'), _('Can only rename one column at a time!'))
if vals['name'] in obj._columns:
raise except_orm(_('Error!'), _('Cannot rename column to %s, because that column already exists!') % vals['name'])
if vals.get('state', 'manual') == 'manual' and not vals['name'].startswith('x_'):
raise except_orm(_('Error!'), _('New column name must still start with x_ , because it is a custom field!'))
if '\'' in vals['name'] or '"' in vals['name'] or ';' in vals['name']:
raise ValueError('Invalid character in column name')
column_rename = (obj, (obj._table, item.name, vals['name']))
final_name = vals['name']
if 'model_id' in vals and vals['model_id'] != item.model_id.id:
raise except_orm(_("Error!"), _("Changing the model of a field is forbidden!"))
if 'ttype' in vals and vals['ttype'] != item.ttype:
raise except_orm(_("Error!"), _("Changing the type of a column is not yet supported. "
"Please drop it and create it again!"))
# We don't check the 'state', because it might come from the context
# (thus be set for multiple fields) and will be ignored anyway.
if obj is not None and field is not None:
patched_models.add(obj._name)
# These shall never be written (modified)
for column_name in ('model_id', 'model', 'state'):
if column_name in vals:
del vals[column_name]
res = super(ir_model_fields,self).write(cr, user, ids, vals, context=context)
self.pool.clear_manual_fields()
if column_rename:
obj, rename = column_rename
cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % rename)
if column_rename or patched_models:
# setup models, this will reload all manual fields in registry
self.pool.setup_models(cr, partial=(not self.pool.ready))
if patched_models:
# We have to update _columns of the model(s) and then call their
# _auto_init to sync the db with the model. Hopefully, since write()
# was called earlier, they will be in-sync before the _auto_init.
# Anything we don't update in _columns now will be reset from
# the model into ir.model.fields (db).
ctx = dict(context,
select=vals.get('select_level', '0'),
update_custom_fields=True,
)
for model_name in patched_models:
obj = self.pool[model_name]
obj._auto_init(cr, ctx)
obj._auto_end(cr, ctx) # actually create FKs!
if column_rename or patched_models:
RegistryManager.signal_registry_change(cr.dbname)
return res
class ir_model_constraint(Model):
"""
This model tracks PostgreSQL foreign keys and constraints used by OpenERP
models.
"""
_name = 'ir.model.constraint'
_columns = {
'name': fields.char('Constraint', required=True, select=1,
help="PostgreSQL constraint or foreign key name."),
'model': fields.many2one('ir.model', string='Model',
required=True, select=1),
'module': fields.many2one('ir.module.module', string='Module',
required=True, select=1),
'type': fields.char('Constraint Type', required=True, size=1, select=1,
help="Type of the constraint: `f` for a foreign key, "
"`u` for other constraints."),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Initialization Date')
}
_sql_constraints = [
('module_name_uniq', 'unique(name, module)',
'Constraints with the same name are unique per module.'),
]
def _module_data_uninstall(self, cr, uid, ids, context=None):
"""
Delete PostgreSQL foreign keys and constraints tracked by this model.
"""
if uid != SUPERUSER_ID and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
context = dict(context or {})
ids_set = set(ids)
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model.model
model_obj = self.pool[model]
name = openerp.tools.ustr(data.name)
typ = data.type
# double-check we are really going to delete all the owners of this schema element
cr.execute("""SELECT id from ir_model_constraint where name=%s""", (data.name,))
external_ids = [x[0] for x in cr.fetchall()]
if set(external_ids)-ids_set:
                # as installed modules have defined this element, we must not delete it!
continue
if typ == 'f':
# test if FK exists on this table (it could be on a related m2m table, in which case we ignore it)
cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""", ('f', name, model_obj._table))
if cr.fetchone():
cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (model_obj._table, name),)
_logger.info('Dropped FK CONSTRAINT %s@%s', name, model)
if typ == 'u':
# test if constraint exists
cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""", ('u', name, model_obj._table))
if cr.fetchone():
cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (model_obj._table, name),)
_logger.info('Dropped CONSTRAINT %s@%s', name, model)
self.unlink(cr, uid, ids, context)
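    # Hedged sketch (assumption): collecting the constraint records of a single
    # module before delegating to _module_data_uninstall() above, e.g. from a
    # maintenance script. 'module_id' is the database id of an ir.module.module.
    def _example_uninstall_module_constraints(self, cr, uid, module_id, context=None):
        ids = self.search(cr, uid, [('module', '=', module_id)], context=context)
        if ids:
            self._module_data_uninstall(cr, uid, ids, context=context)
        return ids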
class ir_model_relation(Model):
"""
This model tracks PostgreSQL tables used to implement OpenERP many2many
relations.
"""
_name = 'ir.model.relation'
_columns = {
'name': fields.char('Relation Name', required=True, select=1,
help="PostgreSQL table name implementing a many2many relation."),
'model': fields.many2one('ir.model', string='Model',
required=True, select=1),
'module': fields.many2one('ir.module.module', string='Module',
required=True, select=1),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Initialization Date')
}
def _module_data_uninstall(self, cr, uid, ids, context=None):
"""
Delete PostgreSQL many2many relations tracked by this model.
"""
if uid != SUPERUSER_ID and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
ids_set = set(ids)
to_drop_table = []
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model
name = openerp.tools.ustr(data.name)
# double-check we are really going to delete all the owners of this schema element
cr.execute("""SELECT id from ir_model_relation where name = %s""", (data.name,))
external_ids = [x[0] for x in cr.fetchall()]
if set(external_ids)-ids_set:
                # as installed modules have defined this element, we must not delete it!
continue
cr.execute("SELECT 1 FROM information_schema.tables WHERE table_name=%s", (name,))
            if cr.fetchone() and name not in to_drop_table:
to_drop_table.append(name)
self.unlink(cr, uid, ids, context)
# drop m2m relation tables
for table in to_drop_table:
cr.execute('DROP TABLE %s CASCADE'% table,)
_logger.info('Dropped table %s', table)
cr.commit()
class ir_model_access(osv.osv):
_name = 'ir.model.access'
_columns = {
'name': fields.char('Name', required=True, select=True),
        'active': fields.boolean('Active', help='If you uncheck the active field, it will disable the ACL without deleting it (if you delete a native ACL, it will be re-created when you reload the module).'),
'model_id': fields.many2one('ir.model', 'Object', required=True, domain=[('osv_memory','=', False)], select=True, ondelete='cascade'),
'group_id': fields.many2one('res.groups', 'Group', ondelete='cascade', select=True),
'perm_read': fields.boolean('Read Access'),
'perm_write': fields.boolean('Write Access'),
'perm_create': fields.boolean('Create Access'),
'perm_unlink': fields.boolean('Delete Access'),
}
_defaults = {
'active': True,
}
def check_groups(self, cr, uid, group):
grouparr = group.split('.')
if not grouparr:
return False
cr.execute("select 1 from res_groups_users_rel where uid=%s and gid IN (select res_id from ir_model_data where module=%s and name=%s)", (uid, grouparr[0], grouparr[1],))
return bool(cr.fetchone())
def check_group(self, cr, uid, model, mode, group_ids):
""" Check if a specific group has the access mode to the specified model"""
assert mode in ['read','write','create','unlink'], 'Invalid access mode'
if isinstance(model, BaseModel):
assert model._name == 'ir.model', 'Invalid model object'
model_name = model.name
else:
model_name = model
if isinstance(group_ids, (int, long)):
group_ids = [group_ids]
for group_id in group_ids:
cr.execute("SELECT perm_" + mode + " "
" FROM ir_model_access a "
" JOIN ir_model m ON (m.id = a.model_id) "
" WHERE m.model = %s AND a.active IS True "
" AND a.group_id = %s", (model_name, group_id)
)
r = cr.fetchone()
if r is None:
cr.execute("SELECT perm_" + mode + " "
" FROM ir_model_access a "
" JOIN ir_model m ON (m.id = a.model_id) "
" WHERE m.model = %s AND a.active IS True "
" AND a.group_id IS NULL", (model_name, )
)
r = cr.fetchone()
access = bool(r and r[0])
if access:
return True
# pass no groups -> no access
return False
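    # Hedged sketch (assumption): previewing what a prospective group
    # assignment would grant on a model, without touching any user's ACL
    # cache. 'res.partner' is just an illustrative model name.
    def _example_preview_group_access(self, cr, uid, group_id):
        return dict(
            (mode, self.check_group(cr, uid, 'res.partner', mode, [group_id]))
            for mode in ('read', 'write', 'create', 'unlink')
        )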
def group_names_with_access(self, cr, model_name, access_mode):
"""Returns the names of visible groups which have been granted ``access_mode`` on
the model ``model_name``.
:rtype: list
"""
assert access_mode in ['read','write','create','unlink'], 'Invalid access mode: %s' % access_mode
cr.execute('''SELECT
c.name, g.name
FROM
ir_model_access a
JOIN ir_model m ON (a.model_id=m.id)
JOIN res_groups g ON (a.group_id=g.id)
LEFT JOIN ir_module_category c ON (c.id=g.category_id)
WHERE
m.model=%s AND
a.active IS True AND
a.perm_''' + access_mode, (model_name,))
return [('%s/%s' % x) if x[0] else x[1] for x in cr.fetchall()]
    # The context parameter is useful when the method translates error messages.
    # But as the method raises an exception in that case, the key 'lang' might
    # not really be necessary as a cache key, unless the `ormcache_context`
    # decorator catches the exception (it does not at the moment).
@tools.ormcache_context(accepted_keys=('lang',))
def check(self, cr, uid, model, mode='read', raise_exception=True, context=None):
if uid==1:
# User root have all accesses
# TODO: exclude xml-rpc requests
return True
assert mode in ['read','write','create','unlink'], 'Invalid access mode'
if isinstance(model, BaseModel):
assert model._name == 'ir.model', 'Invalid model object'
model_name = model.model
else:
model_name = model
# TransientModel records have no access rights, only an implicit access rule
if model_name not in self.pool:
_logger.error('Missing model %s' % (model_name, ))
elif self.pool[model_name].is_transient():
return True
# We check if a specific rule exists
cr.execute('SELECT MAX(CASE WHEN perm_' + mode + ' THEN 1 ELSE 0 END) '
' FROM ir_model_access a '
' JOIN ir_model m ON (m.id = a.model_id) '
' JOIN res_groups_users_rel gu ON (gu.gid = a.group_id) '
' WHERE m.model = %s '
' AND gu.uid = %s '
' AND a.active IS True '
, (model_name, uid,)
)
r = cr.fetchone()[0]
if r is None:
# there is no specific rule. We check the generic rule
cr.execute('SELECT MAX(CASE WHEN perm_' + mode + ' THEN 1 ELSE 0 END) '
' FROM ir_model_access a '
' JOIN ir_model m ON (m.id = a.model_id) '
' WHERE a.group_id IS NULL '
' AND m.model = %s '
' AND a.active IS True '
, (model_name,)
)
r = cr.fetchone()[0]
if not r and raise_exception:
groups = '\n\t'.join('- %s' % g for g in self.group_names_with_access(cr, model_name, mode))
msg_heads = {
# Messages are declared in extenso so they are properly exported in translation terms
'read': _("Sorry, you are not allowed to access this document."),
'write': _("Sorry, you are not allowed to modify this document."),
'create': _("Sorry, you are not allowed to create this kind of document."),
'unlink': _("Sorry, you are not allowed to delete this document."),
}
if groups:
msg_tail = _("Only users with the following access level are currently allowed to do that") + ":\n%s\n\n(" + _("Document model") + ": %s)"
msg_params = (groups, model_name)
else:
msg_tail = _("Please contact your system administrator if you think this is an error.") + "\n\n(" + _("Document model") + ": %s)"
msg_params = (model_name,)
_logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s', mode, uid, model_name)
msg = '%s %s' % (msg_heads[mode], msg_tail)
raise openerp.exceptions.AccessError(msg % msg_params)
return bool(r)
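    # Hedged sketch (assumption): the raise_exception flag of check() above
    # allows a soft probe of the ACL tables, e.g. to hide a button instead of
    # failing the whole request.
    def _example_can_write(self, cr, uid, model_name):
        return self.check(cr, uid, model_name, 'write', raise_exception=False)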
__cache_clearing_methods = []
def register_cache_clearing_method(self, model, method):
self.__cache_clearing_methods.append((model, method))
def unregister_cache_clearing_method(self, model, method):
try:
i = self.__cache_clearing_methods.index((model, method))
del self.__cache_clearing_methods[i]
except ValueError:
pass
def call_cache_clearing_methods(self, cr):
self.invalidate_cache(cr, SUPERUSER_ID)
self.check.clear_cache(self) # clear the cache of check function
for model, method in self.__cache_clearing_methods:
if model in self.pool:
getattr(self.pool[model], method)()
#
# Check rights on actions
#
def write(self, cr, uid, ids, values, context=None):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).write(cr, uid, ids, values, context=context)
return res
def create(self, cr, uid, values, context=None):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).create(cr, uid, values, context=context)
return res
def unlink(self, cr, uid, ids, context=None):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).unlink(cr, uid, ids, context=context)
return res
class ir_model_data(osv.osv):
"""Holds external identifier keys for records in the database.
This has two main uses:
* allows easy data integration with third-party systems,
making import/export/sync of data possible, as records
can be uniquely identified across multiple systems
* allows tracking the origin of data installed by OpenERP
modules themselves, thus making it possible to later
update them seamlessly.
"""
_name = 'ir.model.data'
_order = 'module,model,name'
def name_get(self, cr, uid, ids, context=None):
bymodel = defaultdict(dict)
names = {}
for res in self.browse(cr, uid, ids, context=context):
bymodel[res.model][res.res_id] = res
names[res.id] = res.complete_name
#result[res.model][res.res_id] = res.id
for model, id_map in bymodel.iteritems():
try:
ng = dict(self.pool[model].name_get(cr, uid, id_map.keys(), context=context))
except Exception:
pass
else:
for r in id_map.itervalues():
names[r.id] = ng.get(r.res_id, r.complete_name)
return [(i, names[i]) for i in ids]
def _complete_name_get(self, cr, uid, ids, prop, unknow_none, context=None):
result = {}
for res in self.browse(cr, uid, ids, context=context):
result[res.id] = (res.module and (res.module + '.') or '')+res.name
return result
_columns = {
'name': fields.char('External Identifier', required=True, select=1,
help="External Key/Identifier that can be used for "
"data integration with third-party systems"),
'complete_name': fields.function(_complete_name_get, type='char', string='Complete ID'),
'model': fields.char('Model Name', required=True, select=1),
'module': fields.char('Module', required=True, select=1),
'res_id': fields.integer('Record ID', select=1,
help="ID of the target record in the database"),
'noupdate': fields.boolean('Non Updatable'),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Init Date')
}
_defaults = {
'date_init': fields.datetime.now,
'date_update': fields.datetime.now,
'noupdate': False,
'module': ''
}
_sql_constraints = [
('module_name_uniq', 'unique(name, module)', 'You cannot have multiple records with the same external ID in the same module!'),
]
def __init__(self, pool, cr):
osv.osv.__init__(self, pool, cr)
# also stored in pool to avoid being discarded along with this osv instance
if getattr(pool, 'model_data_reference_ids', None) is None:
self.pool.model_data_reference_ids = {}
# put loads on the class, in order to share it among all instances
type(self).loads = self.pool.model_data_reference_ids
def _auto_init(self, cr, context=None):
res = super(ir_model_data, self)._auto_init(cr, context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'ir_model_data_module_name_index\'')
if not cr.fetchone():
cr.execute('CREATE INDEX ir_model_data_module_name_index ON ir_model_data (module, name)')
return res
# NEW V8 API
@tools.ormcache(skiparg=3)
def xmlid_lookup(self, cr, uid, xmlid):
"""Low level xmlid lookup
Return (id, res_model, res_id) or raise ValueError if not found
"""
module, name = xmlid.split('.', 1)
ids = self.search(cr, uid, [('module','=',module), ('name','=', name)])
if not ids:
raise ValueError('External ID not found in the system: %s' % (xmlid))
# the sql constraints ensure us we have only one result
res = self.read(cr, uid, ids[0], ['model', 'res_id'])
if not res['res_id']:
raise ValueError('External ID not found in the system: %s' % (xmlid))
return ids[0], res['model'], res['res_id']
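    # Hedged sketch (assumption): resolving a well-known external identifier
    # through the cached lookup above; 'base.group_system' ships with the base
    # module, so the ValueError branch is not expected here.
    def _example_resolve_xmlid(self, cr, uid):
        __, res_model, res_id = self.xmlid_lookup(cr, uid, 'base.group_system')
        return res_model, res_id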
def xmlid_to_res_model_res_id(self, cr, uid, xmlid, raise_if_not_found=False):
""" Return (res_model, res_id)"""
try:
return self.xmlid_lookup(cr, uid, xmlid)[1:3]
except ValueError:
if raise_if_not_found:
raise
return (False, False)
def xmlid_to_res_id(self, cr, uid, xmlid, raise_if_not_found=False):
""" Returns res_id """
return self.xmlid_to_res_model_res_id(cr, uid, xmlid, raise_if_not_found)[1]
def xmlid_to_object(self, cr, uid, xmlid, raise_if_not_found=False, context=None):
""" Return a browse_record
if not found and raise_if_not_found is True return None
"""
t = self.xmlid_to_res_model_res_id(cr, uid, xmlid, raise_if_not_found)
res_model, res_id = t
if res_model and res_id:
record = self.pool[res_model].browse(cr, uid, res_id, context=context)
if record.exists():
return record
if raise_if_not_found:
raise ValueError('No record found for unique ID %s. It may have been deleted.' % (xmlid))
return None
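    # Illustrative usage of the helpers above (added comment, not part of the
    # original module; 'base.user_root' is just an example external ID):
    #
    #     imd = self.pool['ir.model.data']
    #     model, res_id = imd.xmlid_to_res_model_res_id(cr, uid, 'base.user_root')
    #     record = imd.xmlid_to_object(cr, uid, 'base.user_root')  # browse_record or None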
# OLD API
def _get_id(self, cr, uid, module, xml_id):
"""Returns the id of the ir.model.data record corresponding to a given module and xml_id (cached) or raise a ValueError if not found"""
return self.xmlid_lookup(cr, uid, "%s.%s" % (module, xml_id))[0]
def get_object_reference(self, cr, uid, module, xml_id):
"""Returns (model, res_id) corresponding to a given module and xml_id (cached) or raise ValueError if not found"""
return self.xmlid_lookup(cr, uid, "%s.%s" % (module, xml_id))[1:3]
def check_object_reference(self, cr, uid, module, xml_id, raise_on_access_error=False):
"""Returns (model, res_id) corresponding to a given module and xml_id (cached), if and only if the user has the necessary access rights
to see that object, otherwise raise a ValueError if raise_on_access_error is True or returns a tuple (model found, False)"""
model, res_id = self.get_object_reference(cr, uid, module, xml_id)
#search on id found in result to check if current user has read access right
check_right = self.pool.get(model).search(cr, uid, [('id', '=', res_id)])
if check_right:
return model, res_id
if raise_on_access_error:
raise ValueError('Not enough access rights on the external ID: %s.%s' % (module, xml_id))
return model, False
def get_object(self, cr, uid, module, xml_id, context=None):
""" Returns a browsable record for the given module name and xml_id.
If not found, raise a ValueError or return None, depending
on the value of `raise_exception`.
"""
return self.xmlid_to_object(cr, uid, "%s.%s" % (module, xml_id), raise_if_not_found=True, context=context)
def _update_dummy(self,cr, uid, model, module, xml_id=False, store=True):
if not xml_id:
return False
id = False
try:
# One step to check the ID is defined and the record actually exists
record = self.get_object(cr, uid, module, xml_id)
if record:
id = record.id
self.loads[(module,xml_id)] = (model,id)
for table, inherit_field in self.pool[model]._inherits.iteritems():
parent_id = record[inherit_field].id
parent_xid = '%s_%s' % (xml_id, table.replace('.', '_'))
self.loads[(module, parent_xid)] = (table, parent_id)
except Exception:
pass
return id
def clear_caches(self):
""" Clears all orm caches on the object's methods
:returns: itself
"""
self.xmlid_lookup.clear_cache(self)
return self
def unlink(self, cr, uid, ids, context=None):
""" Regular unlink method, but make sure to clear the caches. """
self.clear_caches()
return super(ir_model_data,self).unlink(cr, uid, ids, context=context)
def _update(self,cr, uid, model, module, values, xml_id=False, store=True, noupdate=False, mode='init', res_id=False, context=None):
model_obj = self.pool[model]
if not context:
context = {}
# records created during module install should not display the messages of OpenChatter
context = dict(context, install_mode=True)
if xml_id and ('.' in xml_id):
            assert len(xml_id.split('.'))==2, _("'%s' contains too many dots. XML IDs should not contain dots! Dots are used to refer to other modules' data, as in module.reference_id") % xml_id
module, xml_id = xml_id.split('.')
action_id = False
if xml_id:
cr.execute('''SELECT imd.id, imd.res_id, md.id, imd.model, imd.noupdate
FROM ir_model_data imd LEFT JOIN %s md ON (imd.res_id = md.id)
WHERE imd.module=%%s AND imd.name=%%s''' % model_obj._table,
(module, xml_id))
results = cr.fetchall()
for imd_id2,res_id2,real_id2,real_model,noupdate_imd in results:
                # In update mode, do not update a record if its ir.model.data entry is flagged as noupdate
if mode == 'update' and noupdate_imd:
return res_id2
if not real_id2:
self.clear_caches()
cr.execute('delete from ir_model_data where id=%s', (imd_id2,))
res_id = False
else:
assert model == real_model, "External ID conflict, %s already refers to a `%s` record,"\
" you can't define a `%s` record with this ID." % (xml_id, real_model, model)
res_id,action_id = res_id2,imd_id2
if action_id and res_id:
model_obj.write(cr, uid, [res_id], values, context=context)
self.write(cr, SUPERUSER_ID, [action_id], {
'date_update': time.strftime('%Y-%m-%d %H:%M:%S'),
},context=context)
elif res_id:
model_obj.write(cr, uid, [res_id], values, context=context)
if xml_id:
if model_obj._inherits:
for table in model_obj._inherits:
inherit_id = model_obj.browse(cr, uid,
res_id,context=context)[model_obj._inherits[table]]
self.create(cr, SUPERUSER_ID, {
'name': xml_id + '_' + table.replace('.', '_'),
'model': table,
'module': module,
'res_id': inherit_id.id,
'noupdate': noupdate,
},context=context)
self.create(cr, SUPERUSER_ID, {
'name': xml_id,
'model': model,
'module':module,
'res_id':res_id,
'noupdate': noupdate,
},context=context)
else:
if mode=='init' or (mode=='update' and xml_id):
inherit_xml_ids = []
if xml_id:
for table, field_name in model_obj._inherits.items():
xml_ids = self.pool['ir.model.data'].search(cr, uid, [
('module', '=', module),
('name', '=', xml_id + '_' + table.replace('.', '_')),
], context=context)
# XML ID found in the database, try to recover an existing record
if xml_ids:
found_xml_id = self.pool['ir.model.data'].browse(cr, uid, xml_ids[0], context=context)
record = self.pool[found_xml_id.model].browse(cr, uid, [found_xml_id.res_id], context=context)[0]
# The record exists, store the id and don't recreate the XML ID
if record.exists():
inherit_xml_ids.append(found_xml_id.model)
values[field_name] = found_xml_id.res_id
# Orphan XML ID, delete it
else:
found_xml_id.unlink()
res_id = model_obj.create(cr, uid, values, context=context)
if xml_id:
if model_obj._inherits:
for table in model_obj._inherits:
if table in inherit_xml_ids:
continue
inherit_id = model_obj.browse(cr, uid,
res_id,context=context)[model_obj._inherits[table]]
self.create(cr, SUPERUSER_ID, {
'name': xml_id + '_' + table.replace('.', '_'),
'model': table,
'module': module,
'res_id': inherit_id.id,
'noupdate': noupdate,
},context=context)
self.create(cr, SUPERUSER_ID, {
'name': xml_id,
'model': model,
'module': module,
'res_id': res_id,
'noupdate': noupdate
},context=context)
if xml_id and res_id:
self.loads[(module, xml_id)] = (model, res_id)
for table, inherit_field in model_obj._inherits.iteritems():
inherit_id = model_obj.read(cr, uid, [res_id],
[inherit_field])[0][inherit_field]
self.loads[(module, xml_id + '_' + table.replace('.', '_'))] = (table, inherit_id)
return res_id
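    # Sketch of a typical call (illustrative only): when module data is loaded
    # from XML, each <record id="..."> element reaches this method roughly as
    #
    #     self.pool['ir.model.data']._update(cr, uid, 'res.partner', 'my_module',
    #         {'name': 'Partner defined in XML'}, xml_id='partner_demo',
    #         mode='update', noupdate=False)
    #
    # creating the record on first install and updating it on later upgrades
    # unless its ir.model.data entry is flagged noupdate.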
def ir_set(self, cr, uid, key, key2, name, models, value, replace=True, isobject=False, meta=None, xml_id=False):
ir_values_obj = openerp.registry(cr.dbname)['ir.values']
ir_values_obj.set(cr, uid, key, key2, name, models, value, replace, isobject, meta)
return True
def _module_data_uninstall(self, cr, uid, modules_to_remove, context=None):
"""Deletes all the records referenced by the ir.model.data entries
``ids`` along with their corresponding database backed (including
dropping tables, columns, FKs, etc, as long as there is no other
ir.model.data entry holding a reference to them (which indicates that
they are still owned by another module).
Attempts to perform the deletion in an appropriate order to maximize
the chance of gracefully deleting all records.
This step is performed as part of the full uninstallation of a module.
"""
ids = self.search(cr, uid, [('module', 'in', modules_to_remove)])
if uid != 1 and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
context = dict(context or {})
context[MODULE_UNINSTALL_FLAG] = True # enable model/field deletion
ids_set = set(ids)
wkf_todo = []
to_unlink = []
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model
res_id = data.res_id
pair_to_unlink = (model, res_id)
if pair_to_unlink not in to_unlink:
to_unlink.append(pair_to_unlink)
if model == 'workflow.activity':
# Special treatment for workflow activities: temporarily revert their
# incoming transition and trigger an update to force all workflow items
# to move out before deleting them
cr.execute('select res_type,res_id from wkf_instance where id IN (select inst_id from wkf_workitem where act_id=%s)', (res_id,))
wkf_todo.extend(cr.fetchall())
cr.execute("update wkf_transition set condition='True', group_id=NULL, signal=NULL,act_to=act_from,act_from=%s where act_to=%s", (res_id,res_id))
self.invalidate_cache(cr, uid, context=context)
for model,res_id in wkf_todo:
try:
openerp.workflow.trg_write(uid, model, res_id, cr)
except Exception:
_logger.info('Unable to force processing of workflow for item %s@%s in order to leave activity to be deleted', res_id, model, exc_info=True)
def unlink_if_refcount(to_unlink):
for model, res_id in to_unlink:
external_ids = self.search(cr, uid, [('model', '=', model),('res_id', '=', res_id)])
if set(external_ids)-ids_set:
# if other modules have defined this record, we must not delete it
continue
if model == 'ir.model.fields':
# Don't remove the LOG_ACCESS_COLUMNS unless _log_access
# has been turned off on the model.
field = self.pool[model].browse(cr, uid, [res_id], context=context)[0]
if not field.exists():
_logger.info('Deleting orphan external_ids %s', external_ids)
self.unlink(cr, uid, external_ids)
continue
if field.name in openerp.models.LOG_ACCESS_COLUMNS and self.pool[field.model]._log_access:
continue
if field.name == 'id':
continue
_logger.info('Deleting %s@%s', res_id, model)
try:
cr.execute('SAVEPOINT record_unlink_save')
self.pool[model].unlink(cr, uid, [res_id], context=context)
except Exception:
_logger.info('Unable to delete %s@%s', res_id, model, exc_info=True)
cr.execute('ROLLBACK TO SAVEPOINT record_unlink_save')
else:
cr.execute('RELEASE SAVEPOINT record_unlink_save')
# Remove non-model records first, then model fields, and finish with models
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model not in ('ir.model','ir.model.fields','ir.model.constraint'))
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model.constraint')
ir_module_module = self.pool['ir.module.module']
ir_model_constraint = self.pool['ir.model.constraint']
modules_to_remove_ids = ir_module_module.search(cr, uid, [('name', 'in', modules_to_remove)], context=context)
constraint_ids = ir_model_constraint.search(cr, uid, [('module', 'in', modules_to_remove_ids)], context=context)
ir_model_constraint._module_data_uninstall(cr, uid, constraint_ids, context)
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model.fields')
ir_model_relation = self.pool['ir.model.relation']
relation_ids = ir_model_relation.search(cr, uid, [('module', 'in', modules_to_remove_ids)])
ir_model_relation._module_data_uninstall(cr, uid, relation_ids, context)
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model')
cr.commit()
self.unlink(cr, uid, ids, context)
def _process_end(self, cr, uid, modules):
""" Clear records removed from updated module data.
This method is called at the end of the module loading process.
        It is meant to remove records that are no longer present in the
        updated data. Such records are recognised as the ones with an xml id
        and a module in ir_model_data, with noupdate set to false, but not
        present in self.loads.
"""
if not modules or config.get('import_partial'):
return True
bad_imd_ids = []
context = {MODULE_UNINSTALL_FLAG: True}
cr.execute("""SELECT id,name,model,res_id,module FROM ir_model_data
WHERE module IN %s AND res_id IS NOT NULL AND noupdate=%s ORDER BY id DESC
""", (tuple(modules), False))
for (id, name, model, res_id, module) in cr.fetchall():
if (module, name) not in self.loads:
if model in self.pool:
_logger.info('Deleting %s@%s (%s.%s)', res_id, model, module, name)
if self.pool[model].exists(cr, uid, [res_id], context=context):
self.pool[model].unlink(cr, uid, [res_id], context=context)
else:
bad_imd_ids.append(id)
if bad_imd_ids:
self.unlink(cr, uid, bad_imd_ids, context=context)
self.loads.clear()
class wizard_model_menu(osv.osv_memory):
_name = 'wizard.ir.model.menu.create'
_columns = {
'menu_id': fields.many2one('ir.ui.menu', 'Parent Menu', required=True),
'name': fields.char('Menu Name', required=True),
}
def menu_create(self, cr, uid, ids, context=None):
if not context:
context = {}
model_pool = self.pool.get('ir.model')
for menu in self.browse(cr, uid, ids, context):
model = model_pool.browse(cr, uid, context.get('model_id'), context=context)
val = {
'name': menu.name,
'res_model': model.model,
'view_type': 'form',
'view_mode': 'tree,form'
}
action_id = self.pool.get('ir.actions.act_window').create(cr, uid, val)
self.pool.get('ir.ui.menu').create(cr, uid, {
'name': menu.name,
'parent_id': menu.menu_id.id,
'action': 'ir.actions.act_window,%d' % (action_id,),
'icon': 'STOCK_INDENT'
}, context)
return {'type':'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 308,184,714,977,258,300 | 47.42494 | 207 | 0.557351 | false |
job/exscript | tests/Exscript/util/interactTest.py | 6 | 2739 | import sys, unittest, re, os.path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..', 'src'))
from tempfile import NamedTemporaryFile
import Exscript.util.interact
from Exscript.util.interact import InputHistory
class InputHistoryTest(unittest.TestCase):
CORRELATE = InputHistory
def setUp(self):
t = NamedTemporaryFile()
self.history = InputHistory(t.name)
def testConstructor(self):
t = NamedTemporaryFile()
h = InputHistory()
h = InputHistory(t.name)
h = InputHistory(t.name, 'foo')
h.set('aaa', 'bbb')
self.assertEqual(open(t.name).read(), '[foo]\naaa = bbb\n\n')
def testGet(self):
self.assertEqual(self.history.get('bar'), None)
self.assertEqual(self.history.get('bar', None), None)
self.assertEqual(self.history.get('bar', '...'), '...')
self.history.set('bar', 'myvalue')
self.assertEqual(self.history.get('bar'), 'myvalue')
self.assertEqual(self.history.get('bar', '...'), 'myvalue')
self.assertEqual(self.history.get('bar', None), 'myvalue')
def testSet(self):
self.testGet()
self.history.set('bar', 'myvalue2')
self.assertEqual(self.history.get('bar'), 'myvalue2')
self.assertEqual(self.history.get('bar', '...'), 'myvalue2')
self.assertEqual(self.history.get('bar', None), 'myvalue2')
self.history.set('bar', None)
self.assertEqual(self.history.get('bar'), 'myvalue2')
self.assertEqual(self.history.get('bar', '...'), 'myvalue2')
self.assertEqual(self.history.get('bar', None), 'myvalue2')
class interactTest(unittest.TestCase):
CORRELATE = Exscript.util.interact
def testPrompt(self):
from Exscript.util.interact import prompt
# Can't really be tested, as it is interactive.
def testGetFilename(self):
from Exscript.util.interact import get_filename
# Can't really be tested, as it is interactive.
def testGetUser(self):
from Exscript.util.interact import get_user
# Can't really be tested, as it is interactive.
def testGetLogin(self):
from Exscript.util.interact import get_login
# Can't really be tested, as it is interactive.
def testReadLogin(self):
from Exscript.util.interact import read_login
# Can't really be tested, as it is interactive.
def suite():
loader = unittest.TestLoader()
thesuite = unittest.TestSuite()
thesuite.addTest(loader.loadTestsFromTestCase(InputHistoryTest))
thesuite.addTest(loader.loadTestsFromTestCase(interactTest))
return thesuite
if __name__ == '__main__':
unittest.TextTestRunner(verbosity = 2).run(suite())
| gpl-2.0 | -6,607,437,794,478,139,000 | 36.520548 | 84 | 0.648412 | false |
ntt-sic/cinder | cinder/tests/backup/fake_swift_client.py | 5 | 4331 | # Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib
import json
import os
import socket
import zlib
from cinder.openstack.common import log as logging
from swiftclient import client as swift
LOG = logging.getLogger(__name__)
class FakeSwiftClient(object):
"""Logs calls instead of executing."""
def __init__(self, *args, **kwargs):
pass
@classmethod
def Connection(self, *args, **kargs):
LOG.debug("fake FakeSwiftClient Connection")
return FakeSwiftConnection()
class FakeSwiftConnection(object):
"""Logging calls instead of executing"""
def __init__(self, *args, **kwargs):
pass
def head_container(self, container):
LOG.debug("fake head_container(%s)" % container)
if container == 'missing_container':
raise swift.ClientException('fake exception',
http_status=httplib.NOT_FOUND)
elif container == 'unauthorized_container':
raise swift.ClientException('fake exception',
http_status=httplib.UNAUTHORIZED)
elif container == 'socket_error_on_head':
raise socket.error(111, 'ECONNREFUSED')
pass
def put_container(self, container):
LOG.debug("fake put_container(%s)" % container)
pass
def get_container(self, container, **kwargs):
LOG.debug("fake get_container(%s)" % container)
fake_header = None
fake_body = [{'name': 'backup_001'},
{'name': 'backup_002'},
{'name': 'backup_003'}]
return fake_header, fake_body
def head_object(self, container, name):
LOG.debug("fake put_container(%s, %s)" % (container, name))
return {'etag': 'fake-md5-sum'}
def get_object(self, container, name):
LOG.debug("fake get_object(%s, %s)" % (container, name))
if container == 'socket_error_on_get':
raise socket.error(111, 'ECONNREFUSED')
if 'metadata' in name:
fake_object_header = None
metadata = {}
if container == 'unsupported_version':
metadata['version'] = '9.9.9'
else:
metadata['version'] = '1.0.0'
metadata['backup_id'] = 123
metadata['volume_id'] = 123
metadata['backup_name'] = 'fake backup'
metadata['backup_description'] = 'fake backup description'
metadata['created_at'] = '2013-02-19 11:20:54,805'
metadata['objects'] = [{
'backup_001': {'compression': 'zlib', 'length': 10},
'backup_002': {'compression': 'zlib', 'length': 10},
'backup_003': {'compression': 'zlib', 'length': 10}
}]
metadata_json = json.dumps(metadata, sort_keys=True, indent=2)
fake_object_body = metadata_json
return (fake_object_header, fake_object_body)
fake_header = None
fake_object_body = os.urandom(1024 * 1024)
return (fake_header, zlib.compress(fake_object_body))
def put_object(self, container, name, reader, content_length=None,
etag=None, chunk_size=None, content_type=None,
headers=None, query_string=None):
LOG.debug("fake put_object(%s, %s)" % (container, name))
if container == 'socket_error_on_put':
raise socket.error(111, 'ECONNREFUSED')
return 'fake-md5-sum'
def delete_object(self, container, name):
LOG.debug("fake delete_object(%s, %s)" % (container, name))
if container == 'socket_error_on_delete':
raise socket.error(111, 'ECONNREFUSED')
pass
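# Illustrative sketch (not part of the original module): tests typically
# substitute this fake for the real swiftclient, e.g. via mox stubs
# (exact import paths assumed):
#
#     from swiftclient import client as swift
#     from cinder.tests.backup import fake_swift_client
#     self.stubs.Set(swift, 'Connection',
#                    fake_swift_client.FakeSwiftConnection)
#
# after which code calling swift.Connection(...) receives the fake, and the
# special container names above ('missing_container', 'socket_error_on_get',
# ...) can be used to drive the error paths.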
| apache-2.0 | 3,122,307,699,989,526,000 | 37.327434 | 78 | 0.59432 | false |
sfstpala/Victory-Chat | markdown/extensions/html_tidy.py | 2 | 2089 | #!/usr/bin/env python
"""
HTML Tidy Extension for Python-Markdown
=======================================
Runs [HTML Tidy][] on the output of Python-Markdown using the [uTidylib][]
Python wrapper. Both libtidy and uTidylib must be installed on your system.
Note than any Tidy [options][] can be passed in as extension configs. So,
for example, to output HTML rather than XHTML, set ``output_xhtml=0``. To
indent the output, set ``indent=auto`` and to have Tidy wrap the output in
``<html>`` and ``<body>`` tags, set ``show_body_only=0``.
[HTML Tidy]: http://tidy.sourceforge.net/
[uTidylib]: http://utidylib.berlios.de/
[options]: http://tidy.sourceforge.net/docs/quickref.html
Copyright (c)2008 [Waylan Limberg](http://achinghead.com)
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
Dependencies:
* [Python2.3+](http://python.org)
* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/)
* [HTML Tidy](http://utidylib.berlios.de/)
* [uTidylib](http://utidylib.berlios.de/)
"""
import markdown
import tidy
class TidyExtension(markdown.Extension):
def __init__(self, configs):
# Set defaults to match typical markdown behavior.
self.config = dict(output_xhtml=1,
show_body_only=1,
)
        # Merge in user-defined configs, overriding any defaults where necessary.
for c in configs:
self.config[c[0]] = c[1]
def extendMarkdown(self, md, md_globals):
# Save options to markdown instance
md.tidy_options = self.config
# Add TidyProcessor to postprocessors
md.postprocessors['tidy'] = TidyProcessor(md)
class TidyProcessor(markdown.postprocessors.Postprocessor):
def run(self, text):
# Pass text to Tidy. As Tidy does not accept unicode we need to encode
# it and decode its return value.
return str(tidy.parseString(text.encode('utf-8'),
**self.markdown.tidy_options))
def makeExtension(configs=None):
return TidyExtension(configs=configs)
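# Usage sketch (illustrative, not part of the extension): with the Markdown
# 2.x API this extension can be enabled by name,
#
#     import markdown
#     html = markdown.markdown(text, extensions=['html_tidy'])
#
# and Tidy options can be supplied as extension configs, e.g.
#
#     md = markdown.Markdown(extensions=[makeExtension([('indent', 'auto')])])
#     html = md.convert(text)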
| isc | 4,063,441,977,045,462,500 | 32.693548 | 78 | 0.651987 | false |
lmprice/ansible | lib/ansible/plugins/lookup/aws_service_ip_ranges.py | 102 | 3425 | # (c) 2016 James Turner <[email protected]>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: aws_service_ip_ranges
author:
- James Turner <[email protected]>
version_added: "2.5"
requirements:
- must have public internet connectivity
short_description: Look up the IP ranges for services provided in AWS such as EC2 and S3.
description:
- AWS publishes IP ranges used on the public internet by EC2, S3, CloudFront, CodeBuild, Route53, and Route53 Health Checking.
- This module produces a list of all the ranges (by default) or can narrow down the list to the specified region or service.
options:
service:
      description: 'The service to filter ranges by. Options: EC2, S3, CLOUDFRONT, CODEBUILD, ROUTE53, ROUTE53_HEALTHCHECKS'
region:
description: 'The AWS region to narrow the ranges to. Examples: us-east-1, eu-west-2, ap-southeast-1'
"""
EXAMPLES = """
vars:
ec2_ranges: "{{ lookup('aws_service_ip_ranges', region='ap-southeast-2', service='EC2', wantlist=True) }}"
tasks:
- name: "use list return option and iterate as a loop"
debug: msg="{% for cidr in ec2_ranges %}{{ cidr }} {% endfor %}"
# "52.62.0.0/15 52.64.0.0/17 52.64.128.0/17 52.65.0.0/16 52.95.241.0/24 52.95.255.16/28 54.66.0.0/16 "
- name: "Pull S3 IP ranges, and print the default return style"
debug: msg="{{ lookup('aws_service_ip_ranges', region='us-east-1', service='S3') }}"
# "52.92.16.0/20,52.216.0.0/15,54.231.0.0/17"
"""
RETURN = """
_raw:
description: comma-separated list of CIDR ranges
"""
import json
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
from ansible.module_utils._text import to_native
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
try:
resp = open_url('https://ip-ranges.amazonaws.com/ip-ranges.json')
amazon_response = json.load(resp)['prefixes']
except getattr(json.decoder, 'JSONDecodeError', ValueError) as e:
# on Python 3+, json.decoder.JSONDecodeError is raised for bad
# JSON. On 2.x it's a ValueError
raise AnsibleError("Could not decode AWS IP ranges: %s" % to_native(e))
except HTTPError as e:
raise AnsibleError("Received HTTP error while pulling IP ranges: %s" % to_native(e))
except SSLValidationError as e:
raise AnsibleError("Error validating the server's certificate for: %s" % to_native(e))
except URLError as e:
raise AnsibleError("Failed look up IP range service: %s" % to_native(e))
except ConnectionError as e:
raise AnsibleError("Error connecting to IP range service: %s" % to_native(e))
if 'region' in kwargs:
region = kwargs['region']
amazon_response = (item for item in amazon_response if item['region'] == region)
if 'service' in kwargs:
service = str.upper(kwargs['service'])
amazon_response = (item for item in amazon_response if item['service'] == service)
return [item['ip_prefix'] for item in amazon_response]
| gpl-3.0 | -2,853,441,205,041,305,600 | 42.35443 | 128 | 0.682044 | false |
liuyxpp/blohg | docs/conf.py | 2 | 7298 | # -*- coding: utf-8 -*-
#
# blohg documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 26 23:45:47 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'blohg'
copyright = u'2010-2013, Rafael G. Martins'
cwd = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(cwd, '..', 'blohg'))
from version import version as release
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = release.split('+')[0]
# The full version, including alpha/beta/rc tags.
#release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'blohgdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'blohg.tex', u'blohg Documentation',
u'Rafael G. Martins', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'blohg', u'blohg Documentation',
[u'Rafael G. Martins'], 1)
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {}
highlight_language = 'none'
| gpl-2.0 | -9,159,897,205,813,033,000 | 31.14978 | 80 | 0.708961 | false |
codeforamerica/skillcamp | ENV/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/euckrprober.py | 2931 | 1675 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import EUCKRSMModel
class EUCKRProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCKRSMModel)
self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "EUC-KR"
| mit | -6,183,980,821,611,049,000 | 38.880952 | 69 | 0.717015 | false |
aallai/pyobfsproxy | obfsproxy/transports/obfs3_dh.py | 1 | 2685 | import binascii
import obfsproxy.common.rand as rand
def int_to_bytes(lvalue, width):
fmt = '%%.%dx' % (2*width)
return binascii.unhexlify(fmt % (lvalue & ((1L<<8*width)-1)))
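# e.g. int_to_bytes(0x0102, 4) == '\x00\x00\x01\x02': fixed-width big-endian
# encoding, masking off anything wider than `width` bytes.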
class UniformDH:
"""
This is a class that implements a DH handshake that uses public
keys that are indistinguishable from 192-byte random strings.
The idea (and even the implementation) was suggested by Ian
Goldberg in:
https://lists.torproject.org/pipermail/tor-dev/2012-December/004245.html
https://lists.torproject.org/pipermail/tor-dev/2012-December/004248.html
Attributes:
mod, the modulus of our DH group.
g, the generator of our DH group.
group_len, the size of the group in bytes.
priv_str, a byte string representing our DH private key.
priv, our DH private key as an integer.
pub_str, a byte string representing our DH public key.
pub, our DH public key as an integer.
shared_secret, our DH shared secret.
"""
# 1536-bit MODP Group from RFC3526
mod = int(
"""FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1
29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD
EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245
E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED
EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D
C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F
83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D
670C354E 4ABC9804 F1746C08 CA237327 FFFFFFFF FFFFFFFF""".replace(' ','').replace('\n','').replace('\t',''), 16)
g = 2
group_len = 192 # bytes (1536-bits)
def __init__(self):
# Generate private key
self.priv_str = rand.random_bytes(self.group_len)
self.priv = int(binascii.hexlify(self.priv_str), 16)
# Make the private key even
flip = self.priv % 2
self.priv -= flip
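        # (with an even exponent, (p - Y)^priv == Y^priv mod p, so the
        # coin-flip negation of the public key below never changes the
        # shared secret either side derives)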
# Generate public key
self.pub = pow(self.g, self.priv, self.mod)
if flip == 1:
self.pub = self.mod - self.pub
self.pub_str = int_to_bytes(self.pub, self.group_len)
self.shared_secret = None
def get_public(self):
return self.pub_str
def get_secret(self, their_pub_str):
"""
Given the public key of the other party as a string of bytes,
calculate our shared secret.
This might raise a ValueError since 'their_pub_str' is
attacker controlled.
"""
their_pub = int(binascii.hexlify(their_pub_str), 16)
self.shared_secret = pow(their_pub, self.priv, self.mod)
return int_to_bytes(self.shared_secret, self.group_len)
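# Minimal handshake sketch (added for illustration, not part of the original
# module): both sides derive the same 192-byte shared secret.
#
#     alice, bob = UniformDH(), UniformDH()
#     assert (alice.get_secret(bob.get_public()) ==
#             bob.get_secret(alice.get_public()))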
| bsd-3-clause | 4,533,037,761,407,175,700 | 34.328947 | 122 | 0.653631 | false |
chrisfranzen/django | django/contrib/auth/tokens.py | 296 | 2631 | from datetime import date
from django.conf import settings
from django.utils.http import int_to_base36, base36_to_int
from django.utils.crypto import constant_time_compare, salted_hmac
from django.utils import six
class PasswordResetTokenGenerator(object):
"""
Strategy object used to generate and check tokens for the password
reset mechanism.
"""
def make_token(self, user):
"""
Returns a token that can be used once to do a password reset
for the given user.
"""
return self._make_token_with_timestamp(user, self._num_days(self._today()))
def check_token(self, user, token):
"""
Check that a password reset token is correct for a given user.
"""
# Parse the token
try:
ts_b36, hash = token.split("-")
except ValueError:
return False
try:
ts = base36_to_int(ts_b36)
except ValueError:
return False
# Check that the timestamp/uid has not been tampered with
if not constant_time_compare(self._make_token_with_timestamp(user, ts), token):
return False
# Check the timestamp is within limit
if (self._num_days(self._today()) - ts) > settings.PASSWORD_RESET_TIMEOUT_DAYS:
return False
return True
def _make_token_with_timestamp(self, user, timestamp):
# timestamp is number of days since 2001-1-1. Converted to
# base 36, this gives us a 3 digit string until about 2121
ts_b36 = int_to_base36(timestamp)
# By hashing on the internal state of the user and using state
# that is sure to change (the password salt will change as soon as
# the password is set, at least for current Django auth, and
# last_login will also change), we produce a hash that will be
# invalid as soon as it is used.
# We limit the hash to 20 chars to keep URL short
key_salt = "django.contrib.auth.tokens.PasswordResetTokenGenerator"
# Ensure results are consistent across DB backends
login_timestamp = user.last_login.replace(microsecond=0, tzinfo=None)
value = (six.text_type(user.pk) + user.password +
six.text_type(login_timestamp) + six.text_type(timestamp))
hash = salted_hmac(key_salt, value).hexdigest()[::2]
return "%s-%s" % (ts_b36, hash)
def _num_days(self, dt):
return (dt - date(2001, 1, 1)).days
def _today(self):
# Used for mocking in tests
return date.today()
default_token_generator = PasswordResetTokenGenerator()
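# Usage sketch (illustrative): the token is generated, emailed inside a reset
# URL, then verified; check_token() returns False once the token is older than
# settings.PASSWORD_RESET_TIMEOUT_DAYS or after user.password/user.last_login
# change (both feed the HMAC), making each token effectively single-use.
#
#     token = default_token_generator.make_token(user)
#     ...
#     if default_token_generator.check_token(user, token):
#         pass  # allow the password reset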
| bsd-3-clause | 2,530,676,891,755,478,500 | 35.041096 | 87 | 0.632839 | false |