| repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses 15 values) |
---|---|---|---|---|---|
cmauec/Cloud-Vision-Api | oauth2client/anyjson/simplejson/tests/test_for_json.py | 143 | 2767 | import unittest
import simplejson as json
class ForJson(object):
def for_json(self):
return {'for_json': 1}
class NestedForJson(object):
def for_json(self):
return {'nested': ForJson()}
class ForJsonList(object):
def for_json(self):
return ['list']
class DictForJson(dict):
def for_json(self):
return {'alpha': 1}
class ListForJson(list):
def for_json(self):
return ['list']
class TestForJson(unittest.TestCase):
def assertRoundTrip(self, obj, other, for_json=True):
if for_json is None:
# None will use the default
s = json.dumps(obj)
else:
s = json.dumps(obj, for_json=for_json)
self.assertEqual(
json.loads(s),
other)
def test_for_json_encodes_stand_alone_object(self):
self.assertRoundTrip(
ForJson(),
ForJson().for_json())
def test_for_json_encodes_object_nested_in_dict(self):
self.assertRoundTrip(
{'hooray': ForJson()},
{'hooray': ForJson().for_json()})
def test_for_json_encodes_object_nested_in_list_within_dict(self):
self.assertRoundTrip(
{'list': [0, ForJson(), 2, 3]},
{'list': [0, ForJson().for_json(), 2, 3]})
def test_for_json_encodes_object_nested_within_object(self):
self.assertRoundTrip(
NestedForJson(),
{'nested': {'for_json': 1}})
def test_for_json_encodes_list(self):
self.assertRoundTrip(
ForJsonList(),
ForJsonList().for_json())
def test_for_json_encodes_list_within_object(self):
self.assertRoundTrip(
{'nested': ForJsonList()},
{'nested': ForJsonList().for_json()})
def test_for_json_encodes_dict_subclass(self):
self.assertRoundTrip(
DictForJson(a=1),
DictForJson(a=1).for_json())
def test_for_json_encodes_list_subclass(self):
self.assertRoundTrip(
ListForJson(['l']),
ListForJson(['l']).for_json())
def test_for_json_ignored_if_not_true_with_dict_subclass(self):
for for_json in (None, False):
self.assertRoundTrip(
DictForJson(a=1),
{'a': 1},
for_json=for_json)
def test_for_json_ignored_if_not_true_with_list_subclass(self):
for for_json in (None, False):
self.assertRoundTrip(
ListForJson(['l']),
['l'],
for_json=for_json)
def test_raises_typeerror_if_for_json_not_true_with_object(self):
self.assertRaises(TypeError, json.dumps, ForJson())
self.assertRaises(TypeError, json.dumps, ForJson(), for_json=False)
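# A minimal usage sketch of the for_json hook exercised above. The _Point
# class and demo helper are illustrative, not part of the original suite:
# any object exposing a for_json() method is serialized via that method's
# return value once json.dumps(..., for_json=True) is passed.
class _Point(object):
    def __init__(self, x, y):
        self.x, self.y = x, y
    def for_json(self):
        return {'x': self.x, 'y': self.y}

def _demo_for_json():
    # dumps consults _Point.for_json() instead of raising TypeError
    return json.dumps(_Point(1, 2), for_json=True)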
| gpl-2.0 |
sintefmath/Splipy | splipy/io/grdecl.py | 1 | 14711 | import numpy as np
from itertools import product, chain
from splipy import Surface, Volume, SplineObject, BSplineBasis
from splipy import surface_factory, volume_factory, curve_factory
from splipy.io import G2
from splipy.utils import ensure_listlike
from .master import MasterIO
import re
import warnings
from scipy.spatial import Delaunay
try:
    from scipy.spatial import QhullError
except ImportError:  # older SciPy exposes QhullError only via the private qhull module
    from scipy.spatial.qhull import QhullError
from tqdm import tqdm
import cv2
import h5py
class Box(object):
def __init__(self, x):
self.x = x
class DiscontBoxMesh(object):
def __init__(self, n, coord, zcorn):
nx, ny, nz = n
X = np.empty(n + 1, dtype=object)
Xz = np.zeros((nx + 1, ny + 1, 2 * nz, 3))
cells = np.empty(n, dtype=object)
for i, j, k in product(range(nx), range(ny), range(nz)):
x = []
for k0, j0, i0 in product(range(2), repeat=3):
# Interpolate to find the x,y values of this point
zmin, zmax = coord[i+i0, j+j0, :, 2]
z = zcorn[2*i+i0, 2*j+j0, 2*k+k0]
t = (z - zmax) / (zmin - zmax)
point = coord[i+i0, j+j0, 0] * t + coord[i+i0, j+j0, 1] * (1 - t)
x.append(point)
if X[i+i0,j+j0,k+k0] is None:
X[i+i0,j+j0,k+k0] = [point]
else:
X[i+i0,j+j0,k+k0].append(point)
Xz[i+i0,j+j0,2*k+k0,:] = point
cells[i,j,k] = Box(x)
self.X = X
self.Xz = Xz
self.n = n
def hull_or_none(x):
try:
return Delaunay(x)
except QhullError:
return None
self.plane_hull = np.array([
[Delaunay(np.reshape(coord[i:i+2, j:j+2, :, :], (8,3))) for j in range(ny)]
for i in range(nx)
], dtype=object)
self.hull = np.array([
[[hull_or_none(cell.x) for cell in cell_tower] for cell_tower in cells_tmp]
for cells_tmp in cells
], dtype=object)
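# A standalone sketch of the corner-point interpolation performed in the
# loop above (names are illustrative): each ZCORN depth z is placed on its
# COORD pillar by linear interpolation between the pillar endpoints.
def _interpolate_pillar(p0, p1, z):
    """p0, p1: 3-vectors for the pillar endpoints, with depths p0[2], p1[2];
    z: the target depth. t = 1 recovers p0 and t = 0 recovers p1."""
    zmin, zmax = p0[2], p1[2]
    t = (z - zmax) / (zmin - zmax)
    return p0 * t + p1 * (1 - t)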
def cell_at(self, x, guess=None):
# First, find the 'tower' containing x
check = -1
last_i = last_j = 0
numb_hits = []
if guess is not None:
i, j, _ = guess
check = self.plane_hull[i,j].find_simplex(x)
# if check > -1: print('correct tower!')
if check >= 0:
numb_hits += [(i,j)]
last_i = i
last_j = j
check = -1
if check == -1:
for (i, j), hull in np.ndenumerate(self.plane_hull):
check = hull.find_simplex(x)
if check >= 0:
numb_hits += [(i,j)]
last_i = i
last_j = j
i,j = last_i,last_j
# if len(numb_hits) != 1:
# print(numb_hits)
# print(x)
# print(guess)
# print(check)
# assert check >= 0
assert len(numb_hits) >= 1
# Find the correct cell in the 'tower'
check = -1
if guess is not None:
_, _, k = guess
check = self.hull[i,j,k].find_simplex(x)
# if check > -1: print('correct cell!')
if check == -1:
for (i,j) in numb_hits:
for k, hull in enumerate(self.hull[i,j,:]):
if hull is None: continue
check = hull.find_simplex(x)
if check >= 0: break
if check >= 0: break
if check < 0:
print(numb_hits)
print(x)
print(guess)
print(check)
# print(f'Returns {i} {j} {k} : {check}')
assert check >= 0
return i, j, k
def get_c0_avg(self):
"""Compute best-approximation vertices for a continuous mesh by averaging the location of all
corners that 'should' coincide.
"""
return np.array([[[np.mean(k,axis=0) for k in j] for j in i] for i in self.X])
def get_discontinuous_all(self):
"""Return a list of vertices suitable for a fully discontinuous mesh."""
return list(chain.from_iterable(xs[::-1] for xs in self.X.T.flat))
def get_discontinuous_z(self):
"""Return a list of vertices suitable for a mixed continuity mesh."""
return self.Xz
class GRDECL(MasterIO):
def __init__(self, filename):
if not filename.endswith('.grdecl'):
filename += '.grdecl'
self.filename = filename
self.attribute = {}
def __enter__(self):
self.fstream = open(self.filename, 'r')
self.line_number = 0
return self
def read_specgrid(self):
args = next(self.fstream).strip().split()
return np.array(args[:3], dtype=np.int32)
def read_coord(self):
nx, ny = self.n[:2]
ans = np.zeros((nx + 1, ny + 1, 2, 3))
for j, i in product(range(ny+1), range(nx+1)):
args = next(self.fstream).split()
ans[i,j,0,:] = np.array(args[:3], dtype=np.float64)
ans[i,j,1,:] = np.array(args[3:], dtype=np.float64)
return ans
def read_zcorn(self):
ntot = np.prod(self.n)*8
numbers = []
while len(numbers) < ntot:
numbers += next(self.fstream).split()
numbers = numbers[:ntot] # strip away any '/' characters at the end of the line
return np.reshape(np.array(numbers, dtype=np.float64), self.n*2, order='F')
def cell_property(self, dtype=np.float64):
ntot = np.prod(self.n)
numbers = []
while len(numbers) < ntot:
numbers += next(self.fstream).split()
numbers = numbers[:ntot] # strip away any '/' characters at the end of the line
return np.array(numbers, dtype=dtype)
def read(self):
for line in self.fstream:
line = line.strip().lower()
if line == 'specgrid':
self.n = self.read_specgrid()
elif line == 'coord':
self.coord = self.read_coord()
elif line == 'zcorn':
self.zcorn = self.read_zcorn()
elif line in {'actnum', 'permx', 'permy', 'permz', 'poro', 'satnum', 'rho', 'kx', 'kz', 'emodulus25', 'poissonratio25', 'pressure', }:
dtype = np.int32 if line in {'actnum', 'satnum'} else np.float64
self.attribute[line] = self.cell_property(dtype)
elif line in {'grid', '/', ''} or line.startswith('--'):
pass
elif not re.match('[0-9]', line[0]):
warnings.showwarning(
'Unknown keyword "{}" encountered in file'.format(line.split()[0]),
SyntaxWarning, self.filename, self.line_number, line=[],
)
else:
pass # silently skip large number blocks
self.raw = DiscontBoxMesh(self.n, self.coord, self.zcorn)
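# For reference, a minimal sketch of the GRDECL layout read() expects
# (illustrative; real exports are far larger):
#
#   SPECGRID
#   2 2 1 1 F /
#   COORD
#   ... (nx+1)*(ny+1) lines, each "x0 y0 z0  x1 y1 z1" ...
#   ZCORN
#   ... 8*nx*ny*nz depths, terminated by '/' ...
#   ACTNUM
#   ... nx*ny*nz integers /
#
# Lines starting with '--' are comments; unrecognized keywords trigger the
# SyntaxWarning above.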
def get_c0_mesh(self):
# Create the C0-mesh
nx, ny, nz = self.n
X = self.raw.get_c0_avg()
b1 = BSplineBasis(2, [0] + [i/nx for i in range(nx+1)] + [1])
b2 = BSplineBasis(2, [0] + [i/ny for i in range(ny+1)] + [1])
b3 = BSplineBasis(2, [0] + [i/nz for i in range(nz+1)] + [1])
c0_vol = volume_factory.interpolate(X, [b1, b2, b3])
return c0_vol
def get_cm1_mesh(self):
# Create the C^{-1} mesh
nx, ny, nz = self.n
Xm1 = self.raw.get_discontinuous_all()
b1 = BSplineBasis(2, sorted(list(range(self.n[0]+1))*2))
b2 = BSplineBasis(2, sorted(list(range(self.n[1]+1))*2))
b3 = BSplineBasis(2, sorted(list(range(self.n[2]+1))*2))
discont_vol = Volume(b1, b2, b3, Xm1)
return discont_vol
def get_mixed_cont_mesh(self):
# Create mixed discontinuity mesh: C^0, C^0, C^{-1}
nx, ny, nz = self.n
Xz = self.raw.get_discontinuous_z()
b1 = BSplineBasis(2, sorted(list(range(self.n[0]+1))+[0,self.n[0]]))
b2 = BSplineBasis(2, sorted(list(range(self.n[1]+1))+[0,self.n[1]]))
b3 = BSplineBasis(2, sorted(list(range(self.n[2]+1))*2))
true_vol = Volume(b1, b2, b3, Xz, raw=True)
return true_vol
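# To make the continuity of the three constructions concrete, the order-2
# (linear) knot vectors for n = 2 elements per direction are:
#   get_c0_mesh:         [0, 0, 0.5, 1, 1] -- single interior knots,
#                        so the basis is C^0 across elements
#   get_cm1_mesh:        sorted([0,1,2]*2) = [0, 0, 1, 1, 2, 2] -- doubled
#                        interior knots, hence C^{-1} everywhere
#   get_mixed_cont_mesh: [0, 0, 1, 2, 2] in u and v (C^0) but doubled
#                        knots in w, so only the z-direction is C^{-1}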
def texture(self, p, ngeom, ntexture, method='full', irange=None, jrange=None):
# Use fresh lists per call: irange/jrange are mutated below, so shared
# mutable default arguments would leak state between calls.
if irange is None: irange = [None, None]
if jrange is None: jrange = [None, None]
# Set the dimensions of geometry and texture map
# ngeom = np.floor(self.n / (p-1))
# ntexture = np.floor(self.n * n)
# ngeom = ngeom.astype(np.int32)
# ntexture = ntexture.astype(np.int32)
ngeom = ensure_listlike(ngeom, 3)
ntexture = ensure_listlike(ntexture, 3)
p = ensure_listlike(p, 3)
# Create the geometry
ngx, ngy, ngz = ngeom
b1 = BSplineBasis(p[0], [0]*(p[0]-1) + [i/ngx for i in range(ngx+1)] + [1]*(p[0]-1))
b2 = BSplineBasis(p[1], [0]*(p[1]-1) + [i/ngy for i in range(ngy+1)] + [1]*(p[1]-1))
b3 = BSplineBasis(p[2], [0]*(p[2]-1) + [i/ngz for i in range(ngz+1)] + [1]*(p[2]-1))
l2_fit = surface_factory.least_square_fit
vol = self.get_c0_mesh()
i = slice(irange[0], irange[1], None)
j = slice(jrange[0], jrange[1], None)
# special case number of evaluation points for full domain
if irange[1] == None: irange[1] = vol.shape[0]
if jrange[1] == None: jrange[1] = vol.shape[1]
if irange[0] == None: irange[0] = 0
if jrange[0] == None: jrange[0] = 0
nu = np.diff(irange)
nv = np.diff(jrange)
nw = vol.shape[2]
u = np.linspace(0, 1, nu)
v = np.linspace(0, 1, nv)
w = np.linspace(0, 1, nw)
crvs = []
crvs.append(curve_factory.polygon(vol[i ,jrange[0] , 0,:].squeeze()))
crvs.append(curve_factory.polygon(vol[i ,jrange[0] ,-1,:].squeeze()))
crvs.append(curve_factory.polygon(vol[i ,jrange[1]-1, 0,:].squeeze()))
crvs.append(curve_factory.polygon(vol[i ,jrange[1]-1,-1,:].squeeze()))
crvs.append(curve_factory.polygon(vol[irange[0] ,j , 0,:].squeeze()))
crvs.append(curve_factory.polygon(vol[irange[0] ,j ,-1,:].squeeze()))
crvs.append(curve_factory.polygon(vol[irange[1]-1,j , 0,:].squeeze()))
crvs.append(curve_factory.polygon(vol[irange[1]-1,j ,-1,:].squeeze()))
crvs.append(curve_factory.polygon(vol[irange[0] ,jrange[0] , :,:].squeeze()))
crvs.append(curve_factory.polygon(vol[irange[0] ,jrange[1]-1, :,:].squeeze()))
crvs.append(curve_factory.polygon(vol[irange[1]-1,jrange[0] , :,:].squeeze()))
crvs.append(curve_factory.polygon(vol[irange[1]-1,jrange[1]-1, :,:].squeeze()))
# with G2('curves.g2') as myfile:
# myfile.write(crvs)
# print('Written curve.g2')
if method == 'full':
bottom = l2_fit(vol[i, j, 0,:].squeeze(), [b1, b2], [u, v])
top = l2_fit(vol[i, j, -1,:].squeeze(), [b1, b2], [u, v])
left = l2_fit(vol[irange[0] ,j, :,:].squeeze(), [b2, b3], [v, w])
right = l2_fit(vol[irange[1]-1,j, :,:].squeeze(), [b2, b3], [v, w])
front = l2_fit(vol[i, jrange[0], :,:].squeeze(), [b1, b3], [u, w])
back = l2_fit(vol[i, jrange[1]-1,:,:].squeeze(), [b1, b3], [u, w])
volume = volume_factory.edge_surfaces([left, right, front, back, bottom, top])
elif method == 'z':
bottom = l2_fit(vol[i,j, 0,:].squeeze(), [b1, b2], [u, v])
top = l2_fit(vol[i,j,-1,:].squeeze(), [b1, b2], [u, v])
volume = volume_factory.edge_surfaces([bottom, top])
volume.set_order(*p)
volume.refine(ngz - 1, direction='w')
volume.reverse(direction=2)
# Point-to-cell mapping
# TODO: Optimize more
eps = 1e-2
u = [np.linspace(eps, 1-eps, n) for n in ntexture]
points = volume(*u).reshape(-1, 3)
cellids = np.zeros(points.shape[:-1], dtype=int)
cell = None
nx, ny, nz = self.n
for ptid, point in enumerate(tqdm(points, desc='Inverse mapping')):
i, j, k = cell = self.raw.cell_at(point) # , guess=cell)
cellid = i*ny*nz + j*nz + k
cellids[ptid] = cellid
cellids = cellids.reshape(tuple(ntexture))
all_textures = {}
for name in self.attribute:
data = self.attribute[name][cellids]
# TODO: This flattens the image if it happens to be 3D (or higher...)
# do we need a way to communicate the structure back to the caller?
# data = data.reshape(-1, data.shape[-1])
# TODO: This normalizes the image,
# but we need a way to communicate the ranges back to the caller
# a, b = min(data.flat), max(data.flat)
# data = ((data - a) / (b - a) * 255).astype(np.uint8)
all_textures[name] = data
all_textures['cellids'] = cellids
return volume, all_textures
def to_ifem(self, p, ngeom, ntexture, method='full', irange=None, jrange=None):
if irange is None: irange = [None, None]
if jrange is None: jrange = [None, None]
translate = {
'emodulus25' : 'stiffness',
'kx' : 'permx',
'ky' : 'permy',
'kz' : 'permz',
'poissonratio25': 'poisson'}
h5_filename = 'textures.h5'
h5_file = h5py.File(h5_filename, 'w')
vol, textures = self.texture(p, ngeom, ntexture, method, irange, jrange)
# augment dataset with missing information
if 'kx' in textures and not 'ky' in textures:
textures['ky'] = textures['kx']
# print information to png-images and hdf5-files
print(r'<porotexturematerial>')
for name, data in textures.items():
# translate to more IFEM-friendly terminology
if name in translate: name = translate[name]
h5_file.create_dataset(name, data=data, compression='gzip')
a, b = min(data.flat), max(data.flat)
img = ((data - a) / (b - a) * 255).astype(np.uint8)
n = data.shape
img = img.reshape(n[0], n[1]*n[2])
print(' <property file="{}.png" min="{}" max="{}" name="{}" nx="{}" ny="{}" nz="{}"/>'.format(name, a,b, name, n[0], n[1], n[2]))
cv2.imwrite(name+'.png', img)
print(r'</porotexturematerial>')
h5_file.close()
print('Written {}'.format(h5_filename))
with G2('geom.g2') as myfile:
myfile.write(vol)
def __exit__(self, exc_type, exc_value, traceback):
self.fstream.close()
| gpl-3.0 |
rienafairefr/pynYNAB | tests/test_operations.py | 2 | 2203 | import json
import pytest
from pynYNAB.Client import nYnabClient
from pynYNAB.ClientFactory import nYnabClientFactory
from pynYNAB.exceptions import NoBudgetNameException
from pynYNAB.schema.catalog import BudgetVersion
class MockConnection2(object):
id = '12345'
@pytest.fixture
def factory():
return nYnabClientFactory('sqlite://')
@pytest.fixture
def connection():
return MockConnection2()
@pytest.fixture
def client(factory, connection):
return factory.create_client(budget_name='budget_name', connection=connection, sync=False)
def test_create_budget(factory):
currency_format = dict(
iso_code='USD',
example_format='123,456.78',
decimal_digits=2,
decimal_separator='.',
symbol_first=True,
group_separator=',',
currency_symbol='$',
display_symbol=True
)
date_format = dict(
format='MM/DD/YYYY'
)
class MockConnection(object):
def dorequest(this, request_dic, opname):
assert opname == opname
assert request_dic['currency_format'] == json.dumps(currency_format)
assert request_dic['date_format'] == json.dumps(date_format)
user_id = '1234'
id = '1234'
client = factory.create_client(budget_name='budget_name', connection=MockConnection(), sync=False)
client.create_budget(budget_name='New Budget')
def test_client_nobudget():
def create_client_no_budget():
nYnabClient()
pytest.raises(NoBudgetNameException, create_client_no_budget)
def test_select_budget(client):
budget_version1 = BudgetVersion(version_name='TestBudget')
budget_version2 = BudgetVersion(version_name='NewTestBudget')
client.catalog.ce_budget_versions = [budget_version1, budget_version2]
client.select_budget(budget_name='NewTestBudget')
assert budget_version2.id == client.budget_version_id
def test_create_client(client, connection):
assert connection == client.connection
assert connection == client.catalogClient.connection
assert connection == client.budgetClient.connection
assert 'budget_name' == client.budget_name
assert 'sqlite://' == str(client.session.bind.url)
| mit |
lildadou/Flexget | flexget/utils/qualities.py | 14 | 17358 | from __future__ import unicode_literals, division, absolute_import
import re
import copy
import logging
log = logging.getLogger('utils.qualities')
class QualityComponent(object):
""""""
def __init__(self, type, value, name, regexp=None, modifier=None, defaults=None):
"""
:param type: Type of quality component. (resolution, source, codec, or audio)
:param value: Value used to sort this component with others of like type.
:param name: Canonical name for this quality component.
:param regexp: Regexps used to match this component.
:param modifier: An integer that affects sorting above all other components.
:param defaults: An iterable defining defaults for other quality components if this component matches.
"""
if type not in ['resolution', 'source', 'codec', 'audio']:
raise ValueError('%s is not a valid quality component type.' % type)
self.type = type
self.value = value
self.name = name
self.modifier = modifier
self.defaults = defaults or []
# compile regexp
if regexp is None:
regexp = re.escape(name)
self.regexp = re.compile('(?<![^\W_])(' + regexp + ')(?![^\W_])', re.IGNORECASE)
def matches(self, text):
"""Test if quality matches to text.
:param string text: data te be tested against
:returns: tuple (matches, remaining text without quality data)
"""
match = self.regexp.search(text)
if not match:
return False, ""
else:
# remove matching part from the text
text = text[:match.start()] + text[match.end():]
return True, text
def __hash__(self):
return hash(self.type + str(self.value))
def __nonzero__(self):
return self.value
def __eq__(self, other):
if isinstance(other, basestring):
other = _registry.get(other)
if not isinstance(other, QualityComponent):
raise TypeError('Cannot compare %r and %r' % (self, other))
if other.type == self.type:
return self.value == other.value
else:
raise TypeError('Cannot compare %s and %s' % (self.type, other.type))
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if isinstance(other, basestring):
other = _registry.get(other)
if not isinstance(other, QualityComponent):
raise TypeError('Cannot compare %r and %r' % (self, other))
if other.type == self.type:
return self.value < other.value
else:
raise TypeError('Cannot compare %s and %s' % (self.type, other.type))
def __ge__(self, other):
return not self.__lt__(other)
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
def __gt__(self, other):
return not self.__le__(other)
def __add__(self, other):
if not isinstance(other, int):
raise TypeError()
l = globals().get('_' + self.type + 's')
index = l.index(self) + other
if index >= len(l):
index = -1
return l[index]
def __sub__(self, other):
if not isinstance(other, int):
raise TypeError()
l = globals().get('_' + self.type + 's')
index = l.index(self) - other
if index < 0:
index = 0
return l[index]
def __repr__(self):
return '<%s(name=%s,value=%s)>' % (self.type.title(), self.name, self.value)
def __str__(self):
return self.name
def __deepcopy__(self, memo=None):
# No mutable attributes, return a regular copy
return copy.copy(self)
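# An illustrative use of QualityComponent.matches (the registry built
# below holds the real instances):
#   comp = QualityComponent('resolution', 60, '720p', '(1280x)?720(p|hd)?x?(50)?')
#   comp.matches('Show.S01E01.720p.HDTV')  # -> (True, 'Show.S01E01..HDTV')
# The matched token is cut out of the returned text so that later
# components cannot re-match it.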
_resolutions = [
QualityComponent('resolution', 10, '360p'),
QualityComponent('resolution', 20, '368p', '368p?'),
QualityComponent('resolution', 30, '480p', '480p?'),
QualityComponent('resolution', 40, '576p', '576p?'),
QualityComponent('resolution', 45, 'hr'),
QualityComponent('resolution', 50, '720i'),
QualityComponent('resolution', 60, '720p', '(1280x)?720(p|hd)?x?(50)?'),
QualityComponent('resolution', 70, '1080i'),
QualityComponent('resolution', 80, '1080p', '(1920x)?1080p?')
]
_sources = [
QualityComponent('source', 10, 'workprint', modifier=-8),
QualityComponent('source', 20, 'cam', '(?:hd)?cam', modifier=-7),
QualityComponent('source', 30, 'ts', '(?:hd)?ts|telesync', modifier=-6),
QualityComponent('source', 40, 'tc', 'tc|telecine', modifier=-5),
QualityComponent('source', 50, 'r5', 'r[2-8c]', modifier=-4),
QualityComponent('source', 60, 'hdrip', 'hd[\W_]?rip', modifier=-3),
QualityComponent('source', 70, 'ppvrip', 'ppv[\W_]?rip', modifier=-2),
QualityComponent('source', 80, 'preair', modifier=-1),
QualityComponent('source', 90, 'tvrip', 'tv[\W_]?rip'),
QualityComponent('source', 100, 'dsr', 'dsr|ds[\W_]?rip'),
QualityComponent('source', 110, 'sdtv', '(?:[sp]dtv|dvb)(?:[\W_]?rip)?'),
QualityComponent('source', 120, 'webrip', 'web[\W_]?rip'),
QualityComponent('source', 130, 'dvdscr', '(?:(?:dvd|web)[\W_]?)?scr(?:eener)?', modifier=0),
QualityComponent('source', 140, 'bdscr', 'bdscr(?:eener)?'),
QualityComponent('source', 150, 'hdtv', 'a?hdtv(?:[\W_]?rip)?'),
QualityComponent('source', 160, 'webdl', 'web(?:[\W_]?(dl|hd))'),
QualityComponent('source', 170, 'dvdrip', 'dvd(?:[\W_]?rip)?'),
QualityComponent('source', 175, 'remux'),
QualityComponent('source', 180, 'bluray', '(?:b[dr][\W_]?rip|blu[\W_]?ray(?:[\W_]?rip)?)')
]
_codecs = [
QualityComponent('codec', 10, 'divx'),
QualityComponent('codec', 20, 'xvid'),
QualityComponent('codec', 30, 'h264', '[hx].?264'),
QualityComponent('codec', 40, 'h265', '[hx].?265|hevc'),
QualityComponent('codec', 50, '10bit', '10.?bit|hi10p')
]
channels = '(?:(?:[\W_]?5[\W_]?1)|(?:[\W_]?2[\W_]?(?:0|ch)))'
_audios = [
QualityComponent('audio', 10, 'mp3'),
# TODO: No idea what order these should go in or if we need different regexps
QualityComponent('audio', 20, 'aac', 'aac%s?' % channels),
QualityComponent('audio', 30, 'dd5.1', 'dd%s' % channels),
QualityComponent('audio', 40, 'ac3', 'ac3%s?' % channels),
QualityComponent('audio', 50, 'flac', 'flac%s?' % channels),
# The DTSs are a bit backwards, but the more specific one needs to be parsed first
QualityComponent('audio', 60, 'dtshd', 'dts[\W_]?hd(?:[\W_]?ma)?'),
QualityComponent('audio', 70, 'dts'),
QualityComponent('audio', 80, 'truehd')
]
_UNKNOWNS = {
'resolution': QualityComponent('resolution', 0, 'unknown'),
'source': QualityComponent('source', 0, 'unknown'),
'codec': QualityComponent('codec', 0, 'unknown'),
'audio': QualityComponent('audio', 0, 'unknown')
}
# For wiki generating help
'''for type in (_resolutions, _sources, _codecs, _audios):
print '{{{#!td style="vertical-align: top"'
for item in reversed(type):
print '- ' + item.name
print '}}}'
'''
_registry = {}
for items in (_resolutions, _sources, _codecs, _audios):
for item in items:
_registry[item.name] = item
def all_components():
return _registry.itervalues()
class Quality(object):
"""Parses and stores the quality of an entry in the four component categories."""
def __init__(self, text=''):
"""
:param text: A string to parse quality from
"""
self.text = text
self.clean_text = text
if text:
self.parse(text)
else:
self.resolution = _UNKNOWNS['resolution']
self.source = _UNKNOWNS['source']
self.codec = _UNKNOWNS['codec']
self.audio = _UNKNOWNS['audio']
def parse(self, text):
"""Parses a string to determine the quality in the four component categories.
:param text: The string to parse
"""
self.text = text
self.clean_text = text
self.resolution = self._find_best(_resolutions, _UNKNOWNS['resolution'], False)
self.source = self._find_best(_sources, _UNKNOWNS['source'])
self.codec = self._find_best(_codecs, _UNKNOWNS['codec'])
self.audio = self._find_best(_audios, _UNKNOWNS['audio'])
# If any of the matched components have defaults, set them now.
for component in self.components:
for default in component.defaults:
default = _registry[default]
if not getattr(self, default.type):
setattr(self, default.type, default)
def _find_best(self, qlist, default=None, strip_all=True):
"""Finds the highest matching quality component from `qlist`"""
result = None
search_in = self.clean_text
for item in qlist:
match = item.matches(search_in)
if match[0]:
result = item
self.clean_text = match[1]
if strip_all:
# In some cases we want to strip all found quality components,
# even though we're going to return only the last of them.
search_in = self.clean_text
if item.modifier is not None:
# If this item has a modifier, do not proceed to check higher qualities in the list
break
return result or default
@property
def name(self):
name = ' '.join(str(p) for p in (self.resolution, self.source, self.codec, self.audio) if p.value != 0)
return name or 'unknown'
@property
def components(self):
return [self.resolution, self.source, self.codec, self.audio]
@property
def _comparator(self):
modifier = sum(c.modifier for c in self.components if c.modifier)
return [modifier] + self.components
def __contains__(self, other):
if isinstance(other, basestring):
other = Quality(other)
if not other or not self:
return False
for cat in ('resolution', 'source', 'audio', 'codec'):
othercat = getattr(other, cat)
if othercat and othercat != getattr(self, cat):
return False
return True
def __nonzero__(self):
return any(self._comparator)
def __eq__(self, other):
if isinstance(other, basestring):
other = Quality(other)
if not other:
raise TypeError('`%s` does not appear to be a valid quality string.' % other.text)
if not isinstance(other, Quality):
if other is None:
return False
raise TypeError('Cannot compare %r and %r' % (self, other))
return self._comparator == other._comparator
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if isinstance(other, basestring):
other = Quality(other)
if not other:
raise TypeError('`%s` does not appear to be a valid quality string.' % other.text)
if not isinstance(other, Quality):
raise TypeError('Cannot compare %r and %r' % (self, other))
return self._comparator < other._comparator
def __ge__(self, other):
return not self.__lt__(other)
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
def __gt__(self, other):
return not self.__le__(other)
def __repr__(self):
return '<Quality(resolution=%s,source=%s,codec=%s,audio=%s)>' % (self.resolution, self.source,
self.codec, self.audio)
def __str__(self):
return self.name
def __hash__(self):
# Make these usable as dict keys
return hash(self.name)
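# A brief parsing sketch (the release name is illustrative):
#   q = Quality('Show.S01E01.720p.HDTV.x264-GRP')
#   q.resolution, q.source, q.codec   # -> 720p, hdtv, h264
#   q.name                            # -> '720p hdtv h264'
# Categories with no match fall back to the 'unknown' components, which
# are omitted from the canonical name.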
def get(quality_name):
"""Returns a quality object based on canonical quality name."""
found_components = {}
for part in quality_name.lower().split():
component = _registry.get(part)
if not component:
raise ValueError('`%s` is not a valid quality string' % part)
if component.type in found_components:
raise ValueError('`%s` cannot be defined twice in a quality' % component.type)
found_components[component.type] = component
if not found_components:
raise ValueError('No quality specified')
result = Quality()
for type, component in found_components.iteritems():
setattr(result, type, component)
return result
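# get() accepts canonical component names separated by spaces, e.g.:
#   get('720p hdtv')             # Quality with resolution=720p, source=hdtv
#   get('1080p') > get('720p')   # -> True; ordering compares the summed
#                                #    modifiers, then the four components
#   get('fake')                  # -> ValueError: not a valid quality string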
class RequirementComponent(object):
"""Represents requirements for a given component type. Can evaluate whether a given QualityComponent
meets those requirements."""
def __init__(self, type):
self.type = type
self.reset()
def reset(self):
self.min = None
self.max = None
self.acceptable = []
self.none_of = []
def allows(self, comp, loose=False):
if comp.type != self.type:
raise TypeError('Cannot compare %r against %s' % (comp, self.type))
if comp in self.none_of:
return False
if loose:
return True
if comp in self.acceptable:
return True
if self.min or self.max:
if self.min and comp < self.min:
return False
if self.max and comp > self.max:
return False
return True
if not self.acceptable:
return True
return False
def add_requirement(self, text):
if '-' in text:
min, max = text.split('-')
min, max = _registry[min], _registry[max]
if min.type != self.type or max.type != self.type:
raise ValueError('Component type mismatch: %s' % text)
self.min, self.max = min, max
elif '|' in text:
quals = text.split('|')
quals = [_registry[qual] for qual in quals]
if any(qual.type != self.type for qual in quals):
raise ValueError('Component type mismatch: %s' % text)
self.acceptable.extend(quals)
else:
qual = _registry[text.strip('!<>=+')]
if qual.type != self.type:
raise ValueError('Component type mismatch!')
if text in _registry:
self.acceptable.append(qual)
else:
if text[0] == '<':
if text[1] != '=':
qual -= 1
self.max = qual
elif text[0] == '>' or text.endswith('+'):
if text[1] != '=' and not text.endswith('+'):
qual += 1
self.min = qual
elif text[0] == '!':
self.none_of.append(qual)
class Requirements(object):
"""Represents requirements for allowable qualities. Can determine whether a given Quality passes requirements."""
def __init__(self, req=''):
self.text = ''
self.resolution = RequirementComponent('resolution')
self.source = RequirementComponent('source')
self.codec = RequirementComponent('codec')
self.audio = RequirementComponent('audio')
if req:
self.parse_requirements(req)
@property
def components(self):
return [self.resolution, self.source, self.codec, self.audio]
def parse_requirements(self, text):
"""
Parses a requirements string.
:param text: The string containing quality requirements.
"""
text = text.lower()
if self.text:
self.text += ' '
self.text += text
if self.text == 'any':
for component in self.components:
component.reset()
return
text = text.replace(',', ' ')
parts = text.split()
try:
for part in parts:
if '-' in part:
found = _registry[part.split('-')[0]]
elif '|' in part:
found = _registry[part.split('|')[0]]
else:
found = _registry[part.strip('!<>=+')]
for component in self.components:
if found.type == component.type:
component.add_requirement(part)
except KeyError as e:
raise ValueError('%s is not a valid quality component.' % e.args[0])
def allows(self, qual, loose=False):
"""Determine whether this set of requirements allows a given quality.
:param Quality qual: The quality to evaluate.
:param bool loose: If True, only ! (not) requirements will be enforced.
:rtype: bool
:returns: True if given quality passes all component requirements.
"""
if isinstance(qual, basestring):
qual = Quality(qual)
if not qual:
raise TypeError('`%s` does not appear to be a valid quality string.' % qual.text)
for r_component, q_component in zip(self.components, qual.components):
if not r_component.allows(q_component, loose=loose):
return False
return True
def __str__(self):
return self.text or 'any'
def __repr__(self):
return '<Requirements(%s)>' % self
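# A usage sketch of the requirements grammar parsed above (the quality
# strings are illustrative):
#   reqs = Requirements('720p+ !cam')      # at least 720p, never cam
#   reqs.allows(Quality('1080p bluray'))   # -> True
#   reqs.allows(Quality('480p hdtv'))      # -> False (below the minimum)
# '720p-1080p' bounds a range, while '720p|1080p' enumerates the
# acceptable values for that component.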
| mit |
nsalomonis/AltAnalyze | AltAnalyzeViewer.py | 1 | 282646 | import os.path, sys, shutil
import os
import string, re
import subprocess
import numpy as np
import unique
import traceback
import wx
import wx.lib.scrolledpanel
import wx.grid as gridlib
try:
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=UserWarning) ### hides import warnings
import matplotlib
#try: matplotlib.use('TkAgg')
#except Exception: pass
#import matplotlib.pyplot as plt ### Backend conflict issue when called prior to the actual Wx window appearing
#matplotlib.use('WXAgg')
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.figure import Figure
from numpy import arange, sin, pi
except Exception: pass
if os.name == 'nt': bheight=20
else: bheight=10
rootDirectory = unique.filepath(str(os.getcwd()))
currentDirectory = unique.filepath(str(os.getcwd())) + "/" + "Config/" ### NS-91615 alternative to __file__
currentDirectory = string.replace(currentDirectory,'AltAnalyzeViewer.app/Contents/Resources','')
os.chdir(currentDirectory)
parentDirectory = str(os.getcwd()) ### NS-91615 gives the parent AltAnalyze directory
sys.path.insert(1,parentDirectory) ### NS-91615 adds the AltAnalyze modules to the system path so that modules such as visualization_scripts.clustering can be imported
import UI
#These classes set up the "tab" feature in the program, allowing you to switch the viewer to different modes.
class PageTwo(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
self.SetBackgroundColour("white")
myGrid = ""
class PageThree(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
self.SetBackgroundColour("white")
class PageFour(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
class PageFive(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
class Main(wx.Frame):
def __init__(self,parent,id):
wx.Frame.__init__(self, parent, id,'AltAnalyze Results Viewer', size=(900,610))
self.Show()
self.Maximize(True) #### This allows the frame to resize to the host machine's max size
self.heatmap_translation = {}
self.heatmap_run = {}
self.species = 'Hs'
self.platform = 'RNASeq'
self.geneset_type = 'WikiPathways'
self.supported_genesets = []
self.runPCA = False
self.SetBackgroundColour((230, 230, 230))
self.species=''
#PANELS & WIDGETS
#self.panel is one of the TOP PANELS. These are used for title display, the open project button, and sort & filter buttons.
self.panel = wx.Panel(self, id=2, pos=(200,0), size=(600,45), style=wx.RAISED_BORDER)
self.panel.SetBackgroundColour((110, 150, 250))
#Panel 2 is the main view panel.
self.panel2 = wx.Panel(self, id=3, pos=(200,50), size=(1400,605), style=wx.RAISED_BORDER)
self.panel2.SetBackgroundColour((218, 218, 218))
#Panel 3 contains the pseudo-directory tree.
self.panel3 = wx.Panel(self, id=4, pos=(0,50), size=(200,625), style=wx.RAISED_BORDER)
self.panel3.SetBackgroundColour("white")
self.panel4 = wx.Panel(self, id=5, pos=(200,650), size=(1400,150), style=wx.RAISED_BORDER)
self.panel4.SetBackgroundColour("black")
#These are the other top panels.
self.panel_left = wx.Panel(self, id=12, pos=(0,0), size=(200,45), style=wx.RAISED_BORDER)
self.panel_left.SetBackgroundColour((218, 218, 218))
self.panel_right = wx.Panel(self, id=11, pos=(1100,0), size=(200,45), style=wx.RAISED_BORDER)
self.panel_right.SetBackgroundColour((218, 218, 218))
self.panel_right2 = wx.Panel(self, id=13, pos=(1300,0), size=(300,45), style=wx.RAISED_BORDER)
self.panel_right2.SetBackgroundColour((218, 218, 218))
self.panel_right2.SetMaxSize([300, 45])
#Lines 81-93 set up the user input box for the "sort" function (used on the table).
self.sortbox = wx.TextCtrl(self.panel_right2, id=7, pos=(55,10), size=(40,25))
wx.Button(self.panel_right2, id=8, label="Sort", pos=(5, 12), size=(40, bheight))
self.Bind(wx.EVT_BUTTON, self.SortTablefromButton, id=8)
self.AscendingRadio = wx.RadioButton(self.panel_right2, id=17, label="Sort", pos=(100, 3), size=(12, 12))
self.DescendingRadio = wx.RadioButton(self.panel_right2, id=18, label="Sort", pos=(100, 23), size=(12, 12))
font = wx.Font(10, wx.SWISS, wx.NORMAL, wx.BOLD)
self.AscendingOpt = wx.StaticText(self.panel_right2, label="Ascending", pos=(115, 1))
self.AscendingOpt.SetFont(font)
self.DescendingOpt = wx.StaticText(self.panel_right2, label="Descending", pos=(115, 21))
self.DescendingOpt.SetFont(font)
#Lines 96-98 set up the user input box for the "filter" function (used on the table).
self.filterbox = wx.TextCtrl(self.panel_right, id=9, pos=(60,10), size=(125,25))
wx.Button(self.panel_right, id=10, label="Filter", pos=(0, 12), size=(50, bheight))
self.Bind(wx.EVT_BUTTON, self.FilterTablefromButton, id=10)
#Lines 101-103 set up the in-program log.
self.control = wx.TextCtrl(self.panel4, id=6, pos=(1,1), size=(1400,150), style=wx.TE_MULTILINE)
self.control.write("Welcome to AltAnalyze Results Viewer!" + "\n")
self.Show(True)
self.main_results_directory = ""
#self.browser is the "directory tree" where groups of files are instantiated in self.browser2.
self.browser = wx.TreeCtrl(self.panel3, id=2000, pos=(0,0), size=(200,325))
#self.browser2 is the "file group" where groups of files are accessed, respective to the directory selected in self.browser.
self.browser2 = wx.TreeCtrl(self.panel3, id=2001, pos=(0,325), size=(200,325))
self.tree = self.browser
#self.sortdict groups the table headers to integers---this works with sort function.
self.sortdict = {"A" : 0, "B" : 1, "C" : 2, "D" : 3, "E" : 4, "F" : 5, "G" : 6, "H" : 7, "I" : 8, "J" : 9, "K" : 10, "L" : 11, "M" : 12, "N" : 13, "O" : 14, "P" : 15, "Q" : 16, "R" : 17, "S" : 18, "T" : 19, "U" : 20, "V" : 21, "W" : 22, "X" : 23, "Y" : 24, "Z" : 25, "AA" : 26, "AB" : 27, "AC" : 28, "AD" : 29, "AE" : 30, "AF" : 31, "AG" : 32, "AH" : 33, "AI" : 34, "AJ" : 35, "AK" : 36, "AL" : 37, "AM" : 38, "AN" : 39, "AO" : 40, "AP" : 41, "AQ" : 42, "AR" : 43, "AS" : 44, "AT" : 45, "AU" : 46, "AV" : 47, "AW" : 48, "AX" : 49, "AY" : 50, "AZ" : 51}
#SIZER--main sizer for the program.
ver = wx.BoxSizer(wx.VERTICAL)
verpan2 = wx.BoxSizer(wx.VERTICAL)
hpan1 = wx.BoxSizer(wx.HORIZONTAL)
hpan2 = wx.BoxSizer(wx.HORIZONTAL)
hpan3 = wx.BoxSizer(wx.HORIZONTAL)
verpan2.Add(self.panel2, 8, wx.ALL|wx.EXPAND, 2)
hpan1.Add(self.panel_left, 5, wx.ALL|wx.EXPAND, 2)
hpan1.Add(self.panel, 24, wx.ALL|wx.EXPAND, 2)
hpan1.Add(self.panel_right, 3, wx.ALL|wx.EXPAND, 2)
hpan1.Add(self.panel_right2, 3, wx.ALL|wx.EXPAND, 2)
hpan2.Add(self.panel3, 1, wx.ALL|wx.EXPAND, 2)
hpan2.Add(verpan2, 7, wx.ALL|wx.EXPAND, 2)
hpan3.Add(self.panel4, 1, wx.ALL|wx.EXPAND, 2)
ver.Add(hpan1, 1, wx.EXPAND)
ver.Add(hpan2, 18, wx.EXPAND)
ver.Add(hpan3, 4, wx.EXPAND)
self.browser.SetSize(self.panel3.GetSize())
self.SetSizer(ver)
#TABS: lines 137-159 instantiate the tabs for the main viewing panel.
self.nb = wx.Notebook(self.panel2, id=7829, style = wx.NB_BOTTOM)
self.page1 = wx.ScrolledWindow(self.nb, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.HSCROLL|wx.VSCROLL )
self.page1.SetScrollRate( 5, 5 )
self.page2 = PageTwo(self.nb)
self.page3 = PageThree(self.nb)
self.page4 = PageFour(self.nb)
self.nb.AddPage(self.page2, "Table")
self.nb.AddPage(self.page1, "PNG")
self.nb.AddPage(self.page3, "Interactive")
self.page3.SetBackgroundColour((218, 218, 218))
sizer = wx.BoxSizer()
sizer.Add(self.nb, 1, wx.EXPAND)
self.panel2.SetSizer(sizer)
self.page1.SetBackgroundColour("white")
self.myGrid = gridlib.Grid(self.page2, id=1002)
#self.myGrid.CreateGrid(100, self.dataset_file_length) ### Sets this at 400 columns rather than 100 - Excel like
self.Bind(gridlib.EVT_GRID_CELL_RIGHT_CLICK, self.GridRightClick, id=1002)
self.Bind(gridlib.EVT_GRID_CELL_LEFT_DCLICK, self.GridRowColor, id=1002)
self.HighlightedCells = []
gridsizer = wx.BoxSizer(wx.VERTICAL)
gridsizer.Add(self.myGrid)
self.page2.SetSizer(gridsizer)
self.page2.Layout()
#In the event that the interactive tab is chosen, a function must immediately run.
self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.InteractiveTabChoose, id=7829)
#INTERACTIVE PANEL LAYOUT: lines 167-212
#Pca Setup
self.RunButton1 = wx.Button(self.page3, id=43, label="Run", pos=(275, 150), size=(120, bheight))
self.Bind(wx.EVT_BUTTON, self.InteractiveRun, id=43)
self.Divider1 = self.ln = wx.StaticLine(self.page3, pos=(5,100))
self.ln.SetSize((415,10))
IntTitleFont = wx.Font(15, wx.SWISS, wx.NORMAL, wx.BOLD)
self.InteractiveTitle = wx.StaticText(self.page3, label="Main Dataset Parameters", pos=(10, 15))
self.InteractiveDefaultMessage = wx.StaticText(self.page3, label="No interactive options available.", pos=(10, 45))
self.InteractiveTitle.SetFont(IntTitleFont)
self.IntFileTxt = wx.TextCtrl(self.page3, id=43, pos=(105,45), size=(375,20))
self.InteractiveFileLabel = wx.StaticText(self.page3, label="Selected File:", pos=(10, 45))
self.Yes1Label = wx.StaticText(self.page3, label="Yes", pos=(305, 80))
self.No1Label = wx.StaticText(self.page3, label="No", pos=(375, 80))
self.D_3DLabel = wx.StaticText(self.page3, label="3D", pos=(305, 120))
self.D_2DLabel = wx.StaticText(self.page3, label="2D", pos=(375, 120))
self.IncludeLabelsRadio = wx.RadioButton(self.page3, id=40, pos=(285, 83), size=(12, 12), style=wx.RB_GROUP)
self.No1Radio = wx.RadioButton(self.page3, id=41, pos=(355, 83), size=(12, 12))
self.IncludeLabelsRadio.SetValue(True)
#self.EnterPCAGenes = wx.TextCtrl(self.page3, id=48, pos=(105,45), size=(375,20))
self.D_3DRadio = wx.RadioButton(self.page3, id=46, pos=(285, 123), size=(12, 12), style=wx.RB_GROUP)
self.D_2DRadio = wx.RadioButton(self.page3, id=47, pos=(355, 123), size=(12, 12))
self.D_3DRadio.SetValue(True)
self.Opt1Desc = wx.StaticText(self.page3, label="Display sample labels next to each object", pos=(10, 80))
self.Opt2Desc = wx.StaticText(self.page3, label="Dimensions to display", pos=(10, 120))
self.IntFileTxt.Hide()
self.InteractiveFileLabel.Hide()
self.Yes1Label.Hide()
self.No1Label.Hide()
self.D_3DLabel.Hide()
self.D_2DLabel.Hide()
self.IncludeLabelsRadio.Hide()
self.No1Radio.Hide()
self.D_3DRadio.Hide()
self.D_2DRadio.Hide()
self.Opt1Desc.Hide()
self.Opt2Desc.Hide()
self.RunButton1.Hide()
self.Divider1.Hide()
#TERMINAL SETUP
TxtBox = wx.BoxSizer(wx.VERTICAL)
TxtBox.Add(self.control, 1, wx.EXPAND)
self.panel4.SetSizer(TxtBox)
self.panel4.Layout()
#SELECTION LIST
self.TopSelectList = []
self.SearchArray = []
self.SearchArrayFiltered = []
self.TopID = ""
self.ColoredCellList = []
#LOGO
self.png = wx.Image("logo.gif", wx.BITMAP_TYPE_ANY).ConvertToBitmap()
self.LOGO = wx.StaticBitmap(self.page1, -1, self.png, (0,0), (self.png.GetWidth(), self.png.GetHeight()), style=wx.ALIGN_CENTER)
imgsizer_v = wx.BoxSizer(wx.VERTICAL)
imgsizer_v.Add(self.LOGO, proportion=0, flag=wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL)
self.page1.SetSizer(imgsizer_v)
self.page1.Layout()
self.nb.SetSelection(1)
browspan = wx.BoxSizer(wx.VERTICAL)
browspan.Add(self.browser, 1, wx.EXPAND)
browspan.Add(self.browser2, 1, wx.EXPAND)
self.panel3.SetSizer(browspan)
self.PanelTitle = wx.StaticText(self.panel, label="", pos=(210, 15))
#Open Button
ButtonMan = wx.Button(self.panel_left, id=1001, label="Open Project", pos=(0,0), size=(100,100))
self.Bind(wx.EVT_BUTTON, self.OnOpen, id=1001)
OpenSizer = wx.BoxSizer(wx.HORIZONTAL)
OpenSizer.Add(ButtonMan, 1, wx.EXPAND)
self.panel_left.SetSizer(OpenSizer)
#STATUS BAR CREATE --- not all of these are currently functional. The "edit" menu still needs to be implemented.
status = self.CreateStatusBar()
menubar = wx.MenuBar()
file = wx.Menu()
edit = wx.Menu()
view = wx.Menu()
search = wx.Menu()
filter_table = wx.Menu()
help_menu = wx.Menu()
open_menu = wx.Menu()
open_menu.Append(120, 'Project')
open_menu.Append(121, 'File')
file.AppendMenu(101, '&Open\tCtrl+O', open_menu)
file.Append(102, '&Save\tCtrl+S', 'Save the document')
file.AppendSeparator()
file.Append(103, 'Options', '')
file.AppendSeparator()
quit = wx.MenuItem(file, 105, '&Quit\tCtrl+Q', 'Quit the Application')
file.AppendItem(quit)
edit.Append(109, 'Undo', '')
edit.Append(110, 'Redo', '')
edit.AppendSeparator()
edit.Append(106, '&Cut\tCtrl+X', '')
edit.Append(107, '&Copy\tCtrl+C', '')
edit.Append(108, '&Paste\tCtrl+V', '')
edit.AppendSeparator()
edit.Append(111, '&Select All\tCtrl+A', '')
view.Append(112, '&Clear Panel\tCtrl+.', '')
search.Append(113, 'Tree', '')
search.Append(114, 'Table', '')
filter_table.Append(116, 'Filter', '')
filter_table.Append(117, 'Sort', '')
help_menu.AppendSeparator()
help_menu.Append(139, 'Help', '')
help_menu.Append(140, 'About', '')
menubar.Append(file, "File")
menubar.Append(edit, "Edit")
menubar.Append(view, "View")
menubar.Append(search, "Search")
menubar.Append(filter_table, "Table")
menubar.Append(help_menu, "Help")
self.SetMenuBar(menubar)
#STATUS BAR BINDINGS
self.Bind(wx.EVT_MENU, self.OnOpen, id=120)
self.Bind(wx.EVT_MENU, self.OnOpenSingleFile, id=121)
self.Bind(wx.EVT_MENU, self.OnQuit, id=105)
self.Bind(wx.EVT_MENU, self.ClearVisualPanel, id=112)
self.Bind(wx.EVT_MENU, self.TreeSearch, id=113)
self.Bind(wx.EVT_MENU, self.GridSearch, id=114)
self.Bind(wx.EVT_MENU, self.FilterTable, id=116)
self.Bind(wx.EVT_MENU, self.SortTable, id=117)
self.Bind(wx.EVT_MENU, self.OnAbout, id=140)
self.Bind(wx.EVT_MENU, self.OnHelp, id=139)
self.Layout()
def OnQuit(self, event):
popup = wx.MessageDialog(None, "Are you sure you want to quit?", "Warning", wx.YES_NO)
popup_answer = popup.ShowModal()
#print popup_answer
if(popup_answer == wx.ID_YES):
self.Close()
else:
return
def GridRowColor(self, event):
#This colors any row that has been selected and resets it accordingly: may be removed in future versions.
if len(self.HighlightedCells) > 0:
for i in self.HighlightedCells:
self.myGrid.SetCellBackgroundColour(i[0], i[1], (255, 255, 255))
self.HighlightedCells = []
self.GridRowEvent = event.GetRow()
for i in range(50):
self.myGrid.SetCellBackgroundColour(self.GridRowEvent, i, (235, 255, 255))
self.HighlightedCells.append((self.GridRowEvent, i))
def GridRightClick(self, event):
#Pop-up menu instantiation for a right click on the table.
self.GridRowEvent = event.GetRow()
# only do this part the first time so the events are only bound once
if not hasattr(self, "popupID3"):
self.popupID1 = wx.NewId()
self.popupID2 = wx.NewId()
if self.analyzeSplicing:
self.popupID3 = wx.NewId()
self.popupID4 = wx.NewId()
self.popupID5 = wx.NewId()
self.Bind(wx.EVT_MENU, self.GeneExpressionSummaryPlot, id=self.popupID1)
self.Bind(wx.EVT_MENU, self.PrintGraphVariables, id=self.popupID2)
if self.analyzeSplicing:
self.Bind(wx.EVT_MENU, self.AltExonViewInitiate, id=self.popupID3)
self.Bind(wx.EVT_MENU, self.IsoformViewInitiate, id=self.popupID4)
self.Bind(wx.EVT_MENU, self.SashimiPlotInitiate, id=self.popupID5)
# build the menu
menu = wx.Menu()
itemOne = menu.Append(self.popupID1, "Gene Plot")
#itemTwo = menu.Append(self.popupID2, "Print Variables")
if self.analyzeSplicing:
itemThree = menu.Append(self.popupID3, "Exon Plot")
itemFour = menu.Append(self.popupID4, "Isoform Plot")
itemFive = menu.Append(self.popupID5, "SashimiPlot")
# show the popup menu
self.PopupMenu(menu)
menu.Destroy()
def AltExonViewInitiate(self, event):
### Temporary option for exon visualization until the main tool is complete and database can be bundled with the program
i=0; values=[]
while i<1000:
try:
val = str(self.myGrid.GetCellValue(self.GridRowEvent, i))
values.append(val)
if ('G000' in val) and '->' not in val:
geneID_temp = string.split(val,":")[0]
if ('G000' in geneID_temp) and '->' not in geneID_temp:
geneID = geneID_temp
if ' ' in geneID:
geneID = string.split(geneID,' ')[0]
else:
geneID_temp = string.split(val,":")[1]
if ('G000' in geneID_temp):
geneID = geneID_temp
if ' ' in geneID:
geneID = string.split(geneID,' ')[0]
i+=1
except Exception: break
datasetDir = self.main_results_directory
#print datasetDir
self.control.write("Plotting... " + geneID + "\n")
data_type = 'raw expression'
show_introns = 'no'
analysisType = 'graph-plot'
exp_dir = unique.filepath(datasetDir+'/ExpressionInput')
#print exp_dir
exp_file = UI.getValidExpFile(exp_dir)
#print print exp_file
UI.altExonViewer(self.species,self.platform,exp_file,geneID,show_introns,analysisType,'')
def IsoformViewInitiate(self, event):
#print os.getcwd()
#This function is a part of the pop-up menu for the table: it plots a gene and protein level view.
os.chdir(parentDirectory)
t = os.getcwd()
#self.control.write(str(os.listdir(t)) + "\n")
gene = self.myGrid.GetCellValue(self.GridRowEvent, 0)
i=0; values=[]; spliced_junctions=[]
while i<1000:
try:
val = str(self.myGrid.GetCellValue(self.GridRowEvent, i))
values.append(val)
if ('G000' in val) and 'ENSP' not in val and 'ENST' not in val and '->' not in val:
geneID_temp = string.split(val,":")[0]
if ('G000' in geneID_temp) and '->' not in geneID_temp:
geneID = geneID_temp
if ' ' in geneID:
geneID = string.split(geneID,' ')[0]
elif '->' in geneID_temp: pass
else:
geneID_temp = string.split(val,":")[1]
if ('G000' in geneID_temp):
geneID = geneID_temp
if ' ' in geneID:
geneID = string.split(geneID,' ')[0]
i+=1
except Exception: break
#print [geneID]
self.control.write("Plotting... " + geneID + "\n")
from visualization_scripts import ExPlot
reload(ExPlot)
ExPlot.remoteGene(geneID,self.species,self.main_results_directory,self.CurrentFile)
#Q = subprocess.Popen(['python', 'ExPlot13.py', str(R)])
#os.chdir(currentDirectory)
def SashimiPlotInitiate(self, event):
#This function is a part of the pop-up menu for the table: it plots a SashimiPlot
datasetDir = str(self.main_results_directory)
geneID = None
#self.control.write(str(os.listdir(t)) + "\n")
i=0; values=[]; spliced_junctions=[]
while i<1000:
try:
val = str(self.myGrid.GetCellValue(self.GridRowEvent, i))
values.append(val)
if ('G000' in val) and ':E' in val:
#if 'ASPIRE' in self.DirFileTxt:
if ':ENS' in val:
val = 'ENS'+string.split(val,':ENS')[1]
val = string.replace(val,'|', ' ')
#Can also refer to MarkerFinder files
if ' ' in val:
if '.' not in string.split(val,' ')[1]:
val = string.split(val,' ')[0] ### get the gene
if 'Combined-junction' in self.DirFileTxt:
if '-' in val and '|' in val:
junctions = string.split(val,'|')[0]
val = 'ENS'+string.split(junctions,'-ENS')[-1]
spliced_junctions.append(val) ### exclusion junction
if 'index' in self.DirFileTxt: ### Splicing-index analysis
spliced_junctions.append(val)
elif '-' in val:
spliced_junctions.append(val) ### junction-level
if ('G000' in val) and geneID == None and '->' not in val:
geneID = string.split(val,":")[0]
if ' ' in geneID:
geneID = string.split(geneID,' ')[0]
i+=1
except Exception: break
if len(spliced_junctions)>0:
spliced_junctions = [spliced_junctions[-1]] ### Select the exclusion junction
else:
spliced_junctions = [geneID]
if 'DATASET' in self.DirFileTxt:
spliced_junctions = [geneID]
from visualization_scripts import SashimiPlot
reload(SashimiPlot)
self.control.write("Attempting to build SashimiPlots for " + str(spliced_junctions[0]) + "\n")
SashimiPlot.remoteSashimiPlot(self.species,datasetDir,datasetDir,None,events=spliced_junctions,show=True) ### assuming the bam files are in the root-dir
def GeneExpressionSummaryPlot(self, event):
#This function is a part of the pop-up menu for the table: it plots expression levels.
Wikipathway_Flag = 0
Protein_Flag = 0
VarGridSet = []
try:
for i in range(3000):
try:
p = self.myGrid.GetCellValue(0, i)
VarGridSet.append(p)
except Exception:
pass
for i in VarGridSet:
y = re.findall("WikiPathways", i)
if len(y) > 0:
Wikipathway_Flag = 1
break
if Wikipathway_Flag == 0:
for i in VarGridSet:
y = re.findall("Select Protein Classes", i)
if len(y) > 0:
Protein_Flag = 1
break
if Protein_Flag == 1:
VariableBox = []
for i in range(len(VarGridSet)):
y = re.findall("avg", VarGridSet[i])
if(len(y) > 0):
VariableBox.append(i)
if Wikipathway_Flag == 1:
VariableBox = []
for i in range(len(VarGridSet)):
y = re.findall("avg", VarGridSet[i])
if(len(y) > 0):
VariableBox.append(i)
q_barrel = []
for i in VariableBox:
q_box = []
q = i
for p in range(500):
if(q < 0):
break
q = q - 1
#Regular expression is needed to find the appropriate columns to match from.
FLAG_log_fold = re.findall("log_fold",VarGridSet[q])
FLAG_adjp = re.findall("adjp",VarGridSet[q])
FLAG_rawp = re.findall("rawp",VarGridSet[q])
FLAG_wiki = re.findall("Wiki",VarGridSet[q])
FLAG_pc = re.findall("Protein Classes",VarGridSet[q])
FLAG_avg = re.findall("avg",VarGridSet[q])
if(len(FLAG_log_fold) > 0 or len(FLAG_adjp) > 0 or len(FLAG_rawp) > 0 or len(FLAG_wiki) > 0 or len(FLAG_pc) > 0 or len(FLAG_avg) > 0):
break
q_box.append(q)
q_barrel.append((q_box))
Values_List = []
HeaderList = []
TitleList = self.myGrid.GetCellValue(self.GridRowEvent, 0)
for i in VariableBox:
HeaderList.append(self.myGrid.GetCellValue(0, i))
for box in q_barrel:
output_box = []
for value in box:
output_var = self.myGrid.GetCellValue(self.GridRowEvent, value)
output_box.append(float(output_var))
Values_List.append((output_box))
self.control.write("Plotting values from: " + str(self.myGrid.GetCellValue(self.GridRowEvent, 0)) + "\n")
Output_Values_List = []
Output_std_err = []
for box in Values_List:
T = 0
for item in box:
T = T + item
output_item = T / float(len(box))
Output_Values_List.append(output_item)
for box in Values_List:
box_std = np.std(box)
box_power = np.power((len(box)), 0.5)
std_err = box_std / float(box_power)
Output_std_err.append(std_err)
n_groups = len(Output_Values_List)
#PLOTTING STARTS --
means_men = Output_Values_List
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
index = np.arange(n_groups)
bar_width = 0.35
pos = bar_width / float(2)
opacity = 0.4
error_config = {'ecolor': '0.3'}
with warnings.catch_warnings():
rects1 = plt.bar((index + pos), Output_Values_List, bar_width,
alpha=opacity,
color='b',
yerr=Output_std_err,
label="")
#plt.title(self.myGrid.GetCellValue(self.GridRowEvent, 2))
plt.title(TitleList)
plt.xticks(index + bar_width, HeaderList)
plt.legend()
plt.tight_layout()
plt.show()
#-- PLOTTING STOPS
except Exception:
self.control.write("Plot failed to output... only applicalbe for the file with prefix DATASET")
def PrintGraphVariables(self, event):
#This function is a part of the pop-up menu for the table: it prints the variables for the expression levels. Used for testing mainly.
Wikipathway_Flag = 0
Protein_Flag = 0
VarGridSet = []
for i in range(100):
p = self.myGrid.GetCellValue(0, i)
VarGridSet.append(p)
for i in VarGridSet:
y = re.findall("WikiPathways", i)
if len(y) > 0:
Wikipathway_Flag = 1
break
if Wikipathway_Flag == 0:
for i in VarGridSet:
y = re.findall("Select Protein Classes", i)
if len(y) > 0:
Protein_Flag = 1
break
if Protein_Flag == 1:
VariableBox = []
for i in range(len(VarGridSet)):
y = re.findall("avg", VarGridSet[i])
if(len(y) > 0):
VariableBox.append(i)
if Wikipathway_Flag == 1:
VariableBox = []
for i in range(len(VarGridSet)):
y = re.findall("avg", VarGridSet[i])
if(len(y) > 0):
VariableBox.append(i)
q_barrel = []
for i in VariableBox:
q_box = []
q = i
for p in range(500):
if(q < 0):
break
q = q - 1
FLAG_log_fold = re.findall("log_fold",VarGridSet[q])
FLAG_adjp = re.findall("adjp",VarGridSet[q])
FLAG_rawp = re.findall("rawp",VarGridSet[q])
FLAG_wiki = re.findall("Wiki",VarGridSet[q])
FLAG_pc = re.findall("Protein Classes",VarGridSet[q])
FLAG_avg = re.findall("avg",VarGridSet[q])
if(len(FLAG_log_fold) > 0 or len(FLAG_adjp) > 0 or len(FLAG_rawp) > 0 or len(FLAG_wiki) > 0 or len(FLAG_pc) > 0 or len(FLAG_avg) > 0):
break
q_box.append(q)
q_barrel.append((q_box))
self.control.write("Selected Row: " + str(self.myGrid.GetCellValue(self.GridRowEvent, 0)) + "\n")
self.control.write("Selected Columns: " + str(q_barrel) + "\n")
Values_List = []
HeaderList = []
for i in VariableBox:
HeaderList.append(self.myGrid.GetCellValue(0, i))
for box in q_barrel:
output_box = []
for value in box:
output_var = self.myGrid.GetCellValue(self.GridRowEvent, value)
output_box.append(float(output_var))
Values_List.append((output_box))
self.control.write("Selected Values: " + str(Values_List) + "\n")
def InteractiveTabChoose(self, event):
#If the interactive tab is chosen, a plot will immediately appear with the default variables.
try:
#The PCA and Heatmap flags are set; a different UI will appear for each of them.
PCA_RegEx = re.findall("PCA", self.DirFile)
Heatmap_RegEx = re.findall("hierarchical", self.DirFile)
if(self.nb.GetSelection() == 2):
if(len(PCA_RegEx) > 0 or len(Heatmap_RegEx) > 0):
self.InteractiveRun(event)
except:
pass
def getDatasetVariables(self):
for file in os.listdir(self.main_results_directory):
if 'AltAnalyze_report' in file and '.log' in file:
log_file = unique.filepath(self.main_results_directory+'/'+file)
log_contents = open(log_file, "rU")
species = ' species: '
platform = ' method: '
for line in log_contents:
line = line.rstrip()
if species in line:
self.species = string.split(line,species)[1]
if platform in line:
self.platform = string.split(line,platform)[1]
try:
self.supported_genesets = UI.listAllGeneSetCategories(self.species,'WikiPathways','gene-mapp')
self.geneset_type = 'WikiPathways'
except Exception:
try:
self.supported_genesets = UI.listAllGeneSetCategories(self.species,'GeneOntology','gene-mapp')
self.geneset_type = 'GeneOntology'
except Exception:
self.supported_genesets = []
self.geneset_type = 'None Selected'
#print 'Using',self.geneset_type, len(self.supported_genesets),'pathways'
break
try:
for file in os.listdir(self.main_results_directory+'/ExpressionOutput'):
if 'DATASET' in file:
dataset_file = unique.filepath(self.main_results_directory+'/ExpressionOutput/'+file)
for line in open(dataset_file,'rU').xreadlines():
self.dataset_file_length = len(string.split(line,'\t'))
break
except Exception:
pass
try:
if self.dataset_file_length<50:
self.dataset_file_length=50
except Exception:
self.dataset_file_length=50
self.myGrid.CreateGrid(100, self.dataset_file_length) ### Re-set the grid width based on the DATASET- file width
def OnOpen(self, event):
#Bound to the open tab from the menu and the "Open Project" button.
openFileDialog = wx.DirDialog(None, "Choose project", "", wx.DD_DEFAULT_STYLE | wx.DD_DIR_MUST_EXIST)
if openFileDialog.ShowModal() == wx.ID_CANCEL:
return
#self.main_results_directory is the path of our project's main directory.
self.main_results_directory = openFileDialog.GetPath()
if (len(self.main_results_directory) > 0):
if self.species == '':
self.getDatasetVariables()
self.SearchArray = []
self.SearchArrayFiltered = []
self.control.write("Working..." + "\n")
#FLAG COLLECT
root = 'Data'
for (dirpath, dirnames, filenames) in os.walk(root):
for dirname in dirnames:
#fullpath = os.path.join(dirpath, dirname)
fullpath = currentDirectory+'/'+dirpath+'/'+dirname
for filename in sorted(filenames):
if filename == "location.txt":
#file_fullpath = unique.filepath(os.path.join(dirpath, filename))
file_fullpath = currentDirectory+'/'+dirpath+'/'+filename
file_location = open(file_fullpath, "r")
fl_array = []
for line in file_location:
line = line.rstrip(); line = string.replace(line,'"','')
line = line.split("\r")
if len(line) > 1:
fl_array.append(line[0])
fl_array.append(line[1])
else:
fl_array.append(line[0])
file_location.close()
#if dirname == 'ExonGraph': print fl_array
if(len(fl_array) == 3):
fl_array.append(dirpath)
self.SearchArray.append(fl_array)
self.control.write("Opening project at: " + self.main_results_directory + "\n")
self.browser2.DeleteAllItems()
#SEARCH USING FLAGS
count = 0
for FLAG in self.SearchArray:
                if((FLAG[0][-1] != "/") and (FLAG[0][-1] != "\\")):
                    SearchingFlag = FLAG[0] + "/"
                else:
                    SearchingFlag = FLAG[0]
SearchingFlagPath = self.main_results_directory + "/" + SearchingFlag
try:
SFP_Contents = os.listdir(SearchingFlagPath)
for filename in SFP_Contents:
Search_base = FLAG[1]
Search_base = Search_base.split(":")
Search_base = Search_base[1]
Split_Extension = str(FLAG[2])
Split_Extension = Split_Extension.split(":")
S_E = str(Split_Extension[1]).split(",")
GOOD_FLAG = 0
if(Search_base != "*"):
for i in S_E:
if(filename[-4:] == i):
GOOD_FLAG = 1
if(Search_base != "*"):
candidate = re.findall(Search_base, filename)
if(Search_base == "*"):
candidate = "True"
GOOD_FLAG = 1
if (len(Search_base) == 0 or GOOD_FLAG == 0):
continue
if len(candidate) > 0:
self.SearchArrayFiltered.append(FLAG)
except:
continue
count = count + 1
#AVAILABLE DATA SET
try:
shutil.rmtree("AvailableData")
except:
pass
for i in self.SearchArrayFiltered:
AvailablePath = "Available" + i[3]
if '\\' in AvailablePath: ### Windows
AvailablePath = string.replace(AvailablePath,'/','\\')
if '/' in AvailablePath:
Path_List = AvailablePath.split("/")
else:
Path_List = AvailablePath.split("\\")
Created_Directory = ""
for directorynum in range(len(Path_List)):
if directorynum == 0:
Created_Directory = Created_Directory + Path_List[directorynum]
try:
os.mkdir(Created_Directory)
except:
continue
else:
Created_Directory = Created_Directory + "/" + Path_List[directorynum]
try:
os.mkdir(Created_Directory)
except:
continue
#TOP BROWSER SET
root = 'AvailableData'
color_root = [253, 253, 253]
self.tree.DeleteAllItems()
self.ids = {root : self.tree.AddRoot(root)}
self.analyzeSplicing=False
for (dirpath, dirnames, filenames) in os.walk(root):
#print 'x',[dirpath, dirnames, filenames]#;sys.exit()
for dirname in dirnames:
#print dirpath, dirname
if 'Splicing' in dirpath: self.analyzeSplicing=True
fullpath = os.path.join(dirpath, dirname)
#print currentDirectory+'/'+dirpath
self.ids[fullpath] = self.tree.AppendItem(self.ids[dirpath], dirname)
DisplayColor = [255, 255, 255]
DisplayColor[0] = color_root[0] - len(dirpath)
DisplayColor[1] = color_root[1] - len(dirpath)
DisplayColor[2] = color_root[2] - len(dirpath)
self.tree.SetItemBackgroundColour(self.ids[fullpath], DisplayColor)
for i in self.SearchArrayFiltered:
SearchRoot = "Available" + i[3]
if(SearchRoot == fullpath):
SearchSplit = i[1].split(":")
SearchSplit = SearchSplit[1]
SearchSplit = SearchSplit + ";" + i[0]
SearchSplit = SearchSplit + ";" + i[2]
DisplayColor = [130, 170, 250]
self.tree.SetItemData(self.ids[fullpath],wx.TreeItemData(SearchSplit))
self.tree.SetItemBackgroundColour(self.ids[fullpath], DisplayColor)
self.tree.SetItemBackgroundColour(self.ids[root], [100, 140, 240])
self.tree.Expand(self.ids[root])
try: self.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.SelectedTopTreeID, self.tree)
except Exception: pass
#OPENING DISPLAY
try:
self.LOGO.Destroy()
except:
pass
self.png = wx.Image(rootDirectory+"/Config/no-image-available.png", wx.BITMAP_TYPE_ANY).ConvertToBitmap()
self.LOGO = wx.StaticBitmap(self.page1, -1, self.png, (0,0), (self.png.GetWidth(), self.png.GetHeight()), style=wx.ALIGN_CENTER)
imgsizer_v = wx.BoxSizer(wx.VERTICAL)
imgsizer_v.Add(self.LOGO, proportion=0, flag=wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL)
self.page1.SetSizer(imgsizer_v)
self.page1.Layout()
self.control.write("Resetting grid..." + "\n")
self.control.write("Currently displaying: " + "SUMMARY" + "\n")
self.myGrid.ClearGrid()
if 'ExpressionInput' in self.main_results_directory:
self.main_results_directory = string.split(self.main_results_directory,'ExpressionInput')[0]
if 'AltResults' in self.main_results_directory:
self.main_results_directory = string.split(self.main_results_directory,'AltResults')[0]
if 'ExpressionOutput' in self.main_results_directory:
self.main_results_directory = string.split(self.main_results_directory,'ExpressionOutput')[0]
if 'GO-Elite' in self.main_results_directory:
self.main_results_directory = string.split(self.main_results_directory,'GO-Elite')[0]
if 'ICGS' in self.main_results_directory:
self.main_results_directory = string.split(self.main_results_directory,'ICGS')[0]
if 'DataPlots' in self.main_results_directory:
self.main_results_directory = string.split(self.main_results_directory,'DataPlots')[0]
if 'AltExpression' in self.main_results_directory:
self.main_results_directory = string.split(self.main_results_directory,'AltExpression')[0]
if 'AltDatabase' in self.main_results_directory:
self.main_results_directory = string.split(self.main_results_directory,'AltDatabase')[0]
if 'ExonPlots' in self.main_results_directory:
self.main_results_directory = string.split(self.main_results_directory,'ExonPlots')[0]
if 'SashimiPlots' in self.main_results_directory:
self.main_results_directory = string.split(self.main_results_directory,'SashimiPlots')[0]
opening_display_folder = self.main_results_directory + "/ExpressionOutput"
try:
list_contents = os.listdir(opening_display_folder)
target_file = ""
for file in list_contents:
candidate = re.findall("SUMMARY", file)
if len(candidate) > 0:
target_file = file
break
except Exception:
opening_display_folder = self.main_results_directory
list_contents = os.listdir(opening_display_folder)
for file in list_contents:
candidate = re.findall(".log", file)
if len(candidate) > 0:
target_file = file ### get the last log file
target_file = unique.filepath(opening_display_folder + "/" + target_file)
opened_target_file = open(target_file, "r")
opened_target_file_contents = []
for line in opened_target_file:
line = line.rstrip(); line = string.replace(line,'"','')
line = line.split("\t")
if len(line)==1: line += ['']*5
opened_target_file_contents.append((line))
self.table_length = len(opened_target_file_contents)
for cell in self.ColoredCellList:
try: self.myGrid.SetCellBackgroundColour(cell[0], cell[1], wx.WHITE)
except Exception: pass
self.ColoredCellList = []
x_count = 0
for item_list in opened_target_file_contents:
y_count = 0
for item in item_list:
try:
self.myGrid.SetCellValue(x_count, y_count, item)
except Exception:
pass ### if the length of the row is 0
if(x_count == 0):
self.myGrid.SetCellFont(x_count, y_count, wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD))
y_count = y_count + 1
x_count = x_count + 1
self.myGrid.AutoSize()
for i in range(50):
colsize = self.myGrid.GetColSize(i)
if(colsize > 200):
self.myGrid.SetColSize(i, 200)
self.page2.Layout()
#This line always sets the opening display to the "Table" tab.
self.nb.SetSelection(0)
def OnOpenSingleFile(self, event):
#Opens only one file as opposed to the whole project; possibly unstable and needs further testing.
openFileDialog = wx.FileDialog(self, "", "", "", "", wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
if openFileDialog.ShowModal() == wx.ID_CANCEL:
return
single_input_stream = openFileDialog.GetPath()
self.control.write(str(single_input_stream) + "\n")
if single_input_stream[-4:] == ".txt":
self.myGrid.ClearGrid()
self.DirFileTxt = single_input_stream
self.DirFile = single_input_stream
table_file = open(self.DirFileTxt, "r")
table_file_contents = []
for line in table_file:
line = line.rstrip(); line = string.replace(line,'"','')
line = line.split("\t")
if(len(table_file_contents) >= 5000):
break
table_file_contents.append((line))
for cell in self.ColoredCellList:
self.myGrid.SetCellBackgroundColour(cell[0], cell[1], wx.WHITE)
self.ColoredCellList = []
x_count = 0
for item_list in table_file_contents:
y_count = 0
for item in item_list:
self.myGrid.SetCellValue(x_count, y_count, item)
if(x_count == 0):
self.myGrid.SetCellFont(x_count, y_count, wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD))
y_count = y_count + 1
x_count = x_count + 1
self.myGrid.AutoSize()
for i in range(50):
try:
colsize = self.myGrid.GetColSize(i)
if(colsize > 200):
self.myGrid.SetColSize(i, 200)
except Exception: pass
self.page2.Layout()
if single_input_stream[-4:] == ".png":
self.myGrid.ClearGrid()
try:
self.LOGO.Destroy()
except:
pass
self.png = wx.Image(single_input_stream, wx.BITMAP_TYPE_ANY).ConvertToBitmap()
self.LOGO = wx.StaticBitmap(self.page1, -1, self.png, (0,0), (self.png.GetWidth(), self.png.GetHeight()), style=wx.ALIGN_CENTER)
imgsizer_v = wx.BoxSizer(wx.VERTICAL)
imgsizer_v.Add(self.LOGO, proportion=0, flag=wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL)
self.page1.SetSizer(imgsizer_v)
self.page1.Layout()
if single_input_stream[-4:] == ".pdf":
#http://wxpython.org/Phoenix/docs/html/lib.pdfviewer.html
pass
def OnSave(self, event):
#Save function is currently not implemented but is a priority for future updates.
saveFileDialog = wx.FileDialog(self, "", "", "", "", wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
if saveFileDialog.ShowModal() == wx.ID_CANCEL:
return
def OnSearch(self, event):
#This handles the search prompt pop-up box when using "search -> table" from the status bar menu.
popup = wx.TextEntryDialog(None, "Enter filter for results.", "Search", "Enter search here.")
if popup.ShowModal()==wx.ID_OK:
answer=popup.GetValue()
popup.Destroy()
else:
popup.Destroy()
return
def TreeSearch(self, event):
#Search tree function: searches the tree for a given phrase and opens the tree to that object.
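        #Illustrative walk (tree contents hypothetical): a search for "PCA" that resolves to
        #"AvailableData/DataPlots/PCA" expands "AvailableData", then "AvailableData/DataPlots",
        #and finally selects "AvailableData/DataPlots/PCA" through self.ids[treepath] below.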
popup = wx.TextEntryDialog(None, "Search the browser tree for directories and files.", "Search", "Enter search here.")
if popup.ShowModal()==wx.ID_OK:
answer=popup.GetValue()
self.control.write("K" + str(answer) + "\n")
os.chdir(currentDirectory) ### NS-91615 alternative to __file__
rootman = "AvailableData"
search_box = []
found = ""
for (dirpath, dirnames, filenames) in os.walk(rootman):
for dirname in dirnames:
fullpath = dirpath + "/" + dirname
search_box.append(fullpath)
self.control.write("Searching..." + "\n")
for path in search_box:
path2 = path.split("/")
search_candidate = path2[-1]
self.control.write(search_candidate + " " + str(answer) + "\n")
if(str(answer) == search_candidate):
found = path
break
self.control.write(found + "\n")
tree_recreate = found.split("/")
treepath = ""
self.control.write(str(range(len(tree_recreate))) + "\n")
tree_length = len(tree_recreate)
last_tree_value = len(tree_recreate) - 1
for i in range(tree_length):
self.control.write(str(i) + "\n")
if(i == 0):
self.tree.Expand(self.ids[tree_recreate[i]])
treepath = treepath + tree_recreate[i]
self.control.write(treepath + "\n")
if(i > 0 and i < last_tree_value):
treepath = treepath + "/" + tree_recreate[i]
self.control.write(treepath + "\n")
self.tree.Expand(self.ids[treepath])
if(i == last_tree_value):
treepath = treepath + "/" + tree_recreate[i]
self.control.write(treepath + "\n")
self.tree.SelectItem(self.ids[treepath])
popup.Destroy()
else:
popup.Destroy()
return
def GridSearch(self, event):
        #Search table function: this searches the table and highlights the search query in the table; also zooms to the nearest match.
popup = wx.TextEntryDialog(None, "Search the table.", "Search", "Enter search here.")
if popup.ShowModal()==wx.ID_OK:
PageDownFound = "False"
match_count = 0
answer=popup.GetValue()
for cell in self.ColoredCellList:
self.myGrid.SetCellBackgroundColour(cell[0], cell[1], wx.WHITE)
self.ColoredCellList = []
            if(self.table_length > 5100):
                y_range = range(5100)
            else:
                y_range = range(self.table_length)
x_range = range(100)
y_count = 0
for number in y_range:
x_count = 0
for number in x_range:
cellvalue = self.myGrid.GetCellValue(y_count, x_count)
gridmatch = re.findall(answer, cellvalue)
if(len(gridmatch) > 0):
if(PageDownFound == "False"):
PageScrollY = y_count
PageScrollX = x_count
PageDownFound = "True"
match_count = match_count + 1
self.ColoredCellList.append((y_count, x_count))
self.myGrid.SetCellBackgroundColour(y_count, x_count, (255, 255, 125))
x_count = x_count + 1
y_count = y_count + 1
#"MakeCellVisible" zooms to the given coordinates.
self.myGrid.MakeCellVisible(PageScrollY, PageScrollX)
terminal_list = []
for cell in self.ColoredCellList:
newrow = cell[0] + 1
newcolumn = cell[1] + 1
terminal_list.append((newrow, newcolumn))
self.control.write(str(match_count) + " matches found for " + answer + "\n")
self.control.write("At positions (row, column): " + str(terminal_list) + "\n")
popup.Destroy()
self.nb.SetSelection(0)
else:
popup.Destroy()
return
def FilterTable(self, event):
        #The filter function displays ONLY the rows that have matches for the given search. It does not delete the filtered-out data; the full table data remains intact and usable.
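        #Minimal sketch of the match rule used below (values assumed): both the filter phrase
        #and each row are upper-cased before matching, so a phrase of "tp53" becomes "TP53" and
        #re.findall("TP53", ...) keeps the row "tp53\t2.1\t0.03"; row 0 (the header) is always kept.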
popup = wx.TextEntryDialog(None, "Filter the table.", "Search", "Enter filter phrase.")
if popup.ShowModal()==wx.ID_OK:
self.myGrid.ClearGrid()
answer=popup.GetValue()
try:
table_file = open(self.DirFileTxt, "r")
table_file_contents = []
count = 0
for line in table_file:
line = line.rstrip(); line = string.replace(line,'"','')
regex_test = re.findall(answer.upper(), line.upper())
line = line.split("\t")
if(len(regex_test) > 0 or count == 0):
if(len(table_file_contents) >= 5100):
break
table_file_contents.append((line))
count = count + 1
for cell in self.ColoredCellList:
self.myGrid.SetCellBackgroundColour(cell[0], cell[1], wx.WHITE)
self.ColoredCellList = []
x_count = 0
for item_list in table_file_contents:
y_count = 0
for item in item_list:
self.myGrid.SetCellValue(x_count, y_count, item)
if(x_count == 0):
self.myGrid.SetCellFont(x_count, y_count, wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD))
y_count = y_count + 1
x_count = x_count + 1
self.myGrid.AutoSize()
for i in range(50):
colsize = self.myGrid.GetColSize(i)
if(colsize > 200):
self.myGrid.SetColSize(i, 200)
self.page2.Layout()
except:
self.control.write("Unable to open txt." + "\n")
self.nb.SetSelection(0)
def SortTable(self, event):
#The sort function re-writes the table sorting by either descending or ascending values in a given column.
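        #Sketch of the expected input (mapping assumed): self.sortdict maps a column letter to
        #its index, e.g. {"A": 0, "B": 1, "C": 2}, so an answer of "B" sorts on column 1; each
        #sort value is converted to float when possible so numeric columns sort numerically.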
popup = wx.TextEntryDialog(None, "Sort the table.", "Sort", "Which column to sort from?")
if popup.ShowModal()==wx.ID_OK:
self.myGrid.ClearGrid()
answer=popup.GetValue()
answer = answer.upper()
try:
table_file = open(self.DirFileTxt, "r")
table_file_contents = []
pre_sort2 = []
header = []
t_c = 0
column_clusters_flat = 0
for line in table_file:
line=string.replace(line,'Insufficient Expression','0')
try:
line = line.rstrip(); line = string.replace(line,'"','')
line = line.split("\t")
if(t_c == 0):
header.append((line))
t_c = t_c + 1
continue
if(line[0] == "column_clusters-flat"):
header.append((line))
column_clusters_flat = 1
continue
line_sort_select = line[self.sortdict[answer]]
pre_sort1 = []
count = 0
for i in line:
if(count == 0):
try:
pre_sort1.append(float(line_sort_select))
except:
pre_sort1.append(line_sort_select)
pre_sort1.append(i)
if(count == self.sortdict[answer]):
count = count + 1
continue
if(count != 0):
pre_sort1.append(i)
count = count + 1
pre_sort2.append((pre_sort1))
except:
continue
table_file_contents.append(header[0])
if(column_clusters_flat == 1):
table_file_contents.append(header[1])
pre_sort2 = sorted(pre_sort2, reverse = True)
for line in pre_sort2:
try:
final_count1 = 0
final_count2 = 1
send_list = []
for item in line:
if(final_count1 == 0):
send_list.append(line[final_count2])
if(final_count1 == self.sortdict[answer]):
send_list.append(str(line[0]))
if(final_count1 != 0 and final_count1 != self.sortdict[answer]):
if(final_count1 < self.sortdict[answer]):
send_list.append(line[final_count2])
if(final_count1 > self.sortdict[answer]):
send_list.append(line[final_count1])
final_count1 = final_count1 + 1
final_count2 = final_count2 + 1
if(len(table_file_contents) >= 5100):
break
table_file_contents.append((send_list))
except:
continue
n_table_file_contents = []
if(answer.upper() == "A"):
for i in range(len(table_file_contents)):
if(i == 0):
max_length = len(table_file_contents[i])
if(max_length < len(table_file_contents[i])):
n_l = table_file_contents[i][2:]
else:
n_l = table_file_contents[i]
n_table_file_contents.append((n_l))
table_file_contents = n_table_file_contents
for cell in self.ColoredCellList:
self.myGrid.SetCellBackgroundColour(cell[0], cell[1], wx.WHITE)
self.ColoredCellList = []
x_count = 0
for item_list in table_file_contents:
y_count = 0
for item in item_list:
self.myGrid.SetCellValue(x_count, y_count, item)
if(x_count == 0):
self.myGrid.SetCellFont(x_count, y_count, wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD))
y_count = y_count + 1
x_count = x_count + 1
self.myGrid.AutoSizeRows(True)
for i in range(50):
colsize = self.myGrid.GetColSize(i)
if(colsize > 200):
self.myGrid.SetColSize(i, 200)
self.page2.Layout()
except:
self.control.write("Unable to sort." + "\n")
self.nb.SetSelection(0)
def FilterTablefromButton(self, event):
#Same filter function as before, but this function is bound to the button in the top-right corner of the main GUI.
self.myGrid.ClearGrid()
        #In single-line text boxes, always pass 0 to GetLineText; 0 addresses the first and only line.
answer = self.filterbox.GetLineText(0)
try:
try:
self.myGrid.DeleteRows(100, self.AppendTotal, True)
except:
pass
table_file_contents = []
count = 0
for line in open(self.DirFileTxt,'rU').xreadlines():
line = line.rstrip(); line = string.replace(line,'"','')
regex_test = re.findall(answer.upper(), line.upper())
line = line.split("\t")
if(len(regex_test) > 0 or count == 0):
if(len(table_file_contents) >= 5100):
break
table_file_contents.append((line))
count = count + 1
self.table_length = len(table_file_contents)
self.control.write("Table Length: " + str(self.table_length) + "\n")
if(self.table_length > 100):
self.AppendTotal = self.table_length - 100
self.myGrid.AppendRows(self.AppendTotal, True)
for cell in self.ColoredCellList:
self.myGrid.SetCellBackgroundColour(cell[0], cell[1], wx.WHITE)
self.ColoredCellList = []
x_count = 0
for item_list in table_file_contents:
y_count = 0
for item in item_list:
self.myGrid.SetCellValue(x_count, y_count, item)
if(x_count == 0):
self.myGrid.SetCellFont(x_count, y_count, wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD))
y_count = y_count + 1
x_count = x_count + 1
self.myGrid.AutoSize()
for i in range(50):
colsize = self.myGrid.GetColSize(i)
if(colsize > 200):
self.myGrid.SetColSize(i, 200)
self.page2.Layout()
except:
self.control.write("Unable to open txt." + "\n")
self.nb.SetSelection(0)
def SortTablefromButton(self, event):
#Same sort function as before, but this function is bound to the button in the top-right corner of the main GUI.
answer = self.sortbox.GetLineText(0)
self.myGrid.ClearGrid()
answer = answer.upper()
try:
table_file = open(self.DirFileTxt, "r")
table_file_contents = []
pre_sort2 = []
header = []
t_c = 0
column_clusters_flat = 0
for line in table_file:
line=string.replace(line,'Insufficient Expression','0')
try:
line = line.rstrip(); line = string.replace(line,'"','')
line = line.split("\t")
if(t_c == 0):
header.append((line))
t_c = t_c + 1
continue
if(line[0] == "column_clusters-flat"):
header.append((line))
column_clusters_flat = 1
continue
line_sort_select = line[self.sortdict[answer]]
pre_sort1 = []
count = 0
for i in line:
if(count == 0):
try:
pre_sort1.append(float(line_sort_select))
except:
pre_sort1.append(line_sort_select)
pre_sort1.append(i)
if(count == self.sortdict[answer]):
count = count + 1
continue
if(count != 0):
pre_sort1.append(i)
count = count + 1
pre_sort2.append((pre_sort1))
except:
continue
table_file_contents.append(header[0])
if(column_clusters_flat == 1):
table_file_contents.append(header[1])
if(self.DescendingRadio.GetValue() == True):
pre_sort2 = sorted(pre_sort2, reverse = True)
if(self.AscendingRadio.GetValue() == True):
pre_sort2 = sorted(pre_sort2)
for line in pre_sort2:
try:
final_count1 = 0
final_count2 = 1
send_list = []
for item in line:
if(final_count1 == 0):
send_list.append(line[final_count2])
if(final_count1 == self.sortdict[answer]):
send_list.append(str(line[0]))
if(final_count1 != 0 and final_count1 != self.sortdict[answer]):
if(final_count1 < self.sortdict[answer]):
send_list.append(line[final_count2])
if(final_count1 > self.sortdict[answer]):
send_list.append(line[final_count1])
final_count1 = final_count1 + 1
final_count2 = final_count2 + 1
if(len(table_file_contents) >= 5100):
break
table_file_contents.append((send_list))
except:
continue
n_table_file_contents = []
if(answer.upper() == "A"):
for i in range(len(table_file_contents)):
if(i == 0):
max_length = len(table_file_contents[i])
if(max_length < len(table_file_contents[i])):
n_l = table_file_contents[i][2:]
else:
n_l = table_file_contents[i]
n_table_file_contents.append((n_l))
table_file_contents = n_table_file_contents
for cell in self.ColoredCellList:
self.myGrid.SetCellBackgroundColour(cell[0], cell[1], wx.WHITE)
self.ColoredCellList = []
x_count = 0
for item_list in table_file_contents:
y_count = 0
for item in item_list:
self.myGrid.SetCellValue(x_count, y_count, item)
if(x_count == 0):
self.myGrid.SetCellFont(x_count, y_count, wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD))
y_count = y_count + 1
x_count = x_count + 1
self.myGrid.AutoSizeRows(True)
for i in range(50):
colsize = self.myGrid.GetColSize(i)
if(colsize > 200):
self.myGrid.SetColSize(i, 200)
self.page2.Layout()
except:
self.control.write("Unable to sort." + "\n")
self.nb.SetSelection(0)
def SelectedTopTreeID(self, event):
item = event.GetItem()
try:
#This handles the selection of an item in the TOP tree browser.
item = event.GetItem()
itemObject = self.tree.GetItemData(item).GetData()
SearchObject = itemObject.split(";")
SearchSuffix = SearchObject[0]
SearchPath = SearchObject[1]
SearchExtension = SearchObject[2]
SearchExtension = SearchExtension.split(":")
SearchExtension = SearchExtension[1:]
SearchExtension = SearchExtension[0]
SearchExtension = SearchExtension.split(",")
#SELECTION IMPLEMENT
ID_Strings = []
self.TopSelectList = []
self.TopID = SearchSuffix
root = self.main_results_directory + "/" + SearchPath
root_display = self.main_results_directory + "/" + SearchPath
root_contents = os.listdir(root)
root_contents_display = os.listdir(root)
for obj in root_contents:
if(SearchSuffix != "*"):
FindList = re.findall(SearchSuffix, obj)
if(len(FindList) > 0):
self.TopSelectList.append(obj)
#print obj
self.browser2.DeleteAllItems()
for filename in root_contents:
if(SearchSuffix != "*"):
FindList2 = re.findall(SearchSuffix, filename)
if(len(FindList2) > 0):
display_name = filename[0:-4]
ID_Strings.append(display_name)
else:
if(filename[-4] == "."):
display_name = filename[0:-4]
if "AVERAGE-" not in display_name and "COUNTS-" not in display_name:
ID_Strings.append(display_name)
ID_Strings = list(set(ID_Strings))
change_path = currentDirectory + "/UseDir" ### NS-91615 alternative to __file__
shutil.rmtree("UseDir")
os.mkdir("UseDir")
#self.control.write(ID_Strings[0] + "\n")
os.chdir(change_path)
for marker in ID_Strings:
try:
os.mkdir(marker)
except:
pass
os.chdir(currentDirectory) ### NS-91615 alternative to __file__
root = "UseDir"
color_root2 = [223, 250, 223]
self.ids2 = {root : self.browser2.AddRoot(root)}
for (dirpath, dirnames, filenames) in os.walk(root):
color_root2[0] = color_root2[0] - 1
color_root2[1] = color_root2[1] - 0
color_root2[2] = color_root2[2] - 1
for dirname in dirnames:
#self.control.write(str(SearchExtension) + "\n")
Extensions = dirname + "|" + str(SearchExtension) + "|" + str(SearchPath)
fullpath = os.path.join(dirpath, dirname)
self.ids2[fullpath] = self.browser2.AppendItem(self.ids2[dirpath], dirname)
self.browser2.SetItemData(self.ids2[fullpath],wx.TreeItemData(Extensions))
T = re.findall("DATASET", fullpath)
if(len(T) > 0):
self.browser2.SetItemBackgroundColour(self.ids2[fullpath], [250, 100, 100])
else:
self.browser2.SetItemBackgroundColour(self.ids2[fullpath], [130, 170, 250])
self.browser2.SetItemBackgroundColour(self.ids2[root], [110, 150, 250])
self.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.SelectedBottomTreeID, self.browser2)
self.browser2.ExpandAll()
#OPENING DISPLAY
display_file_selected = ""
TXT_FLAG = 0
PNG_FLAG = 0
if(root_display[-1] != "/"):
root_display = root_display + "/"
for possible in root_contents_display:
total_filepath = unique.filepath(root_display + possible)
if(possible[-4:] == ".txt"):
self.control.write("Displaying File: " + str(total_filepath) + "\n")
display_file_selected = total_filepath
break
TXT_FLAG = 0
PNG_FLAG = 0
#self.control.write(str(os.listdir(root)) + "\n")
#self.control.write(str(SearchExtension) + "\n")
for i in SearchExtension:
if(i == ".txt"):
TXT_FLAG = 1
#self.control.write(str(i) + "\n")
if(i == ".png"):
PNG_FLAG = 1
#self.control.write(str(i) + "\n")
if(root_display[-1] != "/"):
root_display = root_display + "/"
Pitch = os.listdir(root)
PitchSelect = Pitch[0]
self.CurrentFile = PitchSelect
#self.control.write(str(PitchSelect) + " " + root_display + "\n")
self.DirFile = unique.filepath(root_display + PitchSelect)
self.IntFileTxt.Clear()
self.IntFileTxt.write(self.DirFile)
self.DirFileTxt = unique.filepath(root_display + PitchSelect + ".txt")
DirFilePng = unique.filepath(root_display + PitchSelect + ".png")
self.myGrid.ClearGrid()
title_name = PitchSelect
try:
self.LOGO.Destroy()
except:
pass
try:
self.PanelTitle.Destroy()
except:
pass
font = wx.Font(16, wx.SWISS, wx.NORMAL, wx.BOLD)
self.PanelTitle = wx.StaticText(self.panel, label=title_name, pos=(5, 7))
self.PanelTitle.SetFont(font)
if(TXT_FLAG == 1):
try:
self.myGrid.DeleteRows(100, self.AppendTotal, True)
except:
pass
try:
#First time the DATASET file is imported
#font = wx.Font(16, wx.DECORATIVE, wx.BOLD, wx.NORMAL)
#self.PanelTitle = wx.StaticText(self.panel, label=title_name, pos=(210, 15))
#self.PanelTitle.SetFont(font)
#table_file = open(self.DirFileTxt, "rU")
table_file_contents = []
column_lengths = []
count=0
for line in open(self.DirFileTxt,'rU').xreadlines():
line = line.rstrip(); line = string.replace(line,'"','')
line = line.split("\t")
column_lengths.append(len(line))
table_file_contents.append((line))
if count>2000: break
count+=1
self.max_column_length = max(column_lengths)
self.table_length = len(table_file_contents)
if(self.table_length > 100 and self.table_length < 5000):
self.AppendTotal = self.table_length - 100
self.myGrid.AppendRows(self.AppendTotal, True)
if(self.table_length >= 5000):
self.AppendTotal = 5000
self.myGrid.AppendRows(self.AppendTotal, True)
for cell in self.ColoredCellList:
self.myGrid.SetCellBackgroundColour(cell[0], cell[1], wx.WHITE)
self.ColoredCellList = []
x_count = 0
try:
for item_list in table_file_contents:
y_count = 0
for item in item_list:
self.myGrid.SetCellValue(x_count, y_count, item)
if(x_count == 0):
self.myGrid.SetCellFont(x_count, y_count, wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD))
y_count = y_count + 1
x_count = x_count + 1
except:
pass
self.myGrid.AutoSize()
for i in range(50):
colsize = self.myGrid.GetColSize(i)
if(colsize > 200):
self.myGrid.SetColSize(i, 200)
self.page2.Layout()
except:
TXT_FLAG = 0
self.control.write("Unable to open txt." + "\n")
try:
self.myGrid.AutoSize()
for i in range(50):
colsize = self.myGrid.GetColSize(i)
if(colsize > 200):
self.myGrid.SetColSize(i, 200)
self.page2.Layout()
except:
pass
if(PNG_FLAG == 1):
try:
open(DirFilePng, "r")
self.png = wx.Image(DirFilePng, wx.BITMAP_TYPE_ANY).ConvertToBitmap()
self.LOGO = wx.StaticBitmap(self.page1, -1, self.png, (0,0), (self.png.GetWidth(), self.png.GetHeight()), style=wx.ALIGN_CENTER)
imgsizer_v = wx.BoxSizer(wx.VERTICAL)
imgsizer_v.Add(self.LOGO, proportion=0, flag=wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL)
self.page1.SetSizer(imgsizer_v)
self.page1.Layout()
except:
PNG_FLAG = 0
self.control.write("Unable to open png." + "\n")
try:
self.root_widget_id = 500
self.root_widget_text = 550
for i in range(self.root_widget_id, self.root_widget_end):
self.heatmap_ids[i].Destroy()
for i in range(self.root_widget_text, self.rwtend):
self.heatmap_ids[i].Destroy()
self.RunButton2.Destroy()
except:
pass
self.InteractivePanelUpdate(event)
if(PNG_FLAG == 1 and TXT_FLAG == 0):
self.nb.SetSelection(1)
self.Layout()
self.page1.Layout()
if(PNG_FLAG == 0 and TXT_FLAG == 1):
self.nb.SetSelection(0)
if(PNG_FLAG == 1 and TXT_FLAG == 1):
self.nb.SetSelection(1)
self.Layout()
self.page1.Layout()
except Exception: pass
def SelectedBottomTreeID(self, event):
#This handles the selection of an item in the BOTTOM tree browser; represents a file most of the time.
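        #The item data is the "|"-delimited string packed in SelectedTopTreeID, e.g. (illustrative):
        #"DATASET-Mm-RNASeq|['.txt', '.png']|ExpressionOutput" -> Parameters[0] = file stem,
        #Parameters[1] = the extension list rendered as a string, Parameters[2] = project-relative path.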
item = event.GetItem()
itemObject = self.browser2.GetItemData(item).GetData()
Parameters = itemObject.split("|")
file_extension = Parameters[1][1:-1]
        file_extension = file_extension.replace("'", "") ### str.replace returns a new string, so the result must be re-assigned
file_extension = file_extension.split(",")
file_exts = []
TXT_FLAG = 0
PNG_FLAG = 0
for i in file_extension:
i = i.replace("'", "")
i = i.replace(" ", "")
file_exts.append(i)
for i in file_exts:
if(i == ".txt"):
TXT_FLAG = 1
if(i == ".png"):
PNG_FLAG = 1
DirPath = self.main_results_directory + "/" + Parameters[2]
if(DirPath[-1] != "/"):
DirPath = DirPath + "/"
DirFile = DirPath + Parameters[0]
self.CurrentFile = DirFile
self.control.write("Displaying file: " + DirFile + "\n")
title_name = DirFile.split("/")
title_name = title_name[-1]
self.DirFile = unique.filepath(DirFile)
self.IntFileTxt.Clear()
self.IntFileTxt.write(self.DirFile)
self.DirFileTxt = DirFile + ".txt"
DirFilePng = DirFile + ".png"
self.myGrid.ClearGrid()
try:
self.LOGO.Destroy()
except:
pass
try:
self.PanelTitle.Destroy()
except:
pass
font = wx.Font(16, wx.SWISS, wx.NORMAL, wx.BOLD)
self.PanelTitle = wx.StaticText(self.panel, label=title_name, pos=(5, 7))
self.PanelTitle.SetFont(font)
#PNG_FLAG and TXT_FLAG are flags that sense the presence of an image or text file.
if(PNG_FLAG == 1):
try:
open(DirFilePng, "r")
self.png = wx.Image(DirFilePng, wx.BITMAP_TYPE_ANY).ConvertToBitmap()
self.LOGO = wx.StaticBitmap(self.page1, -1, self.png, (0,0), (self.png.GetWidth(), self.png.GetHeight()), style=wx.ALIGN_CENTER)
imgsizer_v = wx.BoxSizer(wx.VERTICAL)
imgsizer_v.Add(self.LOGO, proportion=0, flag=wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL)
self.page1.SetSizer(imgsizer_v)
self.page1.Layout()
except:
PNG_FLAG = 0
self.control.write("Unable to open png." + "\n")
if(TXT_FLAG == 1):
try:
self.myGrid.DeleteRows(100, self.AppendTotal, True)
except:
pass
try:
count=0
#table_file = open(self.DirFileTxt, "r")
table_file_contents = []
column_lengths = []
for line in open(self.DirFileTxt,'rU').xreadlines():
line = line.rstrip(); line = string.replace(line,'"','')
line = line.split("\t")
column_lengths.append(len(line))
table_file_contents.append((line))
count+=1
if count>2000:break
self.max_column_length = max(column_lengths)
self.table_length = len(table_file_contents)
if(self.table_length > 100 and self.table_length < 5000):
self.AppendTotal = self.table_length - 100
self.myGrid.AppendRows(self.AppendTotal, True)
if(self.table_length >= 5000):
self.AppendTotal = 5000
self.myGrid.AppendRows(self.AppendTotal, True)
for cell in self.ColoredCellList:
self.myGrid.SetCellBackgroundColour(cell[0], cell[1], wx.WHITE)
self.ColoredCellList = []
x_count = 0
for item_list in table_file_contents:
y_count = 0
for item in item_list:
try: self.myGrid.SetCellValue(x_count, y_count, item) ###Here
except Exception:
### Unclear why this is throwing an error
#print traceback.format_exc()
#print x_count, y_count, item;sys.exit()
pass
if(x_count == 0):
self.myGrid.SetCellFont(x_count, y_count, wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD))
y_count = y_count + 1
x_count = x_count + 1
self.myGrid.AutoSize()
for i in range(50):
colsize = self.myGrid.GetColSize(i)
if(colsize > 200):
self.myGrid.SetColSize(i, 200)
self.page2.Layout()
except:
print traceback.format_exc()
TXT_FLAG = 0
self.control.write("Unable to open txt." + "\n")
DATASET_FIND_FLAG = re.findall("DATASET", self.DirFileTxt)
count=0
if(len(DATASET_FIND_FLAG) > 0):
try:
#table_file = open(self.DirFileTxt, "rU")
table_file_contents = []
pre_sort2 = []
header = []
t_c = 0
column_clusters_flat = 0
answer = "AC"
for line in open(self.DirFileTxt,'rU').xreadlines():
#for line in table_file:
count+=1
if count>2000:
break
try:
line = line.rstrip(); line = string.replace(line,'"','')
line = line.split("\t")
if(t_c == 0):
header.append((line))
t_c = t_c + 1
index=0
for i in line:
if 'ANOVA-rawp' in i: answer = index
index+=1
continue
if(line[0] == "column_clusters-flat"):
header.append((line))
column_clusters_flat = 1
continue
line_sort_select = line[answer]
pre_sort1 = []
count = 0
for i in line:
if(count == 0):
try:
pre_sort1.append(float(line_sort_select))
except:
pre_sort1.append(line_sort_select)
pre_sort1.append(i)
if(count == answer):
count = count + 1
continue
if(count != 0):
pre_sort1.append(i)
count = count + 1
pre_sort2.append((pre_sort1))
except:
continue
table_file_contents.append(header[0])
if(column_clusters_flat == 1):
table_file_contents.append(header[1])
pre_sort2 = sorted(pre_sort2)
for line in pre_sort2:
try:
final_count1 = 0
final_count2 = 1
send_list = []
for item in line:
if(final_count1 == 0):
send_list.append(line[final_count2])
if(final_count1 == answer):
send_list.append(str(line[0]))
if(final_count1 != 0 and final_count1 != answer):
if(final_count1 < answer):
send_list.append(line[final_count2])
if(final_count1 > answer):
send_list.append(line[final_count1])
final_count1 = final_count1 + 1
final_count2 = final_count2 + 1
if(len(table_file_contents) >= 5100):
break
table_file_contents.append((send_list))
except:
continue
for cell in self.ColoredCellList:
self.myGrid.SetCellBackgroundColour(cell[0], cell[1], wx.WHITE)
self.ColoredCellList = []
x_count = 0
try:
for item_list in table_file_contents:
y_count = 0
for item in item_list:
self.myGrid.SetCellValue(x_count, y_count, item)
if(x_count == 0):
self.myGrid.SetCellFont(x_count, y_count, wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD))
y_count = y_count + 1
x_count = x_count + 1
except:
pass
self.myGrid.AutoSizeRows(True)
for i in range(50):
colsize = self.myGrid.GetColSize(i)
if(colsize > 200):
self.myGrid.SetColSize(i, 200)
self.page2.Layout()
except:
self.control.write("Unable to sort." + "\n")
self.nb.SetSelection(0)
self.InteractivePanelUpdate(event)
try:
self.myGrid.AutoSize()
for i in range(50):
colsize = self.myGrid.GetColSize(i)
if(colsize > 200):
self.myGrid.SetColSize(i, 200)
self.page2.Layout()
except:
pass
if(PNG_FLAG == 1 and TXT_FLAG == 0):
self.nb.SetSelection(1)
self.Layout()
self.page1.Layout()
if(PNG_FLAG == 0 and TXT_FLAG == 1):
self.nb.SetSelection(0)
if(PNG_FLAG == 1 and TXT_FLAG == 1):
self.nb.SetSelection(1)
self.Layout()
self.page1.Layout()
def InteractivePanelUpdate(self, event):
        #Both the PCA UI and Heatmap UI share the same panel, so buttons, text boxes, and other
        #GUI elements must be destroyed or hidden whenever a new type of interactivity is selected.
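        #Pattern used here: the static PCA widgets are created once and toggled with Hide()/Show(),
        #while the heatmap widgets (ids 500 and up) are rebuilt per file, so they are Destroy()ed
        #below before the panel is repopulated.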
self.IntFileTxt.Hide()
self.InteractiveFileLabel.Hide()
self.Yes1Label.Hide()
self.No1Label.Hide()
self.D_3DLabel.Hide()
self.D_2DLabel.Hide()
self.IncludeLabelsRadio.Hide()
self.No1Radio.Hide()
self.D_3DRadio.Hide()
self.D_2DRadio.Hide()
self.Opt1Desc.Hide()
self.Opt2Desc.Hide()
self.RunButton1.Hide()
self.Divider1.Hide()
self.InteractiveDefaultMessage.Hide()
try:
self.root_widget_id = 500
self.root_widget_text = 550
for i in range(self.root_widget_id, self.root_widget_end):
self.heatmap_ids[i].Destroy()
for i in range(self.root_widget_text, self.rwtend):
self.heatmap_ids[i].Destroy()
self.RunButton2.Destroy()
except:
pass
PCA_RegEx = re.findall("PCA", self.DirFile)
if(len(PCA_RegEx) > 0):
self.IntFileTxt.Show()
self.InteractiveFileLabel.Show()
self.Yes1Label.Show()
self.No1Label.Show()
self.D_3DLabel.Show()
self.D_2DLabel.Show()
self.IncludeLabelsRadio.Show()
self.No1Radio.Show()
self.D_3DRadio.Show()
self.D_2DRadio.Show()
self.Opt1Desc.Show()
self.Opt2Desc.Show()
self.RunButton1.Show()
self.Divider1.Show()
Heatmap_RegEx = re.findall("hierarchical", self.DirFile)
if(len(Heatmap_RegEx) > 0):
#Heatmap Setup
os.chdir(parentDirectory)
options_open = open(unique.filepath(currentDirectory+"/options.txt"), "rU")
heatmap_array = []
self.heatmap_ids = {}
self.heatmap_translation = {}
supported_geneset_types = UI.getSupportedGeneSetTypes(self.species,'gene-mapp')
supported_geneset_types += UI.getSupportedGeneSetTypes(self.species,'gene-go')
supported_geneset_types_alt = [self.geneset_type]
supported_genesets = self.supported_genesets
for line in options_open:
line = line.split("\t")
variable_name,displayed_title,display_object,group,notes,description,global_default,options = line[:8]
options = string.split(options,'|')
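                ### Illustrative options.txt row (layout assumed; tab-delimited, 8+ columns):
                ###   column_metric<TAB>Column metric<TAB>comboBox<TAB>heatmap<TAB><TAB><TAB>cosine<TAB>cosine|euclidean|correlation
                ### which renders below as a combo box defaulting to "cosine" with three choices.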
if(group == "heatmap"):
if(display_object == "file"):
continue
od = UI.OptionData(variable_name,displayed_title,display_object,notes,options,global_default)
od.setDefaultOption(global_default)
#"""
if variable_name == 'ClusterGOElite':
od.setArrayOptions(['None Selected','all']+supported_geneset_types)
elif variable_name == 'GeneSetSelection':
od.setArrayOptions(['None Selected']+supported_geneset_types_alt)
elif variable_name == 'PathwaySelection':
od.setArrayOptions(['None Selected']+supported_genesets)
elif od.DefaultOption() == '':
od.setDefaultOption(od.Options()[0])
if od.DefaultOption() == '---':
od.setDefaultOption('')#"""
heatmap_array.append(od)
#heatmap_array.append((line[1], line[2], line[7], line[6]))
os.chdir(currentDirectory)
root_widget_y_pos = 45
self.root_widget_id = 500
self.root_widget_text = 550
for od in heatmap_array:
#od.VariableName()
id = wx.NewId()
#print od.VariableName(),od.Options()
self.heatmap_translation[od.VariableName()] = self.root_widget_id
self.heatmap_ids[self.root_widget_text] = wx.StaticText(self.page3, self.root_widget_text, label=od.Display(), pos=(150, root_widget_y_pos))
if(od.DisplayObject() == "comboBox" or od.DisplayObject() == "multiple-comboBox"):
self.heatmap_ids[self.root_widget_id] = wx.ComboBox(self.page3, self.root_widget_id, od.DefaultOption(), (10, root_widget_y_pos), (120,25), od.Options(), wx.CB_DROPDOWN)
else:
self.heatmap_ids[self.root_widget_id] = wx.TextCtrl(self.page3, self.root_widget_id, od.DefaultOption(), (10, root_widget_y_pos), (120,25))
self.root_widget_id = self.root_widget_id + 1
self.root_widget_text = self.root_widget_text + 1
root_widget_y_pos = root_widget_y_pos + 25
self.rwtend = self.root_widget_text
self.root_widget_end = self.root_widget_id
self.RunButton2 = wx.Button(self.page3, id=599, label="Run", pos=(175, (self.root_widget_end + 10)), size=(120, bheight))
self.Bind(wx.EVT_BUTTON, self.InteractiveRun, id=599)
if(len(PCA_RegEx) == 0 and len(Heatmap_RegEx) == 0):
self.InteractiveDefaultMessage.Show()
def ClearVisualPanel(self, event):
#Deletes the current image on the viewing panel. Unstable and mostly broken; may be removed from future versions.
popup = wx.MessageDialog(None, "Are you sure you want to clear the visual panel?", "Warning", wx.YES_NO)
popup_answer = popup.ShowModal()
        if(popup_answer == wx.ID_YES): ### wx.ID_YES == 5103
try:
self.LOGO.Destroy()
self.panel2.Layout()
except:
pass
try:
self.myGrid.ClearGrid()
self.panel2.Layout()
except:
pass
popup.Destroy()
self.control.write("Visual panel cleared." + "\n")
else:
return
def InteractiveRun(self, event):
#This function is bound to the "Run" button on the interactive tab GUI. Generates an interactive plot.
#Currently updates on the panel are a priority and many changes may come with it.
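        #Two dispatch paths follow: heatmaps re-read the widget values collected in
        #self.heatmap_translation and call UI.createHeatMap (or, if remoteCallToAltAnalyze were
        #enabled, shell out with flags such as "--image hierarchical --input <file>.txt"), while
        #PCA plots fall through to UI.performPCA on the matching expression file.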
RegExHeat = re.findall("hierarchical", self.DirFile)
if(len(RegExHeat) > 0):
for VariableName in self.heatmap_translation:
#self.control.write(str(self.heatmap_ids[self.heatmap_translation[VariableName]].GetValue()) + " " + str(VariableName) + " " + str(self.heatmap_ids[self.heatmap_translation[VariableName]]) + "\n")
try:
self.heatmap_translation[VariableName] = str(self.heatmap_ids[self.heatmap_translation[VariableName]].GetValue())
#print self.heatmap_translation[VariableName]
except Exception: pass
try:
#self.control.write(self.DirFile + "\n")
input_file_dir = self.DirFile + ".txt"
column_metric = self.heatmap_translation['column_metric']; #self.control.write(column_metric + "\n")
column_method = self.heatmap_translation['column_method']; #self.control.write(column_method + "\n")
row_metric = self.heatmap_translation['row_metric']; #self.control.write(row_metric + "\n")
row_method = self.heatmap_translation['row_method']; #self.control.write(row_method+ "\n")
color_gradient = self.heatmap_translation['color_selection']; #self.control.write(color_gradient + "\n")
cluster_rows = self.heatmap_translation['cluster_rows']; #self.control.write(cluster_rows + "\n")
cluster_columns = self.heatmap_translation['cluster_columns']; #self.control.write(cluster_columns + "\n")
normalization = self.heatmap_translation['normalization']; #self.control.write(normalization + "\n")
contrast = self.heatmap_translation['contrast']; #self.control.write(contrast + "\n")
transpose = self.heatmap_translation['transpose']; #self.control.write(transpose + "\n")
GeneSetSelection = self.heatmap_translation['GeneSetSelection']; #self.control.write(GeneSetSelection + "\n")
PathwaySelection = self.heatmap_translation['PathwaySelection']; #self.control.write(PathwaySelection + "\n")
OntologyID = self.heatmap_translation['OntologyID']; #self.control.write(OntologyID + "\n")
GeneSelection = self.heatmap_translation['GeneSelection']; #self.control.write(GeneSelection + "\n")
justShowTheseIDs = self.heatmap_translation['JustShowTheseIDs']; #self.control.write(JustShowTheseIDs + "\n")
HeatmapAdvanced = self.heatmap_translation['HeatmapAdvanced']; #self.control.write(HeatmapAdvanced + "\n")
clusterGOElite = self.heatmap_translation['ClusterGOElite']; #self.control.write(ClusterGOElite + "\n")
heatmapGeneSets = self.heatmap_translation['heatmapGeneSets']; #self.control.write(heatmapGeneSets + "\n")
if cluster_rows == 'no': row_method = None
if cluster_columns == 'no': column_method = None
HeatmapAdvanced = (HeatmapAdvanced,)
#print ['JustShowTheseIDs',justShowTheseIDs]
if self.DirFile not in self.heatmap_run:
self.heatmap_run[self.DirFile]=None
### occurs when automatically running the heatmap
column_method = None
row_method = None
color_gradient = 'yellow_black_blue'
normalization = 'median'
translate={'None Selected':'','Exclude Cell Cycle Effects':'excludeCellCycle','Top Correlated Only':'top','Positive Correlations Only':'positive','Perform Iterative Discovery':'driver', 'Intra-Correlated Only':'IntraCorrelatedOnly', 'Perform Monocle':'monocle'}
try:
                    if 'None Selected' in HeatmapAdvanced: pass ### membership test only; a TypeError here means HeatmapAdvanced is not iterable
except Exception: HeatmapAdvanced = ('None Selected')
if ('None Selected' in HeatmapAdvanced and len(HeatmapAdvanced)==1) or 'None Selected' == HeatmapAdvanced: pass
else:
#print HeatmapAdvanced,'kill'
try:
GeneSelection += ' '+string.join(list(HeatmapAdvanced),' ')
for name in translate:
GeneSelection = string.replace(GeneSelection,name,translate[name])
GeneSelection = string.replace(GeneSelection,' ',' ')
if 'top' in GeneSelection or 'driver' in GeneSelection or 'excludeCellCycle' in GeneSelection or 'positive' in GeneSelection or 'IntraCorrelatedOnly' in GeneSelection:
GeneSelection+=' amplify'
except Exception: pass
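                ### Illustrative translation (values assumed): HeatmapAdvanced of
                ### ('Top Correlated Only',) appends ' Top Correlated Only' to GeneSelection, the
                ### loop above rewrites it to ' top', and ' amplify' is then appended as well.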
GeneSetSelection = string.replace(GeneSetSelection,'\n',' ')
GeneSetSelection = string.replace(GeneSetSelection,'\r',' ')
if justShowTheseIDs == '': justShowTheseIDs = 'None Selected'
if GeneSetSelection== '': GeneSetSelection = 'None Selected'
if PathwaySelection== '': PathwaySelection = 'None Selected'
try: rho = float(self.heatmap_translation['CorrelationCutoff'])
except Exception: rho=None
if transpose == 'yes': transpose = True
else: transpose = False
vendor = 'RNASeq'
color_gradient = string.replace(color_gradient,'-','_')
                if GeneSetSelection != 'None Selected' or GeneSelection != '' or normalization != 'NA' or justShowTheseIDs != '' or justShowTheseIDs != 'None Selected':
gsp = UI.GeneSelectionParameters(self.species,self.platform,vendor)
if rho!=None:
try:
gsp.setRhoCutoff(rho)
GeneSelection = 'amplify '+GeneSelection
except Exception: print 'Must enter a valid Pearson correlation cutoff (float)',traceback.format_exc()
gsp.setGeneSet(GeneSetSelection)
gsp.setPathwaySelect(PathwaySelection)
gsp.setGeneSelection(GeneSelection)
gsp.setOntologyID(OntologyID)
gsp.setTranspose(transpose)
gsp.setNormalize(normalization)
gsp.setJustShowTheseIDs(justShowTheseIDs)
gsp.setClusterGOElite(clusterGOElite)
transpose = gsp ### this allows methods that don't transmit this object to also work
if row_method == 'no': row_method = None
if column_method == 'no': column_method = None
#print [GeneSetSelection, PathwaySelection,OntologyID]
remoteCallToAltAnalyze = False
#try: print [gsp.ClusterGOElite()]
#except Exception: print 'dog', traceback.format_exc()
except Exception:
print traceback.format_exc()
if remoteCallToAltAnalyze == False:
try: UI.createHeatMap(input_file_dir, row_method, row_metric, column_method, column_metric, color_gradient, transpose, contrast, None, display=True)
except Exception: print traceback.format_exc()
else:
try:
command = ['--image', 'hierarchical','--species', self.species,'--platform',self.platform,'--input',input_file_dir, '--display', 'True']
command += ['--column_method',str(column_method),'--column_metric',column_metric]
command += ['--row_method',str(row_method),'--row_metric',row_metric]
command += ['--normalization',normalization,'--transpose',str(transpose),'--contrast',contrast,'--color_gradient',color_gradient]
#print command
command_str = string.join(['']+command,' ')
#print command
package_path = unique.filepath('python')
mac_package_path = string.replace(package_path,'python','AltAnalyze.app/Contents/MacOS/AltAnalyze')
#os.system(mac_package_path+command_str);sys.exit()
import subprocess
#subprocess.call([mac_package_path, 'C:\\test.txt'])
usePopen = True
if os.name == 'nt':
command = [mac_package_path]+command
DETACHED_PROCESS = 0x00000008
pid = subprocess.Popen(command, creationflags=DETACHED_PROCESS).pid
else:
command = [mac_package_path]+command
                        if usePopen:
                            alt_command = ["start",mac_package_path] ### alternative invocation, currently unused
                            subprocess.call(command) #works but runs in back of the application, detached
                        if usePopen==False:
                            ### same issue as subprocess.Popen
pid = os.fork()
if pid ==0:
os.execv(mac_package_path,command) ### Kills the parent app
os._exit(0)
"""
retcode = subprocess.call([
apt_file, "-d", cdf_file, "--kill-list", kill_list_dir, "-a", algorithm, "-o", output_dir,
"--cel-files", cel_dir, "-a", "pm-mm,mas5-detect.calls=1.pairs=1"])"""
except Exception:
print traceback.format_exc()
else:
os.chdir(parentDirectory)
RegExMatch = re.findall("exp.", self.DirFile)
if(len(RegExMatch) == 0):
InputFile = self.DirFile.replace("-3D", "")
InputFile = InputFile.replace("-PCA", "")
InputFile = InputFile.replace("DataPlots/Clustering-", "ExpressionOutput/Clustering/")
input_file_dir= InputFile + ".txt"
else:
InputFile = self.DirFile.replace("-3D", "")
InputFile = InputFile.replace("-PCA", "")
InputFile = InputFile.replace("DataPlots/Clustering-", "ExpressionInput/")
input_file_dir= InputFile + ".txt"
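            #Path mapping sketch (file names illustrative): a plot such as
            #"DataPlots/Clustering-exp.MyStudy-PCA-3D" resolves to "ExpressionInput/exp.MyStudy.txt",
            #the expression matrix that UI.performPCA expects below.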
if(self.IncludeLabelsRadio.GetValue() == True):
include_labels= 'yes'
else:
include_labels= 'no'
pca_algorithm = 'SVD'
transpose = False
if self.runPCA == False:
include_labels = 'no'
if(self.D_3DRadio.GetValue() == True):
plotType = '3D'
else:
plotType = '2D'
display = True
self.runPCA = True
count,columns = self.verifyFileLength(input_file_dir)
if columns == 3: plotType = '2D' ### only 2 components possible for 2 samples
if count>0:
UI.performPCA(input_file_dir, include_labels, pca_algorithm, transpose, None, plotType=plotType, display=display)
else:
self.control.write('PCA input file not present: '+input_file_dir+'\n')
os.chdir(currentDirectory)
self.InteractivePanelUpdate(event)
def verifyFileLength(self,filename):
count = 0; columns=0
try:
fn=unique.filepath(filename)
for line in open(fn,'rU').xreadlines():
t = string.split(line,'\t')
columns = len(t)
count+=1
if count>9: break
except Exception: null=[]
return count,columns
def OnAbout(self, event):
#Brings up the developer information. Non-functional currently but will be updated eventually.
dial = wx.MessageDialog(None, 'AltAnalyze Results Viewer\nVersion 0.5\n2015', 'About', wx.OK)
dial.ShowModal()
def OnHelp(self, event):
        #Brings up the tutorial and documentation. Will be updated to a .pdf in the future.
os.chdir(parentDirectory)
ManualPath = rootDirectory + "/Documentation/ViewerManual.pdf"
subprocess.Popen(['open', ManualPath])
os.chdir(currentDirectory)
class ImageFrame(wx.Frame):
    #Obsolete code; will almost certainly be removed.
title = "Image"
def __init__(self):
wx.Frame.__init__(self, None, title=self.title)
def remoteViewer(app):
fr = Main(parent=None,id=1)
fr.Show()
app.MainLoop()
if __name__ == "__main__":
app = wx.App(False)
fr = Main(parent=None,id=1)
fr.Show()
app.MainLoop()
| apache-2.0 |
teltek/edx-platform | common/djangoapps/enrollment/api.py | 2 | 18093 | """
Enrollment API for creating, updating, and deleting enrollments. Also provides access to enrollment information at a
course level, such as available course modes.
"""
import importlib
import logging
from django.conf import settings
from django.core.cache import cache
from opaque_keys.edx.keys import CourseKey
from course_modes.models import CourseMode
from enrollment import errors
log = logging.getLogger(__name__)
DEFAULT_DATA_API = 'enrollment.data'
def get_enrollments(user_id, include_inactive=False):
"""Retrieves all the courses a user is enrolled in.
    Takes a user and retrieves all related enrollments. Includes information regarding how the user is enrolled
    in the course.
Args:
user_id (str): The username of the user we want to retrieve course enrollment information for.
include_inactive (bool): Determines whether inactive enrollments will be included
Returns:
A list of enrollment information for the given user.
Examples:
>>> get_enrollments("Bob")
[
{
"created": "2014-10-20T20:18:00Z",
"mode": "honor",
"is_active": True,
"user": "Bob",
"course_details": {
"course_id": "edX/DemoX/2014T2",
"course_name": "edX Demonstration Course",
"enrollment_end": "2014-12-20T20:18:00Z",
"enrollment_start": "2014-10-15T20:18:00Z",
"course_start": "2015-02-03T00:00:00Z",
"course_end": "2015-05-06T00:00:00Z",
"course_modes": [
{
"slug": "honor",
"name": "Honor Code Certificate",
"min_price": 0,
"suggested_prices": "",
"currency": "usd",
"expiration_datetime": null,
"description": null,
"sku": null,
"bulk_sku": null
}
],
"invite_only": False
}
},
{
"created": "2014-10-25T20:18:00Z",
"mode": "verified",
"is_active": True,
"user": "Bob",
"course_details": {
"course_id": "edX/edX-Insider/2014T2",
"course_name": "edX Insider Course",
"enrollment_end": "2014-12-20T20:18:00Z",
"enrollment_start": "2014-10-15T20:18:00Z",
"course_start": "2015-02-03T00:00:00Z",
"course_end": "2015-05-06T00:00:00Z",
"course_modes": [
{
"slug": "honor",
"name": "Honor Code Certificate",
"min_price": 0,
"suggested_prices": "",
"currency": "usd",
"expiration_datetime": null,
"description": null,
"sku": null,
"bulk_sku": null
}
],
"invite_only": True
}
}
]
"""
return _data_api().get_course_enrollments(user_id, include_inactive)
def get_enrollment(user_id, course_id):
"""Retrieves all enrollment information for the user in respect to a specific course.
Gets all the course enrollment information specific to a user in a course.
Args:
user_id (str): The user to get course enrollment information for.
course_id (str): The course to get enrollment information for.
Returns:
A serializable dictionary of the course enrollment.
Example:
>>> get_enrollment("Bob", "edX/DemoX/2014T2")
{
"created": "2014-10-20T20:18:00Z",
"mode": "honor",
"is_active": True,
"user": "Bob",
"course_details": {
"course_id": "edX/DemoX/2014T2",
"course_name": "edX Demonstration Course",
"enrollment_end": "2014-12-20T20:18:00Z",
"enrollment_start": "2014-10-15T20:18:00Z",
"course_start": "2015-02-03T00:00:00Z",
"course_end": "2015-05-06T00:00:00Z",
"course_modes": [
{
"slug": "honor",
"name": "Honor Code Certificate",
"min_price": 0,
"suggested_prices": "",
"currency": "usd",
"expiration_datetime": null,
"description": null,
"sku": null,
"bulk_sku": null
}
],
"invite_only": False
}
}
"""
return _data_api().get_course_enrollment(user_id, course_id)
def add_enrollment(user_id, course_id, mode=None, is_active=True, enrollment_attributes=None):
"""Enrolls a user in a course.
Enrolls a user in a course. If the mode is not specified, this will default to `CourseMode.DEFAULT_MODE_SLUG`.
Arguments:
user_id (str): The user to enroll.
course_id (str): The course to enroll the user in.
mode (str): Optional argument for the type of enrollment to create. Ex. 'audit', 'honor', 'verified',
'professional'. If not specified, this defaults to the default course mode.
is_active (boolean): Optional argument for making the new enrollment inactive. If not specified, is_active
defaults to True.
enrollment_attributes (list): Attributes to be set the enrollment.
Returns:
A serializable dictionary of the new course enrollment.
Example:
>>> add_enrollment("Bob", "edX/DemoX/2014T2", mode="audit")
{
"created": "2014-10-20T20:18:00Z",
"mode": "audit",
"is_active": True,
"user": "Bob",
"course_details": {
"course_id": "edX/DemoX/2014T2",
"course_name": "edX Demonstration Course",
"enrollment_end": "2014-12-20T20:18:00Z",
"enrollment_start": "2014-10-15T20:18:00Z",
"course_start": "2015-02-03T00:00:00Z",
"course_end": "2015-05-06T00:00:00Z",
"course_modes": [
{
"slug": "audit",
"name": "Audit",
"min_price": 0,
"suggested_prices": "",
"currency": "usd",
"expiration_datetime": null,
"description": null,
"sku": null,
"bulk_sku": null
}
],
"invite_only": False
}
}
"""
if mode is None:
mode = _default_course_mode(course_id)
validate_course_mode(course_id, mode, is_active=is_active)
enrollment = _data_api().create_course_enrollment(user_id, course_id, mode, is_active)
if enrollment_attributes is not None:
set_enrollment_attributes(user_id, course_id, enrollment_attributes)
return enrollment
def update_enrollment(user_id, course_id, mode=None, is_active=None, enrollment_attributes=None, include_expired=False):
"""Updates the course mode for the enrolled user.
Update a course enrollment for the given user and course.
Arguments:
user_id (str): The user associated with the updated enrollment.
course_id (str): The course associated with the updated enrollment.
Keyword Arguments:
mode (str): The new course mode for this enrollment.
is_active (bool): Sets whether the enrollment is active or not.
enrollment_attributes (list): Attributes to be set the enrollment.
include_expired (bool): Boolean denoting whether expired course modes should be included.
Returns:
A serializable dictionary representing the updated enrollment.
Example:
>>> update_enrollment("Bob", "edX/DemoX/2014T2", "honor")
{
"created": "2014-10-20T20:18:00Z",
"mode": "honor",
"is_active": True,
"user": "Bob",
"course_details": {
"course_id": "edX/DemoX/2014T2",
"course_name": "edX Demonstration Course",
"enrollment_end": "2014-12-20T20:18:00Z",
"enrollment_start": "2014-10-15T20:18:00Z",
"course_start": "2015-02-03T00:00:00Z",
"course_end": "2015-05-06T00:00:00Z",
"course_modes": [
{
"slug": "honor",
"name": "Honor Code Certificate",
"min_price": 0,
"suggested_prices": "",
"currency": "usd",
"expiration_datetime": null,
"description": null,
"sku": null,
"bulk_sku": null
}
],
"invite_only": False
}
}
"""
log.info(u'Starting Update Enrollment process for user {user} in course {course} to mode {mode}'.format(
user=user_id,
course=course_id,
mode=mode,
))
if mode is not None:
validate_course_mode(course_id, mode, is_active=is_active, include_expired=include_expired)
enrollment = _data_api().update_course_enrollment(user_id, course_id, mode=mode, is_active=is_active)
if enrollment is None:
msg = u"Course Enrollment not found for user {user} in course {course}".format(user=user_id, course=course_id)
log.warn(msg)
raise errors.EnrollmentNotFoundError(msg)
else:
if enrollment_attributes is not None:
set_enrollment_attributes(user_id, course_id, enrollment_attributes)
log.info(u'Course Enrollment updated for user {user} in course {course} to mode {mode}'.format(
user=user_id,
course=course_id,
mode=mode
))
return enrollment
def get_course_enrollment_details(course_id, include_expired=False):
"""Get the course modes for course. Also get enrollment start and end date, invite only, etc.
Given a course_id, return a serializable dictionary of properties describing course enrollment information.
Args:
course_id (str): The Course to get enrollment information for.
include_expired (bool): Boolean denoting whether expired course modes
should be included in the returned JSON data.
Returns:
A serializable dictionary of course enrollment information.
Example:
>>> get_course_enrollment_details("edX/DemoX/2014T2")
{
"course_id": "edX/DemoX/2014T2",
"course_name": "edX Demonstration Course",
"enrollment_end": "2014-12-20T20:18:00Z",
"enrollment_start": "2014-10-15T20:18:00Z",
"course_start": "2015-02-03T00:00:00Z",
"course_end": "2015-05-06T00:00:00Z",
"course_modes": [
{
"slug": "honor",
"name": "Honor Code Certificate",
"min_price": 0,
"suggested_prices": "",
"currency": "usd",
"expiration_datetime": null,
"description": null,
"sku": null,
"bulk_sku": null
}
],
"invite_only": False
}
"""
cache_key = u'enrollment.course.details.{course_id}.{include_expired}'.format(
course_id=course_id,
include_expired=include_expired
)
cached_enrollment_data = None
try:
cached_enrollment_data = cache.get(cache_key)
except Exception:
# The cache backend could raise an exception (for example, memcache keys that contain spaces)
log.exception(u"Error occurred while retrieving course enrollment details from the cache")
if cached_enrollment_data:
log.info(u"Get enrollment data for course %s (cached)", course_id)
return cached_enrollment_data
course_enrollment_details = _data_api().get_course_enrollment_info(course_id, include_expired)
try:
cache_time_out = getattr(settings, 'ENROLLMENT_COURSE_DETAILS_CACHE_TIMEOUT', 60)
cache.set(cache_key, course_enrollment_details, cache_time_out)
except Exception:
# Catch any unexpected errors during caching.
log.exception(u"Error occurred while caching course enrollment details for course %s", course_id)
raise errors.CourseEnrollmentError(u"An unexpected error occurred while retrieving course enrollment details.")
log.info(u"Get enrollment data for course %s", course_id)
return course_enrollment_details
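# Illustrative note (assumed deployment snippet): the cache window above is read
# from Django settings and falls back to 60 seconds when unset, e.g.
#     ENROLLMENT_COURSE_DETAILS_CACHE_TIMEOUT = 120  # seconds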
def set_enrollment_attributes(user_id, course_id, attributes):
"""Set enrollment attributes for the enrollment of given user in the
course provided.
Args:
course_id (str): The Course to set enrollment attributes for.
user_id (str): The User to set enrollment attributes for.
attributes (list): Attributes to be set.
Example:
>>> set_enrollment_attributes(
"Bob",
"course-v1-edX-DemoX-1T2015",
[
{
"namespace": "credit",
"name": "provider_id",
"value": "hogwarts",
},
]
)
"""
_data_api().add_or_update_enrollment_attr(user_id, course_id, attributes)
def get_enrollment_attributes(user_id, course_id):
"""Retrieve enrollment attributes for given user for provided course.
Args:
user_id: The User to get enrollment attributes for
course_id (str): The Course to get enrollment attributes for.
Example:
>>>get_enrollment_attributes("Bob", "course-v1-edX-DemoX-1T2015")
[
{
"namespace": "credit",
"name": "provider_id",
"value": "hogwarts",
},
]
Returns: list
"""
return _data_api().get_enrollment_attributes(user_id, course_id)
def _default_course_mode(course_id):
"""Return the default enrollment for a course.
Special case the default enrollment to return if nothing else is found.
Arguments:
course_id (str): The course to check against for available course modes.
Returns:
str
"""
course_modes = CourseMode.modes_for_course(CourseKey.from_string(course_id))
available_modes = [m.slug for m in course_modes]
if CourseMode.DEFAULT_MODE_SLUG in available_modes:
return CourseMode.DEFAULT_MODE_SLUG
elif 'audit' in available_modes:
return 'audit'
elif 'honor' in available_modes:
return 'honor'
return CourseMode.DEFAULT_MODE_SLUG
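# Example (illustrative): for a course offering only ['honor'], the chain above
# resolves to 'honor'; for ['audit', 'verified'] it resolves to 'audit', unless
# CourseMode.DEFAULT_MODE_SLUG is itself among the available modes.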
def validate_course_mode(course_id, mode, is_active=None, include_expired=False):
"""Checks to see if the specified course mode is valid for the course.
If the requested course mode is not available for the course, raise an error with corresponding
course enrollment information.
Arguments:
course_id (str): The course to check against for available course modes.
mode (str): The slug for the course mode specified in the enrollment.
Keyword Arguments:
is_active (bool): Whether the enrollment is to be activated or deactivated.
include_expired (bool): Boolean denoting whether expired course modes should be included.
Returns:
None
Raises:
CourseModeNotFound: raised if the course mode is not found.
"""
# If the client has requested an enrollment deactivation, we want to include expired modes
# in the set of available modes. This allows us to unenroll users from expired modes.
# If include_expired was passed in as True, we should not redetermine its value.
if not include_expired:
include_expired = not is_active if is_active is not None else False
course_enrollment_info = _data_api().get_course_enrollment_info(course_id, include_expired=include_expired)
course_modes = course_enrollment_info["course_modes"]
available_modes = [m['slug'] for m in course_modes]
if mode not in available_modes:
msg = (
u"Specified course mode '{mode}' unavailable for course {course_id}. "
u"Available modes were: {available}"
).format(
mode=mode,
course_id=course_id,
available=", ".join(available_modes)
)
log.warning(msg)
raise errors.CourseModeNotFoundError(msg, course_enrollment_info)
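# Illustrative sketch (hypothetical helper, not in the original module): callers
# can turn the exception above into a boolean check.
def _example_is_valid_mode(course_id, mode):
    """Return True if `mode` is currently available for `course_id` (illustration only)."""
    try:
        validate_course_mode(course_id, mode)
        return True
    except errors.CourseModeNotFoundError:
        return False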
def unenroll_user_from_all_courses(user_id):
"""
Unenrolls a specified user from all of the courses they are currently enrolled in.
:param user_id: The id of the user being unenrolled.
:return: The IDs of all of the organizations from which the learner was unenrolled.
"""
return _data_api().unenroll_user_from_all_courses(user_id)
def _data_api():
"""Returns a Data API.
This relies on Django settings to find the appropriate data API.
"""
# We retrieve the settings in-line here (rather than using the
# top-level constant), so that @override_settings will work
# in the test suite.
api_path = getattr(settings, "ENROLLMENT_DATA_API", DEFAULT_DATA_API)
try:
return importlib.import_module(api_path)
except (ImportError, ValueError):
log.exception(u"Could not load module at '{path}'".format(path=api_path))
raise errors.EnrollmentApiLoadError(api_path)
| agpl-3.0 |
mdhaman/superdesk-core | apps/marked_desks/service.py | 3 | 3511 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014, 2015 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import json
from flask import current_app as app
from superdesk import get_resource_service
from superdesk.services import BaseService
from eve.utils import ParsedRequest
from superdesk.notification import push_notification
from apps.archive.common import get_user
from eve.utils import config
from superdesk.utc import utcnow
from apps.archive.common import ITEM_MARK, ITEM_UNMARK
def get_marked_items(desk_id):
"""Get items marked for given desk"""
query = {
'query': {'filtered': {'filter': {'term': {'marked_desks.desk_id': str(desk_id)}}}},
'sort': [{'versioncreated': 'desc'}],
'size': 200
}
request = ParsedRequest()
request.args = {'source': json.dumps(query), 'repo': 'archive,published'}
return list(get_resource_service('search').get(req=request, lookup=None))
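# Illustrative usage (assumed, not part of the original service): count how many
# items marked for a desk are still unpublished. The 'state' field name is an
# assumption about the item schema.
def _example_count_unpublished(desk_id):
    items = get_marked_items(desk_id)
    return len([item for item in items if item.get('state') != 'published'])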
class MarkedForDesksService(BaseService):
def create(self, docs, **kwargs):
"""Toggle marked desk status for given desk and item."""
service = get_resource_service('archive')
published_service = get_resource_service('published')
ids = []
for doc in docs:
item = service.find_one(req=None, guid=doc['marked_item'])
if not item:
ids.append(None)
continue
ids.append(item['_id'])
marked_desks = item.get('marked_desks', [])
if not marked_desks:
marked_desks = []
existing_mark = next((m for m in marked_desks if m['desk_id'] == doc['marked_desk']), None)
if existing_mark:
# there is an existing mark so this is un-mark action
marked_desks = [m for m in marked_desks if m['desk_id'] != doc['marked_desk']]
marked_desks_on = False # mark toggled off
else:
# there is no existing mark so this is mark action
user = get_user() or {}
new_mark = {}
new_mark['desk_id'] = doc['marked_desk']
new_mark['user_marked'] = str(user.get(config.ID_FIELD, ''))
new_mark['date_marked'] = utcnow()
marked_desks.append(new_mark)
marked_desks_on = True
updates = {'marked_desks': marked_desks}
service.system_update(item['_id'], updates, item)
publishedItems = published_service.find({'item_id': item['_id']})
for publishedItem in publishedItems:
if publishedItem['_current_version'] == item['_current_version'] or not marked_desks_on:
updates = {'marked_desks': marked_desks}
published_service.system_update(publishedItem['_id'], updates, publishedItem)
push_notification(
'item:marked_desks',
marked=int(marked_desks_on),
item_id=item['_id'],
mark_id=str(doc['marked_desk']))
if marked_desks_on:
app.on_archive_item_updated({'desk_id': doc['marked_desk']}, item, ITEM_MARK)
else:
app.on_archive_item_updated({'desk_id': doc['marked_desk']}, item, ITEM_UNMARK)
return ids
| agpl-3.0 |
BondAnthony/ansible | hacking/tests/gen_distribution_version_testcase.py | 13 | 2703 | #!/usr/bin/env python
"""
This script generated test_cases for test_distribution_version.py.
To do so it outputs the relevant files from /etc/*release, the output of distro.linux_distribution()
and the current ansible_facts regarding the distribution version.
This assumes a working ansible version in the path.
"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os.path
import platform
import subprocess
import sys
from ansible.module_utils import distro
from ansible.module_utils._text import to_text
filelist = [
'/etc/oracle-release',
'/etc/slackware-version',
'/etc/redhat-release',
'/etc/vmware-release',
'/etc/openwrt_release',
'/etc/system-release',
'/etc/alpine-release',
'/etc/release',
'/etc/arch-release',
'/etc/os-release',
'/etc/SuSE-release',
'/etc/gentoo-release',
'/etc/os-release',
'/etc/lsb-release',
'/etc/altlinux-release',
'/etc/os-release',
'/etc/coreos/update.conf',
'/etc/flatcar/update.conf',
'/usr/lib/os-release',
]
fcont = {}
for f in filelist:
if os.path.exists(f):
s = os.path.getsize(f)
if s > 0 and s < 10000:
with open(f) as fh:
fcont[f] = fh.read()
dist = distro.linux_distribution(full_distribution_name=False)
facts = ['distribution', 'distribution_version', 'distribution_release', 'distribution_major_version', 'os_family']
try:
b_ansible_out = subprocess.check_output(
['ansible', 'localhost', '-m', 'setup'])
except subprocess.CalledProcessError as e:
print("ERROR: ansible run failed, output was: \n")
print(e.output)
sys.exit(e.returncode)
ansible_out = to_text(b_ansible_out)
parsed = json.loads(ansible_out[ansible_out.index('{'):])
ansible_facts = {}
for fact in facts:
try:
ansible_facts[fact] = parsed['ansible_facts']['ansible_' + fact]
except Exception:
ansible_facts[fact] = "N/A"
nicename = ansible_facts['distribution'] + ' ' + ansible_facts['distribution_version']
output = {
'name': nicename,
'distro': {
'codename': distro.codename(),
'id': distro.id(),
'name': distro.name(),
'version': distro.version(),
'version_best': distro.version(best=True),
'lsb_release_info': distro.lsb_release_info(),
'os_release_info': distro.os_release_info(),
},
'input': fcont,
'platform.dist': dist,
'result': ansible_facts,
}
system = platform.system()
if system != 'Linux':
output['platform.system'] = system
release = platform.release()
if release:
output['platform.release'] = release
print(json.dumps(output, indent=4))
| gpl-3.0 |
prasen-ftech/pywinauto | examples/windowmediaplayer.py | 19 | 2581 | # GUI Application automation and testing library
# Copyright (C) 2006 Mark Mc Mahon
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
"Some automation of Windows Media player"
__revision__ = "$Revision$"
#import os
import time
import sys
try:
from pywinauto import application
except ImportError:
import os.path
pywinauto_path = os.path.abspath(__file__)
pywinauto_path = os.path.split(os.path.split(pywinauto_path)[0])[0]
import sys
sys.path.append(pywinauto_path)
from pywinauto import application
def WindowsMedia():
app = application.Application()
try:
app.start_( # connect_(path =
ur"C:\Program Files\Windows Media Player\wmplayer.exe")
except application.ProcessNotFoundError:
print "You must first start Windows Media "\
"Player before running this script"
sys.exit()
app.WindowsMediaPlayer.MenuSelect("View->GoTo->Library")
app.WindowsMediaPlayer.MenuSelect("View->Choose Columns")
#for ctrl in app.ChooseColumns.Children():
# print ctrl.Class()
print "Is it checked already:", app.ChooseColumsn.ListView.IsChecked(1)
# Check an Item in the listview
app.ChooseColumns.ListView.Check(1)
time.sleep(.5)
print "Shold be checked now:", app.ChooseColumsn.ListView.IsChecked(1)
# Uncheck it
app.ChooseColumns.ListView.UnCheck(1)
time.sleep(.5)
print "Should not be checked now:", app.ChooseColumsn.ListView.IsChecked(1)
# Check it again
app.ChooseColumns.ListView.Check(1)
time.sleep(.5)
app.ChooseColumns.Cancel.Click()
app.WindowsMediaPlayer.MenuSelect("File->Exit")
def Main():
start = time.time()
WindowsMedia()
print "Total time taken:", time.time() - start
if __name__ == "__main__":
Main() | lgpl-2.1 |
TNT-Samuel/Coding-Projects | DNS Server/Source/Lib/encodings/idna.py | 215 | 9170 | # This module implements the RFCs 3490 (IDNA) and 3491 (Nameprep)
import stringprep, re, codecs
from unicodedata import ucd_3_2_0 as unicodedata
# IDNA section 3.1
dots = re.compile("[\u002E\u3002\uFF0E\uFF61]")
# IDNA section 5
ace_prefix = b"xn--"
sace_prefix = "xn--"
# This assumes query strings, so AllowUnassigned is true
def nameprep(label):
# Map
newlabel = []
for c in label:
if stringprep.in_table_b1(c):
# Map to nothing
continue
newlabel.append(stringprep.map_table_b2(c))
label = "".join(newlabel)
# Normalize
label = unicodedata.normalize("NFKC", label)
# Prohibit
for c in label:
if stringprep.in_table_c12(c) or \
stringprep.in_table_c22(c) or \
stringprep.in_table_c3(c) or \
stringprep.in_table_c4(c) or \
stringprep.in_table_c5(c) or \
stringprep.in_table_c6(c) or \
stringprep.in_table_c7(c) or \
stringprep.in_table_c8(c) or \
stringprep.in_table_c9(c):
raise UnicodeError("Invalid character %r" % c)
# Check bidi
RandAL = [stringprep.in_table_d1(x) for x in label]
for c in RandAL:
if c:
# There is a RandAL char in the string. Must perform further
# tests:
# 1) The characters in section 5.8 MUST be prohibited.
# This is table C.8, which was already checked
# 2) If a string contains any RandALCat character, the string
# MUST NOT contain any LCat character.
if any(stringprep.in_table_d2(x) for x in label):
raise UnicodeError("Violation of BIDI requirement 2")
# 3) If a string contains any RandALCat character, a
# RandALCat character MUST be the first character of the
# string, and a RandALCat character MUST be the last
# character of the string.
if not RandAL[0] or not RandAL[-1]:
raise UnicodeError("Violation of BIDI requirement 3")
return label
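# Example (illustrative): nameprep lowercases via table B.2 and applies NFKC
# normalization, so nameprep("Python") == "python".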
def ToASCII(label):
try:
# Step 1: try ASCII
label = label.encode("ascii")
except UnicodeError:
pass
else:
# Skip to step 3: UseSTD3ASCIIRules is false, so
# Skip to step 8.
if 0 < len(label) < 64:
return label
raise UnicodeError("label empty or too long")
# Step 2: nameprep
label = nameprep(label)
# Step 3: UseSTD3ASCIIRules is false
# Step 4: try ASCII
try:
label = label.encode("ascii")
except UnicodeError:
pass
else:
# Skip to step 8.
if 0 < len(label) < 64:
return label
raise UnicodeError("label empty or too long")
# Step 5: Check ACE prefix
if label.startswith(sace_prefix):
raise UnicodeError("Label starts with ACE prefix")
# Step 6: Encode with PUNYCODE
label = label.encode("punycode")
# Step 7: Prepend ACE prefix
label = ace_prefix + label
# Step 8: Check size
if 0 < len(label) < 64:
return label
raise UnicodeError("label empty or too long")
def ToUnicode(label):
# Step 1: Check for ASCII
if isinstance(label, bytes):
pure_ascii = True
else:
try:
label = label.encode("ascii")
pure_ascii = True
except UnicodeError:
pure_ascii = False
if not pure_ascii:
# Step 2: Perform nameprep
label = nameprep(label)
# It doesn't say this, but apparently, it should be ASCII now
try:
label = label.encode("ascii")
except UnicodeError:
raise UnicodeError("Invalid character in IDN label")
# Step 3: Check for ACE prefix
if not label.startswith(ace_prefix):
return str(label, "ascii")
# Step 4: Remove ACE prefix
label1 = label[len(ace_prefix):]
# Step 5: Decode using PUNYCODE
result = label1.decode("punycode")
# Step 6: Apply ToASCII
label2 = ToASCII(result)
# Step 7: Compare the result of step 6 with the one of step 3
# label2 will already be in lower case.
if str(label, "ascii").lower() != str(label2, "ascii"):
raise UnicodeError("IDNA does not round-trip", label, label2)
# Step 8: return the result of step 5
return result
### Codec APIs
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
if errors != 'strict':
# IDNA is quite clear that implementations must be strict
raise UnicodeError("unsupported error handling "+errors)
if not input:
return b'', 0
try:
result = input.encode('ascii')
except UnicodeEncodeError:
pass
else:
# ASCII name: fast path
labels = result.split(b'.')
for label in labels[:-1]:
if not (0 < len(label) < 64):
raise UnicodeError("label empty or too long")
if len(labels[-1]) >= 64:
raise UnicodeError("label too long")
return result, len(input)
result = bytearray()
labels = dots.split(input)
if labels and not labels[-1]:
trailing_dot = b'.'
del labels[-1]
else:
trailing_dot = b''
for label in labels:
if result:
# Join with U+002E
result.extend(b'.')
result.extend(ToASCII(label))
return bytes(result+trailing_dot), len(input)
def decode(self, input, errors='strict'):
if errors != 'strict':
raise UnicodeError("Unsupported error handling "+errors)
if not input:
return "", 0
# IDNA allows decoding to operate on Unicode strings, too.
if not isinstance(input, bytes):
# XXX obviously wrong, see #3232
input = bytes(input)
if ace_prefix not in input:
# Fast path
try:
return input.decode('ascii'), len(input)
except UnicodeDecodeError:
pass
labels = input.split(b".")
if labels and len(labels[-1]) == 0:
trailing_dot = '.'
del labels[-1]
else:
trailing_dot = ''
result = []
for label in labels:
result.append(ToUnicode(label))
return ".".join(result)+trailing_dot, len(input)
class IncrementalEncoder(codecs.BufferedIncrementalEncoder):
def _buffer_encode(self, input, errors, final):
if errors != 'strict':
# IDNA is quite clear that implementations must be strict
raise UnicodeError("unsupported error handling "+errors)
if not input:
return (b'', 0)
labels = dots.split(input)
trailing_dot = b''
if labels:
if not labels[-1]:
trailing_dot = b'.'
del labels[-1]
elif not final:
# Keep potentially unfinished label until the next call
del labels[-1]
if labels:
trailing_dot = b'.'
result = bytearray()
size = 0
for label in labels:
if size:
# Join with U+002E
result.extend(b'.')
size += 1
result.extend(ToASCII(label))
size += len(label)
result += trailing_dot
size += len(trailing_dot)
return (bytes(result), size)
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
def _buffer_decode(self, input, errors, final):
if errors != 'strict':
raise UnicodeError("Unsupported error handling "+errors)
if not input:
return ("", 0)
# IDNA allows decoding to operate on Unicode strings, too.
if isinstance(input, str):
labels = dots.split(input)
else:
# Must be ASCII string
input = str(input, "ascii")
labels = input.split(".")
trailing_dot = ''
if labels:
if not labels[-1]:
trailing_dot = '.'
del labels[-1]
elif not final:
# Keep potentially unfinished label until the next call
del labels[-1]
if labels:
trailing_dot = '.'
result = []
size = 0
for label in labels:
result.append(ToUnicode(label))
if size:
size += 1
size += len(label)
result = ".".join(result) + trailing_dot
size += len(trailing_dot)
return (result, size)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='idna',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
| gpl-3.0 |
rturumella/CloudBot | plugins/reddit.py | 2 | 3282 | from datetime import datetime
import re
import random
import asyncio
import functools
import urllib.parse
import requests
from cloudbot import hook
from cloudbot.util import timeformat, formatting
reddit_re = re.compile(r'.*(((www\.)?reddit\.com/r|redd\.it)[^ ]+)', re.I)
base_url = "http://reddit.com/r/{}/.json"
short_url = "http://redd.it/{}"
def format_output(item, show_url=False):
""" takes a reddit post and returns a formatted sting """
item["title"] = formatting.truncate(item["title"], 50)
item["link"] = short_url.format(item["id"])
raw_time = datetime.fromtimestamp(int(item["created_utc"]))
item["timesince"] = timeformat.timesince(raw_time, count=1)
item["comments"] = formatting.pluralize(item["num_comments"], 'comment')
item["points"] = formatting.pluralize(item["score"], 'point')
if item["over_18"]:
item["warning"] = " \x02NSFW\x02"
else:
item["warning"] = ""
if show_url:
return "\x02{title} : {subreddit}\x02 - posted by \x02{author}\x02" \
" {timesince} ago - {comments}, {points} -" \
" {link}{warning}".format(**item)
else:
return "\x02{title} : {subreddit}\x02 - posted by \x02{author}\x02" \
" {timesince} ago - {comments}, {points}{warning}".format(**item)
@hook.regex(reddit_re)
def reddit_url(match, bot):
url = match.group(1)
if "redd.it" in url:
url = "http://" + url
response = requests.get(url)
url = response.url + "/.json"
if not urllib.parse.urlparse(url).scheme:
url = "http://" + url + "/.json"
# the reddit API gets grumpy if we don't include headers
headers = {'User-Agent': bot.user_agent}
r = requests.get(url, headers=headers)
data = r.json()
item = data[0]["data"]["children"][0]["data"]
return format_output(item)
@asyncio.coroutine
@hook.command(autohelp=False)
def reddit(text, bot, loop):
"""<subreddit> [n] - gets a random post from <subreddit>, or gets the [n]th post in the subreddit"""
id_num = None
headers = {'User-Agent': bot.user_agent}
if text:
# clean and split the input
parts = text.lower().strip().split()
# find the requested post number (if any)
if len(parts) > 1:
url = base_url.format(parts[0].strip())
try:
id_num = int(parts[1]) - 1
except ValueError:
return "Invalid post number."
else:
url = base_url.format(parts[0].strip())
else:
url = "http://reddit.com/.json"
try:
# Again, identify with Reddit using a User-Agent header, otherwise we get a 429
inquiry = yield from loop.run_in_executor(None, functools.partial(requests.get, url, headers=headers))
data = inquiry.json()
except Exception as e:
return "Error: " + str(e)
data = data["data"]["children"]
# get the requested/random post
if id_num is not None:
try:
item = data[id_num]["data"]
except IndexError:
length = len(data)
return "Invalid post number. Number must be between 1 and {}.".format(length)
else:
item = random.choice(data)["data"]
return format_output(item, show_url=True)
| gpl-3.0 |
f2nd/yandex-tank | yandextank/stepper/tests/test_load_plan.py | 4 | 6624 | import pytest
from yandextank.stepper.load_plan import create, Const, Line, Composite, Stairway, StepFactory
from yandextank.stepper.util import take
class TestLine(object):
def test_get_rps_list(self):
lp = create(["line(1, 100, 10s)"])
rps_list = lp.get_rps_list()
assert len(rps_list) == 11
assert rps_list[-1][0] == 100
@pytest.mark.parametrize(
"rps, duration, rps_list",
[(100, 3000, [(100, 3)]), (0, 3000, [(0, 3)]), (100, 0, [(100, 0)])])
class TestConst(object):
@pytest.mark.parametrize(
"check_point, expected",
[(lambda duration: 0, lambda rps: rps),
(lambda duration: duration / 2, lambda rps: rps),
(lambda duration: duration + 1, lambda rps: 0),
(lambda duration: -1, lambda rps: 0)])
def test_rps_at(self, rps, duration, rps_list, check_point, expected):
assert Const(rps,
duration).rps_at(check_point(duration)) == expected(rps)
def test_get_rps_list(self, rps, duration, rps_list):
assert Const(rps, duration).get_rps_list() == rps_list
assert isinstance(rps_list[0][1], int)
class TestLineNew(object):
@pytest.mark.parametrize(
"min_rps, max_rps, duration, check_point, expected",
[(0, 10, 30 * 1000, 0, 0), (0, 10, 30 * 1000, 10, 3),
(0, 10, 30 * 1000, 29, 10), (9, 10, 30 * 1000, 1, 9),
(9, 10, 30 * 1000, 20, 10)])
def test_rps_at(self, min_rps, max_rps, duration, check_point, expected):
assert round(Line(min_rps, max_rps, duration).rps_at(
check_point)) == expected
@pytest.mark.parametrize(
"min_rps, max_rps, duration, check_point, expected",
[
(0, 10, 20 * 1000, 9, (9, 2)),
(0, 10, 30 * 1000, 0, (0, 2)),
(0, 10, 30 * 1000, 5, (5, 3)),
(0, 10, 30 * 1000, 10, (10, 2)),
(0, 10, 3 * 1000, 0, (0, 1)),
(0, 10, 3 * 1000, 1, (3, 1)),
(0, 10, 3 * 1000, 2, (7, 1)),
(0, 10, 3 * 1000, 3, (10, 1)),
(9, 10, 30 * 1000, 0, (9, 15)),
(9, 10, 30 * 1000, 1, (10, 16)),
(10, 10, 30 * 1000, 0, (10, 31)), # strange
(10, 0, 30 * 1000, 0, (10, 2)),
(10, 0, 30 * 1000, 1, (9, 3)),
(10, 0, 30 * 1000, 9, (1, 3)),
(10, 0, 30 * 1000, 10, (0, 2)),
])
def test_get_rps_list(
self, min_rps, max_rps, duration, check_point, expected):
assert Line(min_rps, max_rps,
duration).get_rps_list()[check_point] == expected
@pytest.mark.parametrize(
"min_rps, max_rps, duration, expected_len, threshold, len_above_threshold",
[
(2, 12, 25000, 175, 5000, 160),
(2, 12, 25000, 175, 10000, 135),
(2, 12, 25000, 175, 15000, 100),
(2, 12, 25000, 175, 20000, 55),
(0, 10, 25000, 125, 15000, 80),
(10, 12, 20000, 220, 10000, 115),
(10, 10, 20000, 200, 10000, 100),
(10, 0, 25000, 125, 10000, 45),
(10, 0, 25000, 125, 15000, 20),
])
def test_iter(
self, min_rps, max_rps, duration, expected_len, threshold,
len_above_threshold):
load_plan = Line(min_rps, max_rps, duration)
assert len(load_plan) == expected_len
assert len(
[ts for ts in load_plan if ts >= threshold]) == len_above_threshold
class TestComposite(object):
@pytest.mark.parametrize(
"steps, expected_len", [([Line(0, 10, 20000), Const(10, 10000)], 200),
([Line(0, 10, 20000), Line(10, 0, 20000)], 200),
([Const(5, 10000), Const(10, 5000)], 100)])
def test_iter(self, steps, expected_len):
assert len(Composite(steps)) == expected_len
@pytest.mark.parametrize(
"steps, check_point, expected", [
([Line(0, 10, 20000), Const(10, 10000)], 9, (9, 2)),
([Line(0, 10, 20000), Const(10, 10000)], 10, (10, 2)),
([Line(0, 10, 20000), Const(10, 10000)], 11, (10, 10)),
])
def test_rps_list(self, steps, check_point, expected):
assert Composite(steps).get_rps_list()[check_point] == expected
class TestStairway(object):
@pytest.mark.parametrize(
"min_rps, max_rps, increment, step_duration, expected_len, threshold, len_above_threshold",
[(0, 1000, 50, 3000, 31500, 9000, 31050),
(0, 1000, 50, 3000, 31500, 15000, 30000),
(0, 1000, 50, 3000, 31500, 45000, 15750)])
def test_iter(
self, min_rps, max_rps, increment, step_duration, expected_len,
threshold, len_above_threshold):
load_plan = Stairway(min_rps, max_rps, increment, step_duration)
assert len(load_plan) == expected_len
assert len(
[ts for ts in load_plan if ts >= threshold]) == len_above_threshold
class TestCreate(object):
@pytest.mark.parametrize(
'rps_schedule, check_point, expected', [
(['line(1, 5, 2s)'], 100, [0, 618, 1000, 1302, 1561, 1791]),
(['line(1.1, 5.8, 2s)'], 100, [0, 566, 917, 1196, 1435, 1647]),
(['line(5, 1, 2s)'], 100, [0, 208, 438, 697, 1000, 1381]),
(['const(1, 10s)'], 100,
[0, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000]),
(['const(200, 0.1s)'], 100, [
0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75,
80, 85, 90, 95
]),
(['const(1, 2s)', 'const(2, 2s)'], 100,
[0, 1000, 2000, 2500, 3000, 3500]),
(['const(1.5, 10s)'], 100, [
0, 666, 1333, 2000, 2666, 3333, 4000, 4666, 5333, 6000, 6666,
7333, 8000, 8666, 9333
]),
(['step(1, 5, 1, 5s)'], 10,
[0, 1000, 2000, 3000, 4000, 5000, 5500, 6000, 6500, 7000]),
(['step(1.2, 5.7, 1.1, 5s)'], 10,
[0, 833, 1666, 2500, 3333, 4166, 5000, 5434, 5869, 6304]),
(['const(1, 1)'], 10, [0]),
])
def test_create(self, rps_schedule, check_point, expected):
# pytest.set_trace()
assert take(check_point, (create(rps_schedule))) == expected
# ([0-9.]+d)?([0-9.]+h)?([0-9.]+m)?([0-9.]+s)?
@pytest.mark.parametrize('step_config, expected_duration', [
('line(1,500,1m30s)', 90),
('const(50,1h30s)', 3630 * 1000),
('step(10,200,10,1h20m)', 4800 * 1000)
])
def test_step_factory(step_config, expected_duration):
steps = StepFactory.produce(step_config)
assert steps.duration == expected_duration
| lgpl-2.1 |
sjohns09/MSRDM | vendor/googletest/googlemock/scripts/fuse_gmock_files.py | 242 | 8631 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gmock_files.py v0.1.0
Fuses Google Mock and Google Test source code into two .h files and a .cc file.
SYNOPSIS
fuse_gmock_files.py [GMOCK_ROOT_DIR] OUTPUT_DIR
Scans GMOCK_ROOT_DIR for Google Mock and Google Test source
code, assuming Google Test is in the GMOCK_ROOT_DIR/../googletest
directory, and generates three files:
OUTPUT_DIR/gtest/gtest.h, OUTPUT_DIR/gmock/gmock.h, and
OUTPUT_DIR/gmock-gtest-all.cc. Then you can build your tests
by adding OUTPUT_DIR to the include search path and linking
with OUTPUT_DIR/gmock-gtest-all.cc. These three files contain
everything you need to use Google Mock. Hence you can
"install" Google Mock by copying them to wherever you want.
GMOCK_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gmock_files.py fused_gmock
./fuse_gmock_files.py path/to/unpacked/gmock fused_gmock
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Mock or Google Test headers. Please
report any problems to [email protected]. You can read
http://code.google.com/p/googlemock/wiki/CookBook for more
information.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
import sets
import sys
# We assume that this file is in the scripts/ directory in the Google
# Mock root directory.
DEFAULT_GMOCK_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
# We need to call into googletest/scripts/fuse_gtest_files.py.
sys.path.append(os.path.join(DEFAULT_GMOCK_ROOT_DIR, '../googletest/scripts'))
import fuse_gtest_files
gtest = fuse_gtest_files
# Regex for matching '#include "gmock/..."'.
INCLUDE_GMOCK_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gmock/.+)"')
# Where to find the source seed files.
GMOCK_H_SEED = 'include/gmock/gmock.h'
GMOCK_ALL_CC_SEED = 'src/gmock-all.cc'
# Where to put the generated files.
GTEST_H_OUTPUT = 'gtest/gtest.h'
GMOCK_H_OUTPUT = 'gmock/gmock.h'
GMOCK_GTEST_ALL_CC_OUTPUT = 'gmock-gtest-all.cc'
def GetGTestRootDir(gmock_root):
"""Returns the root directory of Google Test."""
return os.path.join(gmock_root, '../googletest')
def ValidateGMockRootDir(gmock_root):
"""Makes sure gmock_root points to a valid gmock root directory.
The function aborts the program on failure.
"""
gtest.ValidateGTestRootDir(GetGTestRootDir(gmock_root))
gtest.VerifyFileExists(gmock_root, GMOCK_H_SEED)
gtest.VerifyFileExists(gmock_root, GMOCK_ALL_CC_SEED)
def ValidateOutputDir(output_dir):
"""Makes sure output_dir points to a valid output directory.
The function aborts the program on failure.
"""
gtest.VerifyOutputFile(output_dir, gtest.GTEST_H_OUTPUT)
gtest.VerifyOutputFile(output_dir, GMOCK_H_OUTPUT)
gtest.VerifyOutputFile(output_dir, GMOCK_GTEST_ALL_CC_OUTPUT)
def FuseGMockH(gmock_root, output_dir):
"""Scans folder gmock_root to generate gmock/gmock.h in output_dir."""
output_file = file(os.path.join(output_dir, GMOCK_H_OUTPUT), 'w')
processed_files = sets.Set() # Holds all gmock headers we've processed.
def ProcessFile(gmock_header_path):
"""Processes the given gmock header file."""
# We don't process the same header twice.
if gmock_header_path in processed_files:
return
processed_files.add(gmock_header_path)
# Reads each line in the given gmock header.
for line in file(os.path.join(gmock_root, gmock_header_path), 'r'):
m = INCLUDE_GMOCK_FILE_REGEX.match(line)
if m:
# It's '#include "gmock/..."' - let's process it recursively.
ProcessFile('include/' + m.group(1))
else:
m = gtest.INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
# It's '#include "gtest/foo.h"'. We translate it to
# "gtest/gtest.h", regardless of what foo is, since all
# gtest headers are fused into gtest/gtest.h.
# There is no need to #include gtest.h twice.
if not gtest.GTEST_H_SEED in processed_files:
processed_files.add(gtest.GTEST_H_SEED)
output_file.write('#include "%s"\n' % (gtest.GTEST_H_OUTPUT,))
else:
# Otherwise we copy the line unchanged to the output file.
output_file.write(line)
ProcessFile(GMOCK_H_SEED)
output_file.close()
def FuseGMockAllCcToFile(gmock_root, output_file):
"""Scans folder gmock_root to fuse gmock-all.cc into output_file."""
processed_files = sets.Set()
def ProcessFile(gmock_source_file):
"""Processes the given gmock source file."""
# We don't process the same #included file twice.
if gmock_source_file in processed_files:
return
processed_files.add(gmock_source_file)
# Reads each line in the given gmock source file.
for line in file(os.path.join(gmock_root, gmock_source_file), 'r'):
m = INCLUDE_GMOCK_FILE_REGEX.match(line)
if m:
# It's '#include "gmock/foo.h"'. We treat it as '#include
# "gmock/gmock.h"', as all other gmock headers are being fused
# into gmock.h and cannot be #included directly.
# There is no need to #include "gmock/gmock.h" more than once.
if not GMOCK_H_SEED in processed_files:
processed_files.add(GMOCK_H_SEED)
output_file.write('#include "%s"\n' % (GMOCK_H_OUTPUT,))
else:
m = gtest.INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
# It's '#include "gtest/..."'.
# There is no need to #include gtest.h as it has been
# #included by gtest-all.cc.
pass
else:
m = gtest.INCLUDE_SRC_FILE_REGEX.match(line)
if m:
# It's '#include "src/foo"' - let's process it recursively.
ProcessFile(m.group(1))
else:
# Otherwise we copy the line unchanged to the output file.
output_file.write(line)
ProcessFile(GMOCK_ALL_CC_SEED)
def FuseGMockGTestAllCc(gmock_root, output_dir):
"""Scans folder gmock_root to generate gmock-gtest-all.cc in output_dir."""
output_file = file(os.path.join(output_dir, GMOCK_GTEST_ALL_CC_OUTPUT), 'w')
# First, fuse gtest-all.cc into gmock-gtest-all.cc.
gtest.FuseGTestAllCcToFile(GetGTestRootDir(gmock_root), output_file)
# Next, append fused gmock-all.cc to gmock-gtest-all.cc.
FuseGMockAllCcToFile(gmock_root, output_file)
output_file.close()
def FuseGMock(gmock_root, output_dir):
"""Fuses gtest.h, gmock.h, and gmock-gtest-all.h."""
ValidateGMockRootDir(gmock_root)
ValidateOutputDir(output_dir)
gtest.FuseGTestH(GetGTestRootDir(gmock_root), output_dir)
FuseGMockH(gmock_root, output_dir)
FuseGMockGTestAllCc(gmock_root, output_dir)
def main():
argc = len(sys.argv)
if argc == 2:
# fuse_gmock_files.py OUTPUT_DIR
FuseGMock(DEFAULT_GMOCK_ROOT_DIR, sys.argv[1])
elif argc == 3:
# fuse_gmock_files.py GMOCK_ROOT_DIR OUTPUT_DIR
FuseGMock(sys.argv[1], sys.argv[2])
else:
print __doc__
sys.exit(1)
if __name__ == '__main__':
main()
| lgpl-3.0 |
nathanial/lettuce | tests/integration/lib/Django-1.3/django/contrib/gis/tests/geogapp/tests.py | 222 | 4080 | """
Tests for geography support in PostGIS 1.5+
"""
import os
from django.contrib.gis import gdal
from django.contrib.gis.measure import D
from django.test import TestCase
from models import City, County, Zipcode
class GeographyTest(TestCase):
def test01_fixture_load(self):
"Ensure geography features loaded properly."
self.assertEqual(8, City.objects.count())
def test02_distance_lookup(self):
"Testing GeoQuerySet distance lookup support on non-point geography fields."
z = Zipcode.objects.get(code='77002')
cities1 = list(City.objects
.filter(point__distance_lte=(z.poly, D(mi=500)))
.order_by('name')
.values_list('name', flat=True))
cities2 = list(City.objects
.filter(point__dwithin=(z.poly, D(mi=500)))
.order_by('name')
.values_list('name', flat=True))
for cities in [cities1, cities2]:
self.assertEqual(['Dallas', 'Houston', 'Oklahoma City'], cities)
def test03_distance_method(self):
"Testing GeoQuerySet.distance() support on non-point geography fields."
# `GeoQuerySet.distance` is not allowed on geometry fields.
htown = City.objects.get(name='Houston')
qs = Zipcode.objects.distance(htown.point)
def test04_invalid_operators_functions(self):
"Ensuring exceptions are raised for operators & functions invalid on geography fields."
# Only a subset of the geometry functions & operator are available
# to PostGIS geography types. For more information, visit:
# http://postgis.refractions.net/documentation/manual-1.5/ch08.html#PostGIS_GeographyFunctions
z = Zipcode.objects.get(code='77002')
# ST_Within not available.
self.assertRaises(ValueError, City.objects.filter(point__within=z.poly).count)
# `@` operator not available.
self.assertRaises(ValueError, City.objects.filter(point__contained=z.poly).count)
# Regression test for #14060, `~=` was never really implemented for PostGIS.
htown = City.objects.get(name='Houston')
self.assertRaises(ValueError, City.objects.get, point__exact=htown.point)
def test05_geography_layermapping(self):
"Testing LayerMapping support on models with geography fields."
# There is a similar test in `layermap` that uses the same data set,
# but the County model here is a bit different.
if not gdal.HAS_GDAL: return
from django.contrib.gis.utils import LayerMapping
# Getting the shapefile and mapping dictionary.
shp_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', 'data'))
co_shp = os.path.join(shp_path, 'counties', 'counties.shp')
co_mapping = {'name' : 'Name',
'state' : 'State',
'mpoly' : 'MULTIPOLYGON',
}
# Reference county names, number of polygons, and state names.
names = ['Bexar', 'Galveston', 'Harris', 'Honolulu', 'Pueblo']
num_polys = [1, 2, 1, 19, 1] # Number of polygons for each.
st_names = ['Texas', 'Texas', 'Texas', 'Hawaii', 'Colorado']
lm = LayerMapping(County, co_shp, co_mapping, source_srs=4269, unique='name')
lm.save(silent=True, strict=True)
for c, name, num_poly, state in zip(County.objects.order_by('name'), names, num_polys, st_names):
self.assertEqual(4326, c.mpoly.srid)
self.assertEqual(num_poly, len(c.mpoly))
self.assertEqual(name, c.name)
self.assertEqual(state, c.state)
def test06_geography_area(self):
"Testing that Area calculations work on geography columns."
from django.contrib.gis.measure import A
# SELECT ST_Area(poly) FROM geogapp_zipcode WHERE code='77002';
ref_area = 5439084.70637573
tol = 5
z = Zipcode.objects.area().get(code='77002')
self.assertAlmostEqual(z.area.sq_m, ref_area, tol)
| gpl-3.0 |
cloud-engineering/wifi | db.py | 1 | 2100 | # Python Standard Library Imports
import logging
# External Imports
from sqlalchemy import Column
from sqlalchemy import String, INTEGER, FLOAT
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
# Custom Imports
import config
import json
engine = create_engine('sqlite:///' + config.db.PATH, echo=config.db.DEBUG)
DeclarativeBase = declarative_base(engine)
Session = scoped_session(sessionmaker(engine))
class Event(DeclarativeBase):
__tablename__ = 'events'
id = Column(INTEGER, primary_key=True)
device_serial = Column(String)
mode = Column(INTEGER)
time_stamp = Column(INTEGER)
phaseA = Column(FLOAT)
phaseB = Column(FLOAT)
phaseC = Column(FLOAT)
voltage = Column(FLOAT)
def __init__(self, device_serial, mode, time_stamp, phaseA, phaseB, phaseC, voltage):
self.device_serial = device_serial
self.mode = mode
self.time_stamp = time_stamp
self.phaseA = phaseA
self.phaseB = phaseB
self.phaseC = phaseC
self.voltage = voltage
'''
After spending more than 2 days on data formatting, it turns out Python surprised me again with its simplicity.
The __repr__ function can serialize a standard Python dictionary I created into a JSON object and return it.
I can now receive serialized JSON each time I query against the database - thanks to SQLAlchemy - see the implementation below.
'''
def __repr__(self):
return json.dumps({
"DeviceSerial" : self.device_serial,
"Mode" : self.mode,
"Events" : [{"TimeStamp":self.time_stamp,"PhaseA":self.phaseA,"PhaseB":self.phaseB,"PhaseC":self.phaseC,"Voltage":self.voltage}]
})
#'(%s,%d, %d, %f, %f, %f, %f0' % (self.device_serial, self.mode, self.time_stamp, self.phase_1, self.phase_2, self.phase_3, self.voltage)
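# Illustrative usage (assumed, not part of the original module):
#     initialise()
#     session = Session()
#     session.add(Event('SN123', 1, 1514764800, 1.0, 1.2, 0.9, 230.0))
#     session.commit()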
def initialise():
logging.info('Initialising the database.')
DeclarativeBase.metadata.create_all()
Session.execute('PRAGMA journal_mode = WAL') | mit |
yanikou19/pymatgen | pymatgen/io/aseio.py | 3 | 1743 | # coding: utf-8
from __future__ import division, unicode_literals
"""
This module provides conversion between the Atomic Simulation Environment
Atoms object and pymatgen Structure objects.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Mar 8, 2012"
from pymatgen.core.structure import Structure
try:
from ase import Atoms
ase_loaded = True
except ImportError:
ase_loaded = False
class AseAtomsAdaptor(object):
"""
Adaptor serves as a bridge between ASE Atoms and pymatgen structure.
"""
@staticmethod
def get_atoms(structure):
"""
Returns ASE Atoms object from pymatgen structure.
Args:
structure: pymatgen.core.structure.Structure
Returns:
ASE Atoms object
"""
if not structure.is_ordered:
raise ValueError("ASE Atoms only supports ordered structures")
symbols = [str(site.specie.symbol) for site in structure]
positions = [site.coords for site in structure]
cell = structure.lattice.matrix
return Atoms(symbols=symbols, positions=positions, pbc=True, cell=cell)
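    # Illustrative round-trip (assumed usage): for an ordered Structure `s`,
    # AseAtomsAdaptor.get_structure(AseAtomsAdaptor.get_atoms(s)) should
    # preserve the lattice, species, and Cartesian coordinates.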
@staticmethod
def get_structure(atoms):
"""
Returns pymatgen structure from ASE Atoms.
Args:
atoms: ASE Atoms object
Returns:
Equivalent pymatgen.core.structure.Structure
"""
symbols = atoms.get_chemical_symbols()
positions = atoms.get_positions()
lattice = atoms.get_cell()
return Structure(lattice, symbols, positions,
coords_are_cartesian=True)
| mit |
uchuugaka/anaconda | anaconda_lib/jedi/parser/__init__.py | 38 | 16804 | """
The ``Parser`` tries to convert the available Python code in an easy to read
format, something like an abstract syntax tree. The classes who represent this
tree, are sitting in the :mod:`jedi.parser.tree` module.
The Python module ``tokenize`` is a very important part in the ``Parser``,
because it splits the code into different words (tokens). Sometimes it looks a
bit messy. Sorry for that! You might ask now: "Why didn't you use the ``ast``
module for this?" Well, ``ast`` does a very good job understanding proper Python
code, but fails to work as soon as there's a single line of broken code.
There's one important optimization that needs to be known: Statements are not
being parsed completely. ``Statement`` is just a representation of the tokens
within the statement. This lowers memory usage and cpu time and reduces the
complexity of the ``Parser`` (there's another parser sitting inside
``Statement``, which produces ``Array`` and ``Call``).
"""
import os
import re
from jedi.parser import tree as pt
from jedi.parser import tokenize
from jedi.parser import token
from jedi.parser.token import (DEDENT, INDENT, ENDMARKER, NEWLINE, NUMBER,
STRING, OP, ERRORTOKEN)
from jedi.parser.pgen2.pgen import generate_grammar
from jedi.parser.pgen2.parse import PgenParser
OPERATOR_KEYWORDS = 'and', 'for', 'if', 'else', 'in', 'is', 'lambda', 'not', 'or'
# Not used yet. In the future I intend to add something like KeywordStatement
STATEMENT_KEYWORDS = 'assert', 'del', 'global', 'nonlocal', 'raise', \
'return', 'yield', 'pass', 'continue', 'break'
_loaded_grammars = {}
def load_grammar(file='grammar3.4'):
# For now we only support two different Python syntax versions: The latest
# Python 3 and Python 2. This may change.
if file.startswith('grammar3'):
file = 'grammar3.4'
else:
file = 'grammar2.7'
global _loaded_grammars
path = os.path.join(os.path.dirname(__file__), file) + '.txt'
try:
return _loaded_grammars[path]
except KeyError:
return _loaded_grammars.setdefault(path, generate_grammar(path))
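# Illustrative usage (assumed): grammars are cached per resolved path, so
# repeated loads are cheap, e.g.
#     grammar = load_grammar('grammar3.4')
#     module = Parser(grammar, u'def f():\n    pass\n').module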
class ErrorStatement(object):
def __init__(self, stack, next_token, position_modifier, next_start_pos):
self.stack = stack
self._position_modifier = position_modifier
self.next_token = next_token
self._next_start_pos = next_start_pos
@property
def next_start_pos(self):
s = self._next_start_pos
return s[0] + self._position_modifier.line, s[1]
@property
def first_pos(self):
first_type, nodes = self.stack[0]
return nodes[0].start_pos
@property
def first_type(self):
first_type, nodes = self.stack[0]
return first_type
class ParserSyntaxError(object):
def __init__(self, message, position):
self.message = message
self.position = position
class Parser(object):
"""
This class is used to parse a Python file, it then divides them into a
class structure of different scopes.
:param grammar: The grammar object of pgen2. Loaded by load_grammar.
:param source: The codebase for the parser. Must be unicode.
:param module_path: The path of the module in the file system, may be None.
:type module_path: str
:param top_module: Use this module as a parent instead of `self.module`.
"""
def __init__(self, grammar, source, module_path=None, tokenizer=None):
self._ast_mapping = {
'expr_stmt': pt.ExprStmt,
'classdef': pt.Class,
'funcdef': pt.Function,
'file_input': pt.Module,
'import_name': pt.ImportName,
'import_from': pt.ImportFrom,
'break_stmt': pt.KeywordStatement,
'continue_stmt': pt.KeywordStatement,
'return_stmt': pt.ReturnStmt,
'raise_stmt': pt.KeywordStatement,
'yield_expr': pt.YieldExpr,
'del_stmt': pt.KeywordStatement,
'pass_stmt': pt.KeywordStatement,
'global_stmt': pt.GlobalStmt,
'nonlocal_stmt': pt.KeywordStatement,
'assert_stmt': pt.AssertStmt,
'if_stmt': pt.IfStmt,
'with_stmt': pt.WithStmt,
'for_stmt': pt.ForStmt,
'while_stmt': pt.WhileStmt,
'try_stmt': pt.TryStmt,
'comp_for': pt.CompFor,
'decorator': pt.Decorator,
'lambdef': pt.Lambda,
'old_lambdef': pt.Lambda,
'lambdef_nocond': pt.Lambda,
}
self.syntax_errors = []
self._global_names = []
self._omit_dedent_list = []
self._indent_counter = 0
self._last_failed_start_pos = (0, 0)
# TODO do print absolute import detection here.
#try:
# del python_grammar_no_print_statement.keywords["print"]
#except KeyError:
# pass # Doesn't exist in the Python 3 grammar.
#if self.options["print_function"]:
# python_grammar = pygram.python_grammar_no_print_statement
#else:
self._used_names = {}
self._scope_names_stack = [{}]
self._error_statement_stacks = []
added_newline = False
# The Python grammar needs a newline at the end of each statement.
if not source.endswith('\n'):
source += '\n'
added_newline = True
# For the fast parser.
self.position_modifier = pt.PositionModifier()
p = PgenParser(grammar, self.convert_node, self.convert_leaf,
self.error_recovery)
tokenizer = tokenizer or tokenize.source_tokens(source)
self.module = p.parse(self._tokenize(tokenizer))
if self.module.type != 'file_input':
# If there's only one statement, we get back a non-module. That's
# not what we want, we want a module, so we add it here:
self.module = self.convert_node(grammar,
grammar.symbol2number['file_input'],
[self.module])
if added_newline:
self.remove_last_newline()
self.module.used_names = self._used_names
self.module.path = module_path
self.module.global_names = self._global_names
self.module.error_statement_stacks = self._error_statement_stacks
def convert_node(self, grammar, type, children):
"""
Convert raw node information to a Node instance.
This is passed to the parser driver which calls it whenever a reduction of a
grammar rule produces a new complete node, so that the tree is build
strictly bottom-up.
"""
symbol = grammar.number2symbol[type]
try:
new_node = self._ast_mapping[symbol](children)
except KeyError:
new_node = pt.Node(symbol, children)
# We need to check raw_node always, because the same node can be
# returned by convert multiple times.
if symbol == 'global_stmt':
self._global_names += new_node.get_global_names()
elif isinstance(new_node, pt.Lambda):
new_node.names_dict = self._scope_names_stack.pop()
elif isinstance(new_node, (pt.ClassOrFunc, pt.Module)) \
and symbol in ('funcdef', 'classdef', 'file_input'):
# scope_name_stack handling
scope_names = self._scope_names_stack.pop()
if isinstance(new_node, pt.ClassOrFunc):
n = new_node.name
scope_names[n.value].remove(n)
# Set the func name of the current node
arr = self._scope_names_stack[-1].setdefault(n.value, [])
arr.append(n)
new_node.names_dict = scope_names
elif isinstance(new_node, pt.CompFor):
# The name definitions of comprehenions shouldn't be part of the
# current scope. They are part of the comprehension scope.
for n in new_node.get_defined_names():
self._scope_names_stack[-1][n.value].remove(n)
return new_node
def convert_leaf(self, grammar, type, value, prefix, start_pos):
#print('leaf', value, pytree.type_repr(type))
if type == tokenize.NAME:
if value in grammar.keywords:
if value in ('def', 'class', 'lambda'):
self._scope_names_stack.append({})
return pt.Keyword(self.position_modifier, value, start_pos, prefix)
else:
name = pt.Name(self.position_modifier, value, start_pos, prefix)
# Keep a listing of all used names
arr = self._used_names.setdefault(name.value, [])
arr.append(name)
arr = self._scope_names_stack[-1].setdefault(name.value, [])
arr.append(name)
return name
elif type == STRING:
return pt.String(self.position_modifier, value, start_pos, prefix)
elif type == NUMBER:
return pt.Number(self.position_modifier, value, start_pos, prefix)
elif type in (NEWLINE, ENDMARKER):
return pt.Whitespace(self.position_modifier, value, start_pos, prefix)
else:
return pt.Operator(self.position_modifier, value, start_pos, prefix)
def error_recovery(self, grammar, stack, typ, value, start_pos, prefix,
add_token_callback):
"""
This parser is written in a dynamic way, meaning that this parser
allows using different grammars (even non-Python). However, error
recovery is purely written for Python.
"""
def current_suite(stack):
# For now just discard everything that is not a suite or
# file_input, if we detect an error.
for index, (dfa, state, (typ, nodes)) in reversed(list(enumerate(stack))):
# `suite` can sometimes be only simple_stmt, not stmt.
symbol = grammar.number2symbol[typ]
if symbol == 'file_input':
break
elif symbol == 'suite' and len(nodes) > 1:
# suites without an indent in them get discarded.
break
elif symbol == 'simple_stmt' and len(nodes) > 1:
# simple_stmt can just be turned into a Node, if there are
# enough statements. Ignore the rest after that.
break
return index, symbol, nodes
index, symbol, nodes = current_suite(stack)
if symbol == 'simple_stmt':
index -= 2
(_, _, (typ, suite_nodes)) = stack[index]
symbol = grammar.number2symbol[typ]
suite_nodes.append(pt.Node(symbol, list(nodes)))
# Remove
nodes[:] = []
nodes = suite_nodes
stack[index]
#print('err', token.tok_name[typ], repr(value), start_pos, len(stack), index)
self._stack_removal(grammar, stack, index + 1, value, start_pos)
if typ == INDENT:
# For every deleted INDENT we have to delete a DEDENT as well.
# Otherwise the parser will get into trouble and DEDENT too early.
self._omit_dedent_list.append(self._indent_counter)
if value in ('import', 'from', 'class', 'def', 'try', 'while', 'return'):
# Those can always be new statements.
add_token_callback(typ, value, prefix, start_pos)
elif typ == DEDENT and symbol == 'suite':
# Close the current suite, with DEDENT.
# Note that this may cause some suites to not contain any
# statements at all. This is contrary to valid Python syntax. We
# keep incomplete suites in Jedi to be able to complete param names
# or `with ... as foo` names. If we want to use this parser for
# syntax checks, we have to check in a separate turn if suites
# contain statements or not. However, a second check is necessary
# anyway (compile.c does that for Python), because Python's grammar
# doesn't stop you from defining `continue` in a module, etc.
add_token_callback(typ, value, prefix, start_pos)
def _stack_removal(self, grammar, stack, start_index, value, start_pos):
def clear_names(children):
for c in children:
try:
clear_names(c.children)
except AttributeError:
if isinstance(c, pt.Name):
try:
self._scope_names_stack[-1][c.value].remove(c)
self._used_names[c.value].remove(c)
except ValueError:
pass # This may happen with CompFor.
for dfa, state, node in stack[start_index:]:
clear_names(children=node[1])
failed_stack = []
found = False
for dfa, state, (typ, nodes) in stack[start_index:]:
if nodes:
found = True
if found:
symbol = grammar.number2symbol[typ]
failed_stack.append((symbol, nodes))
if nodes and nodes[0] in ('def', 'class', 'lambda'):
self._scope_names_stack.pop()
if failed_stack:
err = ErrorStatement(failed_stack, value, self.position_modifier, start_pos)
self._error_statement_stacks.append(err)
self._last_failed_start_pos = start_pos
stack[start_index:] = []
def _tokenize(self, tokenizer):
for typ, value, start_pos, prefix in tokenizer:
#print(tokenize.tok_name[typ], repr(value), start_pos, repr(prefix))
if typ == DEDENT:
# We need to count indents, because if we just omit any DEDENT,
# we might omit them in the wrong place.
o = self._omit_dedent_list
if o and o[-1] == self._indent_counter:
o.pop()
continue
self._indent_counter -= 1
elif typ == INDENT:
self._indent_counter += 1
elif typ == ERRORTOKEN:
self._add_syntax_error('Strange token', start_pos)
continue
if typ == OP:
typ = token.opmap[value]
yield typ, value, prefix, start_pos
def _add_syntax_error(self, message, position):
self.syntax_errors.append(ParserSyntaxError(message, position))
def __repr__(self):
return "<%s: %s>" % (type(self).__name__, self.module)
def remove_last_newline(self):
"""
In all of this we need to work with _start_pos, because if we worked
with start_pos, we would need to check the position_modifier as well
(which is accounted for in the start_pos property).
"""
endmarker = self.module.children[-1]
# The newline is either in the endmarker as a prefix or the previous
# leaf as a newline token.
if endmarker.prefix.endswith('\n'):
endmarker.prefix = endmarker.prefix[:-1]
last_line = re.sub('.*\n', '', endmarker.prefix)
endmarker._start_pos = endmarker._start_pos[0] - 1, len(last_line)
else:
try:
newline = endmarker.get_previous()
except IndexError:
return # This means that the parser is empty.
while True:
if newline.value == '':
# Must be a DEDENT, just continue.
try:
newline = newline.get_previous()
except IndexError:
# If there's a statement that fails to be parsed, there
# will be no previous leaf. So just ignore it.
break
elif newline.value != '\n':
# This may happen if error correction strikes and removes
# a whole statement including '\n'.
break
else:
newline.value = ''
if self._last_failed_start_pos > newline._start_pos:
# It may be the case that there was a syntax error in a
# function. In that case error correction removes the
# right newline. So we use the previously assigned
# _last_failed_start_pos variable to account for that.
endmarker._start_pos = self._last_failed_start_pos
else:
endmarker._start_pos = newline._start_pos
break
| gpl-3.0 |
revjunkie/lge-g2-d802 | scripts/build-all.py | 1474 | 10189 | #! /usr/bin/env python
# Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import errno
import glob
from optparse import OptionParser
import subprocess
import os
import os.path
import re
import shutil
import sys
version = 'build-all.py, version 0.01'
build_dir = '../all-kernels'
make_command = ["vmlinux", "modules", "dtbs"]
make_env = os.environ
make_env.update({
'ARCH': 'arm',
'KCONFIG_NOTIMESTAMP': 'true' })
make_env.setdefault('CROSS_COMPILE', 'arm-none-linux-gnueabi-')
all_options = {}
def error(msg):
sys.stderr.write("error: %s\n" % msg)
def fail(msg):
"""Fail with a user-printed message"""
error(msg)
sys.exit(1)
def check_kernel():
"""Ensure that PWD is a kernel directory"""
if (not os.path.isfile('MAINTAINERS') or
not os.path.isfile('arch/arm/mach-msm/Kconfig')):
fail("This doesn't seem to be an MSM kernel dir")
def check_build():
"""Ensure that the build directory is present."""
if not os.path.isdir(build_dir):
try:
os.makedirs(build_dir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def update_config(file, str):
print 'Updating %s with \'%s\'\n' % (file, str)
defconfig = open(file, 'a')
defconfig.write(str + '\n')
defconfig.close()
def scan_configs():
"""Get the full list of defconfigs appropriate for this tree."""
names = {}
arch_pats = (
r'[fm]sm[0-9]*_defconfig',
r'apq*_defconfig',
r'qsd*_defconfig',
r'msmkrypton*_defconfig',
)
for p in arch_pats:
for n in glob.glob('arch/arm/configs/' + p):
names[os.path.basename(n)[:-10]] = n
return names
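# Illustrative note (editorial, not part of the original script): scan_configs()
# maps each short target name to its defconfig path by stripping the 10-character
# '_defconfig' suffix from the basename, producing something like
#   {'msm8974': 'arch/arm/configs/msm8974_defconfig'}
# with the exact keys depending on which defconfig files exist in the tree.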
class Builder:
def __init__(self, logname):
self.logname = logname
self.fd = open(logname, 'w')
def run(self, args):
devnull = open('/dev/null', 'r')
proc = subprocess.Popen(args, stdin=devnull,
env=make_env,
bufsize=0,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
count = 0
# for line in proc.stdout:
rawfd = proc.stdout.fileno()
while True:
line = os.read(rawfd, 1024)
if not line:
break
self.fd.write(line)
self.fd.flush()
if all_options.verbose:
sys.stdout.write(line)
sys.stdout.flush()
else:
for i in range(line.count('\n')):
count += 1
if count == 64:
count = 0
print
sys.stdout.write('.')
sys.stdout.flush()
print
result = proc.wait()
self.fd.close()
return result
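# Illustrative usage sketch (assumed names, not from the original script):
#   builder = Builder('../all-kernels/log-msm8974.log')
#   rc = builder.run(['make', 'O=../all-kernels/msm8974', 'vmlinux'])
# run() tees all make output into the log file and, unless --verbose was given,
# prints a dot per line of output (wrapping after 64 dots) as a progress meter,
# returning the subprocess exit code.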
failed_targets = []
def build(target):
dest_dir = os.path.join(build_dir, target)
log_name = '%s/log-%s.log' % (build_dir, target)
print 'Building %s in %s log %s' % (target, dest_dir, log_name)
if not os.path.isdir(dest_dir):
os.mkdir(dest_dir)
defconfig = 'arch/arm/configs/%s_defconfig' % target
dotconfig = '%s/.config' % dest_dir
savedefconfig = '%s/defconfig' % dest_dir
shutil.copyfile(defconfig, dotconfig)
staging_dir = 'install_staging'
modi_dir = '%s' % staging_dir
hdri_dir = '%s/usr' % staging_dir
shutil.rmtree(os.path.join(dest_dir, staging_dir), ignore_errors=True)
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'%s_defconfig' % target], env=make_env, stdin=devnull)
devnull.close()
if not all_options.updateconfigs:
# Build targets can be dependent upon the completion of previous
# build targets, so build them one at a time.
cmd_line = ['make',
'INSTALL_HDR_PATH=%s' % hdri_dir,
'INSTALL_MOD_PATH=%s' % modi_dir,
'O=%s' % dest_dir]
build_targets = []
for c in make_command:
if re.match(r'^-{1,2}\w', c):
cmd_line.append(c)
else:
build_targets.append(c)
for t in build_targets:
build = Builder(log_name)
result = build.run(cmd_line + [t])
if result != 0:
if all_options.keep_going:
failed_targets.append(target)
fail_or_error = error
else:
fail_or_error = fail
fail_or_error("Failed to build %s, see %s" %
(target, build.logname))
# Copy the defconfig back.
if all_options.configs or all_options.updateconfigs:
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'savedefconfig'], env=make_env, stdin=devnull)
devnull.close()
shutil.copyfile(savedefconfig, defconfig)
def build_many(allconf, targets):
print "Building %d target(s)" % len(targets)
for target in targets:
if all_options.updateconfigs:
update_config(allconf[target], all_options.updateconfigs)
build(target)
if failed_targets:
fail('\n '.join(["Failed targets:"] +
[target for target in failed_targets]))
def main():
global make_command
check_kernel()
check_build()
configs = scan_configs()
usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
parser = OptionParser(usage=usage, version=version)
parser.add_option('--configs', action='store_true',
dest='configs',
help="Copy configs back into tree")
parser.add_option('--list', action='store_true',
dest='list',
help='List available targets')
parser.add_option('-v', '--verbose', action='store_true',
dest='verbose',
help='Output to stdout in addition to log file')
parser.add_option('--oldconfig', action='store_true',
dest='oldconfig',
help='Only process "make oldconfig"')
parser.add_option('--updateconfigs',
dest='updateconfigs',
help="Update defconfigs with provided option setting, "
"e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
parser.add_option('-j', '--jobs', type='int', dest="jobs",
help="Number of simultaneous jobs")
parser.add_option('-l', '--load-average', type='int',
dest='load_average',
help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
parser.add_option('-k', '--keep-going', action='store_true',
dest='keep_going', default=False,
help="Keep building other targets if a target fails")
parser.add_option('-m', '--make-target', action='append',
help='Build the indicated make target (default: %s)' %
' '.join(make_command))
(options, args) = parser.parse_args()
global all_options
all_options = options
if options.list:
print "Available targets:"
for target in configs.keys():
print " %s" % target
sys.exit(0)
if options.oldconfig:
make_command = ["oldconfig"]
elif options.make_target:
make_command = options.make_target
if options.jobs:
make_command.append("-j%d" % options.jobs)
if options.load_average:
make_command.append("-l%d" % options.load_average)
if args == ['all']:
build_many(configs, configs.keys())
elif args == ['perf']:
targets = []
for t in configs.keys():
if "perf" in t:
targets.append(t)
build_many(configs, targets)
elif args == ['noperf']:
targets = []
for t in configs.keys():
if "perf" not in t:
targets.append(t)
build_many(configs, targets)
elif len(args) > 0:
targets = []
for t in args:
if t not in configs.keys():
parser.error("Target '%s' not one of %s" % (t, configs.keys()))
targets.append(t)
build_many(configs, targets)
else:
parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
main()
| gpl-2.0 |
bigdatauniversity/edx-platform | lms/lib/courseware_search/lms_filter_generator.py | 43 | 2452 | """
This file contains implementation override of SearchFilterGenerator which will allow
* Filter by all courses in which the user is enrolled in
"""
from microsite_configuration import microsite
from student.models import CourseEnrollment
from search.filter_generator import SearchFilterGenerator
from openedx.core.djangoapps.user_api.partition_schemes import RandomUserPartitionScheme
from openedx.core.djangoapps.course_groups.partition_scheme import CohortPartitionScheme
INCLUDE_SCHEMES = [CohortPartitionScheme, RandomUserPartitionScheme, ]
SCHEME_SUPPORTS_ASSIGNMENT = [RandomUserPartitionScheme, ]
class LmsSearchFilterGenerator(SearchFilterGenerator):
""" SearchFilterGenerator for LMS Search """
_user_enrollments = {}
def _enrollments_for_user(self, user):
""" Return the specified user's course enrollments """
if user not in self._user_enrollments:
self._user_enrollments[user] = CourseEnrollment.enrollments_for_user(user)
return self._user_enrollments[user]
def field_dictionary(self, **kwargs):
""" add course if provided otherwise add courses in which the user is enrolled in """
field_dictionary = super(LmsSearchFilterGenerator, self).field_dictionary(**kwargs)
if not kwargs.get('user'):
field_dictionary['course'] = []
elif not kwargs.get('course_id'):
user_enrollments = self._enrollments_for_user(kwargs['user'])
field_dictionary['course'] = [unicode(enrollment.course_id) for enrollment in user_enrollments]
# if we have an org filter, only include results for this org filter
course_org_filter = microsite.get_value('course_org_filter')
if course_org_filter:
field_dictionary['org'] = course_org_filter
return field_dictionary
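    # Illustrative example (assumed course ids, not from the original module):
    # for a user enrolled in two courses, with no course_id argument and no
    # microsite org filter, the result would look roughly like
    #   {'course': [u'course-v1:edX+DemoX+T1', u'course-v1:edX+DemoX+T2'], ...}
    # plus whatever fields the parent SearchFilterGenerator contributed.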
def exclude_dictionary(self, **kwargs):
""" If we are not on a microsite, then exclude any microsites that are defined """
exclude_dictionary = super(LmsSearchFilterGenerator, self).exclude_dictionary(**kwargs)
course_org_filter = microsite.get_value('course_org_filter')
# If we have a course filter we are ensuring that we only get those courses above
if not course_org_filter:
org_filter_out_set = microsite.get_all_orgs()
if org_filter_out_set:
exclude_dictionary['org'] = list(org_filter_out_set)
return exclude_dictionary
| agpl-3.0 |
Mirantis/mos-horizon | openstack_dashboard/templatetags/themes.py | 14 | 2548 | # Copyright 2016 Hewlett Packard Enterprise Software, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import os
from six.moves.urllib.request import pathname2url
from django.conf import settings
from django.contrib.staticfiles.storage import staticfiles_storage
from django import template
from horizon import themes as hz_themes
register = template.Library()
def get_theme(request):
this_theme = hz_themes.get_default_theme()
try:
theme = request.COOKIES[hz_themes.get_theme_cookie_name()]
for each_theme in hz_themes.get_themes():
if theme == each_theme[0]:
this_theme = each_theme[0]
except KeyError:
pass
return this_theme
def find_asset(theme, asset):
theme_path = ''
for name, label, path in hz_themes.get_themes():
if theme == name:
theme_path = path
theme_path = os.path.join(settings.ROOT_PATH, theme_path)
# If there is a 'static' subdir of the theme, then use
# that as the theme's asset root path
static_path = os.path.join(theme_path, 'static')
if os.path.exists(static_path):
theme_path = static_path
# The full path to the asset requested
asset_path = os.path.join(theme_path, asset)
if os.path.exists(asset_path):
return_path = os.path.join(hz_themes.get_theme_dir(), theme, asset)
else:
return_path = os.path.join('dashboard', asset)
return staticfiles_storage.url(pathname2url(return_path))
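# Illustrative example (assumed theme and asset names, not from the original
# module): for a theme 'material' that ships static/img/logo.png,
# find_asset('material', 'img/logo.png') returns the static URL of the themed
# file, while an asset the theme does not provide falls back to the stock
# 'dashboard/img/logo.png' path.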
@register.assignment_tag()
def themes():
return hz_themes.get_themes()
@register.assignment_tag()
def theme_cookie():
return hz_themes.get_theme_cookie_name()
@register.assignment_tag()
def theme_dir():
return hz_themes.get_theme_dir()
@register.assignment_tag(takes_context=True)
def current_theme(context):
return get_theme(context.request)
@register.simple_tag(takes_context=True)
def themable_asset(context, asset):
return find_asset(get_theme(context.request), asset)
| apache-2.0 |
richard-willowit/odoo | addons/web/models/ir_http.py | 6 | 2130 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import json
from odoo import models
from odoo.http import request
import odoo
class Http(models.AbstractModel):
_inherit = 'ir.http'
def webclient_rendering_context(self):
return {
'menu_data': request.env['ir.ui.menu'].load_menus(request.debug),
'session_info': json.dumps(self.session_info()),
}
def session_info(self):
user = request.env.user
display_switch_company_menu = user.has_group('base.group_multi_company') and len(user.company_ids) > 1
version_info = odoo.service.common.exp_version()
return {
"session_id": request.session.sid,
"uid": request.session.uid,
"is_system": request.env.user._is_system(),
"is_superuser": request.env.user._is_superuser(),
"user_context": request.session.get_context() if request.session.uid else {},
"db": request.session.db,
"server_version": version_info.get('server_version'),
"server_version_info": version_info.get('server_version_info'),
"name": user.name,
"username": user.login,
"company_id": request.env.user.company_id.id if request.session.uid else None,
"partner_id": request.env.user.partner_id.id if request.session.uid and request.env.user.partner_id else None,
"user_companies": {'current_company': (user.company_id.id, user.company_id.name), 'allowed_companies': [(comp.id, comp.name) for comp in user.company_ids]} if display_switch_company_menu else False,
"currencies": self.get_currencies(),
"web.base.url": self.env['ir.config_parameter'].sudo().get_param('web.base.url', default=''),
}
def get_currencies(self):
Currency = request.env['res.currency']
currencies = Currency.search([]).read(['symbol', 'position', 'decimal_places'])
return { c['id']: {'symbol': c['symbol'], 'position': c['position'], 'digits': [69,c['decimal_places']]} for c in currencies}
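    # Illustrative shape of the result (assumed data, not from the original
    # module): {1: {'symbol': '$', 'position': 'before', 'digits': [69, 2]}};
    # the hard-coded 69 appears to be a placeholder for the total precision
    # expected by the client-side number formatter.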
| gpl-3.0 |
alope107/nbgrader | nbgrader/tests/api/test_gradebook.py | 4 | 25911 | import pytest
from datetime import datetime
from nbgrader import api
from nbgrader import utils
from nbgrader.api import InvalidEntry, MissingEntry
@pytest.fixture
def gradebook(request):
gb = api.Gradebook("sqlite:///:memory:")
def fin():
gb.db.close()
request.addfinalizer(fin)
return gb
@pytest.fixture
def assignment(gradebook):
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
gradebook.add_grade_cell('test1', 'p1', 'foo', max_score=1, cell_type='code')
gradebook.add_grade_cell('test2', 'p1', 'foo', max_score=2, cell_type='markdown')
gradebook.add_solution_cell('solution1', 'p1', 'foo')
gradebook.add_solution_cell('test2', 'p1', 'foo')
gradebook.add_source_cell('test1', 'p1', 'foo', cell_type='code')
gradebook.add_source_cell('test2', 'p1', 'foo', cell_type='markdown')
gradebook.add_source_cell('solution1', 'p1', 'foo', cell_type='code')
return gradebook
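# Editorial summary (not part of the original tests): the fixture above builds
# one assignment 'foo' containing notebook 'p1' with two grade cells (code cell
# 'test1' worth 1 point, markdown cell 'test2' worth 2 points), solution cells
# 'solution1' and 'test2', and matching source cells -- the shape that most of
# the tests below rely on.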
def test_init(gradebook):
assert gradebook.students == []
assert gradebook.assignments == []
#### Test students
def test_add_student(gradebook):
s = gradebook.add_student('12345')
assert s.id == '12345'
assert gradebook.students == [s]
# try adding a duplicate student
with pytest.raises(InvalidEntry):
gradebook.add_student('12345')
# try adding a student with arguments
s = gradebook.add_student('6789', last_name="Bar", first_name="Foo", email="[email protected]")
assert s.id == '6789'
assert s.last_name == "Bar"
assert s.first_name == "Foo"
assert s.email == "[email protected]"
def test_add_duplicate_student(gradebook):
# we also need this test because this will cause an IntegrityError
# under the hood rather than a FlushError
gradebook.add_student('12345')
with pytest.raises(InvalidEntry):
gradebook.add_student('12345')
def test_find_student(gradebook):
s1 = gradebook.add_student('12345')
assert gradebook.find_student('12345') == s1
s2 = gradebook.add_student('abcd')
assert gradebook.find_student('12345') == s1
assert gradebook.find_student('abcd') == s2
def test_find_nonexistant_student(gradebook):
with pytest.raises(MissingEntry):
gradebook.find_student('12345')
def test_remove_student(assignment):
assignment.add_student('hacker123')
assignment.add_submission('foo', 'hacker123')
assignment.remove_student('hacker123')
with pytest.raises(MissingEntry):
assignment.find_submission('foo', 'hacker123')
with pytest.raises(MissingEntry):
assignment.find_student('hacker123')
def test_update_or_create_student(gradebook):
# first test creating it
s1 = gradebook.update_or_create_student('hacker123')
assert gradebook.find_student('hacker123') == s1
assert s1.first_name is None
# now test finding/updating it
s2 = gradebook.update_or_create_student('hacker123', first_name='Alyssa')
assert s1 == s2
assert s2.first_name == 'Alyssa'
#### Test assignments
def test_add_assignment(gradebook):
a = gradebook.add_assignment('foo')
assert a.name == 'foo'
assert gradebook.assignments == [a]
# try adding a duplicate assignment
with pytest.raises(InvalidEntry):
gradebook.add_assignment('foo')
# try adding an assignment with arguments
now = datetime.now()
a = gradebook.add_assignment('bar', duedate=now)
assert a.name == 'bar'
assert a.duedate == now
# try adding with a string timestamp
a = gradebook.add_assignment('baz', duedate=now.isoformat())
assert a.name == 'baz'
assert a.duedate == now
def test_add_duplicate_assignment(gradebook):
gradebook.add_assignment('foo')
with pytest.raises(InvalidEntry):
gradebook.add_assignment('foo')
def test_find_assignment(gradebook):
a1 = gradebook.add_assignment('foo')
assert gradebook.find_assignment('foo') == a1
a2 = gradebook.add_assignment('bar')
assert gradebook.find_assignment('foo') == a1
assert gradebook.find_assignment('bar') == a2
def test_find_nonexistant_assignment(gradebook):
with pytest.raises(MissingEntry):
gradebook.find_assignment('foo')
def test_remove_assignment(assignment):
assignment.add_student('hacker123')
assignment.add_submission('foo', 'hacker123')
notebooks = assignment.find_assignment('foo').notebooks
grade_cells = [x for nb in notebooks for x in nb.grade_cells]
solution_cells = [x for nb in notebooks for x in nb.solution_cells]
source_cells = [x for nb in notebooks for x in nb.source_cells]
assignment.remove_assignment('foo')
for nb in notebooks:
assert assignment.db.query(api.SubmittedNotebook).filter(api.SubmittedNotebook.id == nb.id).all() == []
for grade_cell in grade_cells:
assert assignment.db.query(api.GradeCell).filter(api.GradeCell.id == grade_cell.id).all() == []
for solution_cell in solution_cells:
assert assignment.db.query(api.SolutionCell).filter(api.SolutionCell.id == solution_cell.id).all() == []
for source_cell in source_cells:
assert assignment.db.query(api.SourceCell).filter(api.SourceCell.id == source_cell.id).all() == []
with pytest.raises(MissingEntry):
assignment.find_assignment('foo')
assert assignment.find_student('hacker123').submissions == []
def test_update_or_create_assignment(gradebook):
# first test creating it
a1 = gradebook.update_or_create_assignment('foo')
assert gradebook.find_assignment('foo') == a1
assert a1.duedate is None
# now test finding/updating it
a2 = gradebook.update_or_create_assignment('foo', duedate="2015-02-02 14:58:23.948203 PST")
assert a1 == a2
assert a2.duedate == utils.parse_utc("2015-02-02 14:58:23.948203 PST")
#### Test notebooks
def test_add_notebook(gradebook):
a = gradebook.add_assignment('foo')
n = gradebook.add_notebook('p1', 'foo')
assert n.name == 'p1'
assert n.assignment == a
assert a.notebooks == [n]
# try adding a duplicate assignment
with pytest.raises(InvalidEntry):
gradebook.add_notebook('p1', 'foo')
def test_add_duplicate_notebook(gradebook):
# it should be ok to add a notebook with the same name, as long as
# it's for different assignments
gradebook.add_assignment('foo')
gradebook.add_assignment('bar')
n1 = gradebook.add_notebook('p1', 'foo')
n2 = gradebook.add_notebook('p1', 'bar')
assert n1.id != n2.id
# but not ok to add a notebook with the same name for the same assignment
with pytest.raises(InvalidEntry):
gradebook.add_notebook('p1', 'foo')
def test_find_notebook(gradebook):
gradebook.add_assignment('foo')
n1 = gradebook.add_notebook('p1', 'foo')
assert gradebook.find_notebook('p1', 'foo') == n1
n2 = gradebook.add_notebook('p2', 'foo')
assert gradebook.find_notebook('p1', 'foo') == n1
assert gradebook.find_notebook('p2', 'foo') == n2
def test_find_nonexistant_notebook(gradebook):
# check that it doesn't find it when there is nothing in the db
with pytest.raises(MissingEntry):
gradebook.find_notebook('p1', 'foo')
# check that it doesn't find it even if the assignment exists
gradebook.add_assignment('foo')
with pytest.raises(MissingEntry):
gradebook.find_notebook('p1', 'foo')
def test_update_or_create_notebook(gradebook):
# first test creating it
gradebook.add_assignment('foo')
n1 = gradebook.update_or_create_notebook('p1', 'foo')
assert gradebook.find_notebook('p1', 'foo') == n1
# now test finding/updating it
n2 = gradebook.update_or_create_notebook('p1', 'foo')
assert n1 == n2
def test_remove_notebook(assignment):
assignment.add_student('hacker123')
assignment.add_submission('foo', 'hacker123')
notebooks = assignment.find_assignment('foo').notebooks
for nb in notebooks:
grade_cells = [x for x in nb.grade_cells]
solution_cells = [x for x in nb.solution_cells]
source_cells = [x for x in nb.source_cells]
assignment.remove_notebook(nb.name, 'foo')
assert assignment.db.query(api.SubmittedNotebook).filter(api.SubmittedNotebook.id == nb.id).all() == []
for grade_cell in grade_cells:
assert assignment.db.query(api.GradeCell).filter(api.GradeCell.id == grade_cell.id).all() == []
for solution_cell in solution_cells:
assert assignment.db.query(api.SolutionCell).filter(api.SolutionCell.id == solution_cell.id).all() == []
for source_cell in source_cells:
assert assignment.db.query(api.SourceCell).filter(api.SourceCell.id == source_cell.id).all() == []
with pytest.raises(MissingEntry):
assignment.find_notebook(nb.name, 'foo')
#### Test grade cells
def test_add_grade_cell(gradebook):
gradebook.add_assignment('foo')
n = gradebook.add_notebook('p1', 'foo')
gc = gradebook.add_grade_cell('test1', 'p1', 'foo', max_score=2, cell_type='markdown')
assert gc.name == 'test1'
assert gc.max_score == 2
assert gc.cell_type == 'markdown'
assert n.grade_cells == [gc]
assert gc.notebook == n
def test_add_grade_cell_with_args(gradebook):
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
gc = gradebook.add_grade_cell(
'test1', 'p1', 'foo',
max_score=3, cell_type="code")
assert gc.name == 'test1'
assert gc.max_score == 3
assert gc.cell_type == "code"
def test_create_invalid_grade_cell(gradebook):
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
with pytest.raises(InvalidEntry):
gradebook.add_grade_cell(
'test1', 'p1', 'foo',
max_score=3, cell_type="something")
def test_add_duplicate_grade_cell(gradebook):
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
gradebook.add_grade_cell('test1', 'p1', 'foo', max_score=1, cell_type='code')
with pytest.raises(InvalidEntry):
gradebook.add_grade_cell('test1', 'p1', 'foo', max_score=2, cell_type='markdown')
def test_find_grade_cell(gradebook):
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
gc1 = gradebook.add_grade_cell('test1', 'p1', 'foo', max_score=1, cell_type='code')
assert gradebook.find_grade_cell('test1', 'p1', 'foo') == gc1
gc2 = gradebook.add_grade_cell('test2', 'p1', 'foo', max_score=2, cell_type='code')
assert gradebook.find_grade_cell('test1', 'p1', 'foo') == gc1
assert gradebook.find_grade_cell('test2', 'p1', 'foo') == gc2
def test_find_nonexistant_grade_cell(gradebook):
with pytest.raises(MissingEntry):
gradebook.find_grade_cell('test1', 'p1', 'foo')
gradebook.add_assignment('foo')
with pytest.raises(MissingEntry):
gradebook.find_grade_cell('test1', 'p1', 'foo')
gradebook.add_notebook('p1', 'foo')
with pytest.raises(MissingEntry):
gradebook.find_grade_cell('test1', 'p1', 'foo')
def test_update_or_create_grade_cell(gradebook):
# first test creating it
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
gc1 = gradebook.update_or_create_grade_cell('test1', 'p1', 'foo', max_score=2, cell_type='code')
assert gc1.max_score == 2
assert gc1.cell_type == 'code'
assert gradebook.find_grade_cell('test1', 'p1', 'foo') == gc1
# now test finding/updating it
gc2 = gradebook.update_or_create_grade_cell('test1', 'p1', 'foo', max_score=3)
assert gc1 == gc2
assert gc1.max_score == 3
assert gc1.cell_type == 'code'
#### Test solution cells
def test_add_solution_cell(gradebook):
gradebook.add_assignment('foo')
n = gradebook.add_notebook('p1', 'foo')
sc = gradebook.add_solution_cell('test1', 'p1', 'foo')
assert sc.name == 'test1'
assert n.solution_cells == [sc]
assert sc.notebook == n
def test_add_duplicate_solution_cell(gradebook):
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
gradebook.add_solution_cell('test1', 'p1', 'foo')
with pytest.raises(InvalidEntry):
gradebook.add_solution_cell('test1', 'p1', 'foo')
def test_find_solution_cell(gradebook):
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
sc1 = gradebook.add_solution_cell('test1', 'p1', 'foo')
assert gradebook.find_solution_cell('test1', 'p1', 'foo') == sc1
sc2 = gradebook.add_solution_cell('test2', 'p1', 'foo')
assert gradebook.find_solution_cell('test1', 'p1', 'foo') == sc1
assert gradebook.find_solution_cell('test2', 'p1', 'foo') == sc2
def test_find_nonexistant_solution_cell(gradebook):
with pytest.raises(MissingEntry):
gradebook.find_solution_cell('test1', 'p1', 'foo')
gradebook.add_assignment('foo')
with pytest.raises(MissingEntry):
gradebook.find_solution_cell('test1', 'p1', 'foo')
gradebook.add_notebook('p1', 'foo')
with pytest.raises(MissingEntry):
gradebook.find_solution_cell('test1', 'p1', 'foo')
def test_update_or_create_solution_cell(gradebook):
# first test creating it
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
sc1 = gradebook.update_or_create_solution_cell('test1', 'p1', 'foo')
assert gradebook.find_solution_cell('test1', 'p1', 'foo') == sc1
# now test finding/updating it
sc2 = gradebook.update_or_create_solution_cell('test1', 'p1', 'foo')
assert sc1 == sc2
#### Test source cells
def test_add_source_cell(gradebook):
gradebook.add_assignment('foo')
n = gradebook.add_notebook('p1', 'foo')
sc = gradebook.add_source_cell('test1', 'p1', 'foo', cell_type="code")
assert sc.name == 'test1'
assert sc.cell_type == 'code'
assert n.source_cells == [sc]
assert sc.notebook == n
def test_add_source_cell_with_args(gradebook):
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
sc = gradebook.add_source_cell(
'test1', 'p1', 'foo',
source="blah blah blah",
cell_type="code", checksum="abcde")
assert sc.name == 'test1'
assert sc.source == "blah blah blah"
assert sc.cell_type == "code"
assert sc.checksum == "abcde"
def test_create_invalid_source_cell(gradebook):
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
with pytest.raises(InvalidEntry):
gradebook.add_source_cell(
'test1', 'p1', 'foo',
source="blah blah blah",
cell_type="something", checksum="abcde")
def test_add_duplicate_source_cell(gradebook):
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
gradebook.add_source_cell('test1', 'p1', 'foo', cell_type="code")
with pytest.raises(InvalidEntry):
gradebook.add_source_cell('test1', 'p1', 'foo', cell_type="code")
def test_find_source_cell(gradebook):
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
sc1 = gradebook.add_source_cell('test1', 'p1', 'foo', cell_type="code")
assert gradebook.find_source_cell('test1', 'p1', 'foo') == sc1
sc2 = gradebook.add_source_cell('test2', 'p1', 'foo', cell_type="code")
assert gradebook.find_source_cell('test1', 'p1', 'foo') == sc1
assert gradebook.find_source_cell('test2', 'p1', 'foo') == sc2
def test_find_nonexistant_source_cell(gradebook):
with pytest.raises(MissingEntry):
gradebook.find_source_cell('test1', 'p1', 'foo')
gradebook.add_assignment('foo')
with pytest.raises(MissingEntry):
gradebook.find_source_cell('test1', 'p1', 'foo')
gradebook.add_notebook('p1', 'foo')
with pytest.raises(MissingEntry):
gradebook.find_source_cell('test1', 'p1', 'foo')
def test_update_or_create_source_cell(gradebook):
# first test creating it
gradebook.add_assignment('foo')
gradebook.add_notebook('p1', 'foo')
sc1 = gradebook.update_or_create_source_cell('test1', 'p1', 'foo', cell_type='code')
assert sc1.cell_type == 'code'
assert gradebook.find_source_cell('test1', 'p1', 'foo') == sc1
# now test finding/updating it
    assert sc1.checksum is None
sc2 = gradebook.update_or_create_source_cell('test1', 'p1', 'foo', checksum="123456")
assert sc1 == sc2
assert sc1.cell_type == 'code'
assert sc1.checksum == "123456"
#### Test submissions
def test_add_submission(assignment):
assignment.add_student('hacker123')
assignment.add_student('bitdiddle')
s1 = assignment.add_submission('foo', 'hacker123')
s2 = assignment.add_submission('foo', 'bitdiddle')
assert assignment.assignment_submissions('foo') == [s2, s1]
assert assignment.student_submissions('hacker123') == [s1]
assert assignment.student_submissions('bitdiddle') == [s2]
assert assignment.find_submission('foo', 'hacker123') == s1
assert assignment.find_submission('foo', 'bitdiddle') == s2
def test_add_duplicate_submission(assignment):
assignment.add_student('hacker123')
assignment.add_submission('foo', 'hacker123')
with pytest.raises(InvalidEntry):
assignment.add_submission('foo', 'hacker123')
def test_remove_submission(assignment):
assignment.add_student('hacker123')
assignment.add_submission('foo', 'hacker123')
submission = assignment.find_submission('foo', 'hacker123')
notebooks = submission.notebooks
grades = [x for nb in notebooks for x in nb.grades]
comments = [x for nb in notebooks for x in nb.comments]
assignment.remove_submission('foo', 'hacker123')
for nb in notebooks:
assert assignment.db.query(api.SubmittedNotebook).filter(api.SubmittedNotebook.id == nb.id).all() == []
for grade in grades:
assert assignment.db.query(api.Grade).filter(api.Grade.id == grade.id).all() == []
for comment in comments:
assert assignment.db.query(api.Comment).filter(api.Comment.id == comment.id).all() == []
with pytest.raises(MissingEntry):
assignment.find_submission('foo', 'hacker123')
def test_update_or_create_submission(assignment):
assignment.add_student('hacker123')
s1 = assignment.update_or_create_submission('foo', 'hacker123')
assert s1.timestamp is None
s2 = assignment.update_or_create_submission('foo', 'hacker123', timestamp="2015-02-02 14:58:23.948203 PST")
assert s1 == s2
assert s2.timestamp == utils.parse_utc("2015-02-02 14:58:23.948203 PST")
def test_find_submission_notebook(assignment):
assignment.add_student('hacker123')
s = assignment.add_submission('foo', 'hacker123')
n1, = s.notebooks
with pytest.raises(MissingEntry):
assignment.find_submission_notebook('p2', 'foo', 'hacker123')
n2 = assignment.find_submission_notebook('p1', 'foo', 'hacker123')
assert n1 == n2
def test_find_submission_notebook_by_id(assignment):
assignment.add_student('hacker123')
s = assignment.add_submission('foo', 'hacker123')
n1, = s.notebooks
with pytest.raises(MissingEntry):
assignment.find_submission_notebook_by_id('12345')
n2 = assignment.find_submission_notebook_by_id(n1.id)
assert n1 == n2
def test_remove_submission_notebook(assignment):
assignment.add_student('hacker123')
assignment.add_submission('foo', 'hacker123')
submission = assignment.find_submission('foo', 'hacker123')
notebooks = submission.notebooks
for nb in notebooks:
grades = [x for x in nb.grades]
comments = [x for x in nb.comments]
assignment.remove_submission_notebook(nb.name, 'foo', 'hacker123')
assert assignment.db.query(api.SubmittedNotebook).filter(api.SubmittedNotebook.id == nb.id).all() == []
for grade in grades:
assert assignment.db.query(api.Grade).filter(api.Grade.id == grade.id).all() == []
for comment in comments:
assert assignment.db.query(api.Comment).filter(api.Comment.id == comment.id).all() == []
with pytest.raises(MissingEntry):
assignment.find_submission_notebook(nb.name, 'foo', 'hacker123')
def test_find_grade(assignment):
assignment.add_student('hacker123')
s = assignment.add_submission('foo', 'hacker123')
n1, = s.notebooks
grades = n1.grades
for g1 in grades:
g2 = assignment.find_grade(g1.name, 'p1', 'foo', 'hacker123')
assert g1 == g2
with pytest.raises(MissingEntry):
assignment.find_grade('asdf', 'p1', 'foo', 'hacker123')
def test_find_grade_by_id(assignment):
assignment.add_student('hacker123')
s = assignment.add_submission('foo', 'hacker123')
n1, = s.notebooks
grades = n1.grades
for g1 in grades:
g2 = assignment.find_grade_by_id(g1.id)
assert g1 == g2
with pytest.raises(MissingEntry):
assignment.find_grade_by_id('12345')
def test_find_comment(assignment):
assignment.add_student('hacker123')
s = assignment.add_submission('foo', 'hacker123')
n1, = s.notebooks
comments = n1.comments
for c1 in comments:
c2 = assignment.find_comment(c1.name, 'p1', 'foo', 'hacker123')
assert c1 == c2
with pytest.raises(MissingEntry):
assignment.find_comment('asdf', 'p1', 'foo', 'hacker123')
def test_find_comment_by_id(assignment):
assignment.add_student('hacker123')
s = assignment.add_submission('foo', 'hacker123')
n1, = s.notebooks
comments = n1.comments
for c1 in comments:
c2 = assignment.find_comment_by_id(c1.id)
assert c1 == c2
with pytest.raises(MissingEntry):
assignment.find_comment_by_id('12345')
### Test average scores
def test_average_assignment_score(assignment):
assert assignment.average_assignment_score('foo') == 0.0
assert assignment.average_assignment_code_score('foo') == 0.0
assert assignment.average_assignment_written_score('foo') == 0.0
assignment.add_student('hacker123')
assignment.add_student('bitdiddle')
assignment.add_submission('foo', 'hacker123')
assignment.add_submission('foo', 'bitdiddle')
assert assignment.average_assignment_score('foo') == 0.0
assert assignment.average_assignment_code_score('foo') == 0.0
assert assignment.average_assignment_written_score('foo') == 0.0
g1 = assignment.find_grade("test1", "p1", "foo", "hacker123")
g2 = assignment.find_grade("test2", "p1", "foo", "hacker123")
g3 = assignment.find_grade("test1", "p1", "foo", "bitdiddle")
g4 = assignment.find_grade("test2", "p1", "foo", "bitdiddle")
g1.manual_score = 0.5
g2.manual_score = 2
g3.manual_score = 1
g4.manual_score = 1
assignment.db.commit()
assert assignment.average_assignment_score('foo') == 2.25
assert assignment.average_assignment_code_score('foo') == 0.75
assert assignment.average_assignment_written_score('foo') == 1.5
def test_average_notebook_score(assignment):
assert assignment.average_notebook_score('p1', 'foo') == 0
assert assignment.average_notebook_code_score('p1', 'foo') == 0
assert assignment.average_notebook_written_score('p1', 'foo') == 0
assignment.add_student('hacker123')
assignment.add_student('bitdiddle')
assignment.add_submission('foo', 'hacker123')
assignment.add_submission('foo', 'bitdiddle')
assert assignment.average_notebook_score('p1', 'foo') == 0.0
assert assignment.average_notebook_code_score('p1', 'foo') == 0.0
assert assignment.average_notebook_written_score('p1', 'foo') == 0.0
g1 = assignment.find_grade("test1", "p1", "foo", "hacker123")
g2 = assignment.find_grade("test2", "p1", "foo", "hacker123")
g3 = assignment.find_grade("test1", "p1", "foo", "bitdiddle")
g4 = assignment.find_grade("test2", "p1", "foo", "bitdiddle")
g1.manual_score = 0.5
g2.manual_score = 2
g3.manual_score = 1
g4.manual_score = 1
assignment.db.commit()
assert assignment.average_notebook_score('p1', 'foo') == 2.25
assert assignment.average_notebook_code_score('p1', 'foo') == 0.75
assert assignment.average_notebook_written_score('p1', 'foo') == 1.5
## Test mass dictionary queries
def test_student_dicts(assignment):
assignment.add_student('hacker123')
assignment.add_student('bitdiddle')
assignment.add_student('louisreasoner')
assignment.add_submission('foo', 'hacker123')
assignment.add_submission('foo', 'bitdiddle')
g1 = assignment.find_grade("test1", "p1", "foo", "hacker123")
g2 = assignment.find_grade("test2", "p1", "foo", "hacker123")
g3 = assignment.find_grade("test1", "p1", "foo", "bitdiddle")
g4 = assignment.find_grade("test2", "p1", "foo", "bitdiddle")
g1.manual_score = 0.5
g2.manual_score = 2
g3.manual_score = 1
g4.manual_score = 1
assignment.db.commit()
students = assignment.student_dicts()
a = sorted(students, key=lambda x: x["id"])
b = sorted([x.to_dict() for x in assignment.students], key=lambda x: x["id"])
assert a == b
def test_notebook_submission_dicts(assignment):
assignment.add_student('hacker123')
assignment.add_student('bitdiddle')
s1 = assignment.add_submission('foo', 'hacker123')
s2 = assignment.add_submission('foo', 'bitdiddle')
s1.flagged = True
s2.flagged = False
g1 = assignment.find_grade("test1", "p1", "foo", "hacker123")
g2 = assignment.find_grade("test2", "p1", "foo", "hacker123")
g3 = assignment.find_grade("test1", "p1", "foo", "bitdiddle")
g4 = assignment.find_grade("test2", "p1", "foo", "bitdiddle")
g1.manual_score = 0.5
g2.manual_score = 2
g3.manual_score = 1
g4.manual_score = 1
assignment.db.commit()
notebook = assignment.find_notebook("p1", "foo")
submissions = assignment.notebook_submission_dicts("p1", "foo")
a = sorted(submissions, key=lambda x: x["id"])
b = sorted([x.to_dict() for x in notebook.submissions], key=lambda x: x["id"])
assert a == b
| bsd-3-clause |
tojon/treeherder | treeherder/webapp/api/performance_data.py | 2 | 14534 | import datetime
import time
from collections import defaultdict
import django_filters
from django.conf import settings
from rest_framework import (exceptions,
filters,
pagination,
viewsets)
from rest_framework.response import Response
from rest_framework.status import HTTP_400_BAD_REQUEST
from treeherder.model import models
from treeherder.perf.alerts import get_alert_properties
from treeherder.perf.models import (PerformanceAlert,
PerformanceAlertSummary,
PerformanceBugTemplate,
PerformanceDatum,
PerformanceFramework,
PerformanceSignature)
from treeherder.webapp.api.permissions import IsStaffOrReadOnly
from .performance_serializers import (PerformanceAlertSerializer,
PerformanceAlertSummarySerializer,
PerformanceBugTemplateSerializer,
PerformanceFrameworkSerializer)
class PerformanceSignatureViewSet(viewsets.ViewSet):
def list(self, request, project):
repository = models.Repository.objects.get(name=project)
signature_data = PerformanceSignature.objects.filter(
repository=repository).select_related(
'parent_signature__signature_hash', 'option_collection',
'platform')
parent_signature_hashes = request.query_params.getlist('parent_signature')
if parent_signature_hashes:
parent_signatures = PerformanceSignature.objects.filter(
repository=repository,
signature_hash__in=parent_signature_hashes)
signature_data = signature_data.filter(
parent_signature__in=parent_signatures)
if not int(request.query_params.get('subtests', True)):
signature_data = signature_data.filter(parent_signature__isnull=True)
signature_ids = request.query_params.getlist('id')
if signature_ids:
signature_data = signature_data.filter(id__in=map(int,
signature_ids))
signature_hashes = request.query_params.getlist('signature')
if signature_hashes:
signature_data = signature_data.filter(
signature_hash__in=signature_hashes)
frameworks = request.query_params.getlist('framework')
if frameworks:
signature_data = signature_data.filter(
framework__in=frameworks)
interval = request.query_params.get('interval')
        start_date = request.query_params.get('start_date') # 'YYYY-MM-DDTHH:MM:SS'
end_date = request.query_params.get('end_date') # 'YYYY-MM-DDTHH:MM:SS'
if interval and (start_date or end_date):
return Response({"message": "Provide either interval only -or- start (and end) date"},
status=HTTP_400_BAD_REQUEST)
if interval:
signature_data = signature_data.filter(
last_updated__gte=datetime.datetime.utcfromtimestamp(
int(time.time() - int(interval))))
if start_date:
signature_data = signature_data.filter(last_updated__gte=start_date)
if end_date:
signature_data = signature_data.filter(last_updated__lte=end_date)
platform = request.query_params.get('platform')
if platform:
platforms = models.MachinePlatform.objects.filter(
platform=platform)
signature_data = signature_data.filter(
platform__in=platforms)
ret = {}
for (id, signature_hash, option_collection_hash, platform, framework,
suite, test, lower_is_better, extra_options,
has_subtests, parent_signature_hash) in signature_data.values_list(
'id',
'signature_hash',
'option_collection__option_collection_hash',
'platform__platform', 'framework', 'suite',
'test', 'lower_is_better',
'extra_options', 'has_subtests',
'parent_signature__signature_hash').distinct():
ret[signature_hash] = {
'id': id,
'framework_id': framework,
'option_collection_hash': option_collection_hash,
'machine_platform': platform,
'suite': suite
}
if not lower_is_better:
                # almost always true, save some bandwidth by assuming that by
# default
ret[signature_hash]['lower_is_better'] = False
if test:
# test may be empty in case of a summary test, leave it empty
# then
ret[signature_hash]['test'] = test
if has_subtests:
ret[signature_hash]['has_subtests'] = True
if parent_signature_hash:
# this value is often null, save some bandwidth by excluding
# it if not present
ret[signature_hash]['parent_signature'] = parent_signature_hash
if extra_options:
# extra_options stored as charField but api returns as list
ret[signature_hash]['extra_options'] = extra_options.split(' ')
return Response(ret)
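    # Illustrative response shape (assumed values, not from the original code):
    #   {"09f5c1a...": {"id": 1, "framework_id": 1,
    #                   "option_collection_hash": "102210f...",
    #                   "machine_platform": "linux64", "suite": "tp5o"}}
    # keyed by signature hash; the optional keys (test, lower_is_better,
    # has_subtests, parent_signature, extra_options) are only included when
    # they differ from their bandwidth-saving defaults.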
class PerformancePlatformViewSet(viewsets.ViewSet):
"""
All platforms for a particular branch that have performance data
"""
def list(self, request, project):
signature_data = PerformanceSignature.objects.filter(
repository__name=project)
interval = request.query_params.get('interval')
if interval:
signature_data = signature_data.filter(
last_updated__gte=datetime.datetime.utcfromtimestamp(
int(time.time() - int(interval))))
frameworks = request.query_params.getlist('framework')
if frameworks:
signature_data = signature_data.filter(
framework__in=frameworks)
return Response(signature_data.values_list(
'platform__platform', flat=True).distinct())
class PerformanceFrameworkViewSet(viewsets.ReadOnlyModelViewSet):
queryset = PerformanceFramework.objects.all()
serializer_class = PerformanceFrameworkSerializer
filter_backends = [filters.OrderingFilter]
ordering = 'id'
class PerformanceDatumViewSet(viewsets.ViewSet):
"""
This view serves performance test result data
"""
def list(self, request, project):
repository = models.Repository.objects.get(name=project)
signature_hashes = request.query_params.getlist("signatures")
push_ids = request.query_params.getlist("push_id")
try:
job_ids = [int(job_id) for job_id in
request.query_params.getlist("job_id")]
except ValueError:
return Response({"message": "Job id(s) must be specified as integers"},
status=HTTP_400_BAD_REQUEST)
if not (signature_hashes or push_ids or job_ids):
raise exceptions.ValidationError('Need to specify either '
'signatures, push_id, or '
'job_id')
datums = PerformanceDatum.objects.filter(
repository=repository).select_related(
'signature__signature_hash').order_by('push_timestamp')
if signature_hashes:
signature_ids = PerformanceSignature.objects.filter(
repository=repository,
signature_hash__in=signature_hashes).values_list('id', flat=True)
datums = datums.filter(signature__id__in=list(signature_ids))
if push_ids:
datums = datums.filter(push_id__in=push_ids)
if job_ids:
datums = datums.filter(job_id__in=job_ids)
frameworks = request.query_params.getlist('framework')
if frameworks:
datums = datums.filter(
signature__framework__in=frameworks)
interval = request.query_params.get('interval')
        start_date = request.query_params.get('start_date') # 'YYYY-MM-DDTHH:MM:SS'
end_date = request.query_params.get('end_date') # 'YYYY-MM-DDTHH:MM:SS'
if interval and (start_date or end_date):
return Response({"message": "Provide either interval only -or- start (and end) date"},
status=HTTP_400_BAD_REQUEST)
if interval:
datums = datums.filter(
push_timestamp__gt=datetime.datetime.utcfromtimestamp(
int(time.time() - int(interval))))
if start_date:
datums = datums.filter(push_timestamp__gt=start_date)
if end_date:
datums = datums.filter(push_timestamp__lt=end_date)
ret = defaultdict(list)
values_list = datums.values_list(
'id', 'signature_id', 'signature__signature_hash', 'job_id', 'push_id',
'push_timestamp', 'value')
for (id, signature_id, signature_hash, job_id, push_id,
push_timestamp, value) in values_list:
ret[signature_hash].append({
'id': id,
'signature_id': signature_id,
'job_id': job_id,
'push_id': push_id,
'push_timestamp': int(time.mktime(push_timestamp.timetuple())),
'value': round(value, 2) # round to 2 decimal places
})
return Response(ret)
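    # Illustrative response shape (assumed values, not from the original code):
    #   {"09f5c1a...": [{"id": 10, "signature_id": 1, "job_id": 123,
    #                    "push_id": 4, "push_timestamp": 1472828400,
    #                    "value": 500.12}, ...]}
    # i.e. a list of datum dicts per signature hash, ordered by push timestamp.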
class AlertSummaryPagination(pagination.PageNumberPagination):
ordering = ('-last_updated', '-id')
page_size = 10
class PerformanceAlertSummaryViewSet(viewsets.ModelViewSet):
"""ViewSet for the performance alert summary model"""
queryset = PerformanceAlertSummary.objects.filter(repository__active_status='active').prefetch_related(
'alerts', 'alerts__series_signature',
'repository',
'alerts__series_signature__platform',
'alerts__series_signature__option_collection',
'alerts__series_signature__option_collection__option')
permission_classes = (IsStaffOrReadOnly,)
serializer_class = PerformanceAlertSummarySerializer
filter_backends = (django_filters.rest_framework.DjangoFilterBackend, filters.OrderingFilter)
filter_fields = ['id', 'status', 'framework', 'repository',
'alerts__series_signature__signature_hash']
ordering = ('-last_updated', '-id')
pagination_class = AlertSummaryPagination
def create(self, request, *args, **kwargs):
data = request.data
alert_summary, _ = PerformanceAlertSummary.objects.get_or_create(
repository_id=data['repository_id'],
framework=PerformanceFramework.objects.get(id=data['framework_id']),
push_id=data['push_id'],
prev_push_id=data['prev_push_id'],
defaults={
'manually_created': True,
'last_updated': datetime.datetime.now()
})
return Response({"alert_summary_id": alert_summary.id})
class PerformanceAlertViewSet(viewsets.ModelViewSet):
queryset = PerformanceAlert.objects.all()
permission_classes = (IsStaffOrReadOnly,)
serializer_class = PerformanceAlertSerializer
filter_backends = (django_filters.rest_framework.DjangoFilterBackend, filters.OrderingFilter)
filter_fields = ['id']
ordering = ('-id')
class AlertPagination(pagination.CursorPagination):
ordering = ('-id')
page_size = 10
pagination_class = AlertPagination
def update(self, request, *args, **kwargs):
request.data['classifier'] = request.user.email
return super(PerformanceAlertViewSet, self).update(request, *args, **kwargs)
def create(self, request, *args, **kwargs):
data = request.data
if 'summary_id' not in data or 'signature_id' not in data:
return Response({"message": "Summary and signature ids necessary "
"to create alert"}, status=HTTP_400_BAD_REQUEST)
summary = PerformanceAlertSummary.objects.get(
id=data['summary_id'])
signature = PerformanceSignature.objects.get(
id=data['signature_id'])
prev_range = signature.max_back_window
if not prev_range:
prev_range = settings.PERFHERDER_ALERTS_MAX_BACK_WINDOW
new_range = signature.fore_window
if not new_range:
new_range = settings.PERFHERDER_ALERTS_FORE_WINDOW
prev_data = PerformanceDatum.objects.filter(
signature=signature,
push_timestamp__lte=summary.prev_push.time).order_by(
'-push_timestamp').values_list('value', flat=True)[:prev_range]
new_data = PerformanceDatum.objects.filter(
signature=signature,
push_timestamp__gt=summary.prev_push.time).order_by(
'push_timestamp').values_list('value', flat=True)[:new_range]
if not prev_data or not new_data:
return Response({"message": "Insufficient data to create an "
"alert"}, status=HTTP_400_BAD_REQUEST)
prev_value = sum(prev_data)/len(prev_data)
new_value = sum(new_data)/len(new_data)
alert_properties = get_alert_properties(prev_value, new_value,
signature.lower_is_better)
alert, _ = PerformanceAlert.objects.get_or_create(
summary=summary,
series_signature=signature,
defaults={
'is_regression': alert_properties.is_regression,
'manually_created': True,
'amount_pct': alert_properties.pct_change,
'amount_abs': alert_properties.delta,
'prev_value': prev_value,
'new_value': new_value,
't_value': 1000
})
return Response({"alert_id": alert.id})
class PerformanceBugTemplateViewSet(viewsets.ReadOnlyModelViewSet):
queryset = PerformanceBugTemplate.objects.all()
serializer_class = PerformanceBugTemplateSerializer
filter_backends = (django_filters.rest_framework.DjangoFilterBackend, filters.OrderingFilter)
filter_fields = ['framework']
| mpl-2.0 |
x111ong/django | tests/flatpages_tests/test_middleware.py | 290 | 8134 | from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.models import Site
from django.test import TestCase, modify_settings, override_settings
from .settings import FLATPAGES_TEMPLATES
class TestDataMixin(object):
@classmethod
def setUpTestData(cls):
# don't use the manager because we want to ensure the site exists
# with pk=1, regardless of whether or not it already exists.
cls.site1 = Site(pk=1, domain='example.com', name='example.com')
cls.site1.save()
cls.fp1 = FlatPage.objects.create(
url='/flatpage/', title='A Flatpage', content="Isn't it flat!",
enable_comments=False, template_name='', registration_required=False
)
cls.fp2 = FlatPage.objects.create(
url='/location/flatpage/', title='A Nested Flatpage', content="Isn't it flat and deep!",
enable_comments=False, template_name='', registration_required=False
)
cls.fp3 = FlatPage.objects.create(
url='/sekrit/', title='Sekrit Flatpage', content="Isn't it sekrit!",
enable_comments=False, template_name='', registration_required=True
)
cls.fp4 = FlatPage.objects.create(
url='/location/sekrit/', title='Sekrit Nested Flatpage', content="Isn't it sekrit and deep!",
enable_comments=False, template_name='', registration_required=True
)
cls.fp1.sites.add(cls.site1)
cls.fp2.sites.add(cls.site1)
cls.fp3.sites.add(cls.site1)
cls.fp4.sites.add(cls.site1)
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.flatpages'})
@override_settings(
LOGIN_URL='/accounts/login/',
MIDDLEWARE_CLASSES=[
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
],
ROOT_URLCONF='flatpages_tests.urls',
TEMPLATES=FLATPAGES_TEMPLATES,
SITE_ID=1,
)
class FlatpageMiddlewareTests(TestDataMixin, TestCase):
def test_view_flatpage(self):
"A flatpage can be served through a view, even when the middleware is in use"
response = self.client.get('/flatpage_root/flatpage/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it flat!</p>")
def test_view_non_existent_flatpage(self):
"A non-existent flatpage raises 404 when served through a view, even when the middleware is in use"
response = self.client.get('/flatpage_root/no_such_flatpage/')
self.assertEqual(response.status_code, 404)
def test_view_authenticated_flatpage(self):
"A flatpage served through a view can require authentication"
response = self.client.get('/flatpage_root/sekrit/')
self.assertRedirects(response, '/accounts/login/?next=/flatpage_root/sekrit/')
User.objects.create_user('testuser', '[email protected]', 's3krit')
self.client.login(username='testuser', password='s3krit')
response = self.client.get('/flatpage_root/sekrit/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it sekrit!</p>")
def test_fallback_flatpage(self):
"A flatpage can be served by the fallback middleware"
response = self.client.get('/flatpage/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it flat!</p>")
def test_fallback_non_existent_flatpage(self):
"A non-existent flatpage raises a 404 when served by the fallback middleware"
response = self.client.get('/no_such_flatpage/')
self.assertEqual(response.status_code, 404)
def test_fallback_authenticated_flatpage(self):
"A flatpage served by the middleware can require authentication"
response = self.client.get('/sekrit/')
self.assertRedirects(response, '/accounts/login/?next=/sekrit/')
User.objects.create_user('testuser', '[email protected]', 's3krit')
self.client.login(username='testuser', password='s3krit')
response = self.client.get('/sekrit/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it sekrit!</p>")
def test_fallback_flatpage_special_chars(self):
"A flatpage with special chars in the URL can be served by the fallback middleware"
fp = FlatPage.objects.create(
url="/some.very_special~chars-here/",
title="A very special page",
content="Isn't it special!",
enable_comments=False,
registration_required=False,
)
fp.sites.add(settings.SITE_ID)
response = self.client.get('/some.very_special~chars-here/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it special!</p>")
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.flatpages'})
@override_settings(
APPEND_SLASH=True,
LOGIN_URL='/accounts/login/',
MIDDLEWARE_CLASSES=[
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
],
ROOT_URLCONF='flatpages_tests.urls',
TEMPLATES=FLATPAGES_TEMPLATES,
SITE_ID=1,
)
class FlatpageMiddlewareAppendSlashTests(TestDataMixin, TestCase):
def test_redirect_view_flatpage(self):
"A flatpage can be served through a view and should add a slash"
response = self.client.get('/flatpage_root/flatpage')
self.assertRedirects(response, '/flatpage_root/flatpage/', status_code=301)
def test_redirect_view_non_existent_flatpage(self):
"A non-existent flatpage raises 404 when served through a view and should not add a slash"
response = self.client.get('/flatpage_root/no_such_flatpage')
self.assertEqual(response.status_code, 404)
def test_redirect_fallback_flatpage(self):
"A flatpage can be served by the fallback middleware and should add a slash"
response = self.client.get('/flatpage')
self.assertRedirects(response, '/flatpage/', status_code=301)
def test_redirect_fallback_non_existent_flatpage(self):
"A non-existent flatpage raises a 404 when served by the fallback middleware and should not add a slash"
response = self.client.get('/no_such_flatpage')
self.assertEqual(response.status_code, 404)
def test_redirect_fallback_flatpage_special_chars(self):
"A flatpage with special chars in the URL can be served by the fallback middleware and should add a slash"
fp = FlatPage.objects.create(
url="/some.very_special~chars-here/",
title="A very special page",
content="Isn't it special!",
enable_comments=False,
registration_required=False,
)
fp.sites.add(settings.SITE_ID)
response = self.client.get('/some.very_special~chars-here')
self.assertRedirects(response, '/some.very_special~chars-here/', status_code=301)
def test_redirect_fallback_flatpage_root(self):
"A flatpage at / should not cause a redirect loop when APPEND_SLASH is set"
fp = FlatPage.objects.create(
url="/",
title="Root",
content="Root",
enable_comments=False,
registration_required=False,
)
fp.sites.add(settings.SITE_ID)
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Root</p>")
| bsd-3-clause |
Diiaablo95/friendsNet | test/services_api_test_media_item.py | 1 | 6919 | import unittest
import json
import flask
import friendsNet.resources as resources
import friendsNet.database as database
DB_PATH = 'db/friendsNet_test.db'
ENGINE = database.Engine(DB_PATH)
COLLECTION_JSON = "application/vnd.collection+json"
HAL_JSON = "application/hal+json"
MEDIA_ITEM_PROFILE = "/profiles/media_item-profile"
#Tell Flask that I am running it in testing mode.
resources.app.config['TESTING'] = True
#Necessary for correct translation in url_for
resources.app.config['SERVER_NAME'] = 'localhost:5000'
#Database Engine utilized in our testing
resources.app.config.update({'Engine': ENGINE})
class ResourcesAPITestCase(unittest.TestCase):
#INITIATION AND TEARDOWN METHODS
@classmethod
def setUpClass(cls):
        ''' Creates the database structure. First removes any preexisting database file.'''
print "Testing ", cls.__name__
ENGINE.remove_database()
ENGINE.create_tables()
@classmethod
def tearDownClass(cls):
'''Remove the testing database.'''
print "Testing ENDED for ", cls.__name__
ENGINE.remove_database()
def setUp(self):
'''Populates the database.'''
#This method loads the initial values from friendsNet_data_db.sql
ENGINE.populate_tables()
#Activate app_context for using url_for
self.app_context = resources.app.app_context()
self.app_context.push()
#Create a test client
self.client = resources.app.test_client()
def tearDown(self):
'''
Remove all records from database.
'''
ENGINE.clear()
self.app_context.pop()
class MediaItemTestCase(ResourcesAPITestCase):
resp_get = {
"id" : 1,
"media_item_type" : 0,
"url" : "/friendsNet/media_uploads/media1.jpg",
"description" : "Flowers are wonderful!",
"_links" : {
"self" : {"href" : "/friendsNet/api/media/1/", "profile" : "/profiles/media_item-profile"},
"media list" : {"href" : "/friendsNet/api/media/"}
},
"template" : {
"data" : [
{"name" : "description", "value" : "", "prompt" : "Media item description", "required" : "false"}
]
}
}
media_patch_correct = {
"template" : {
"data" : [
{"name" : "description", "value" : "New description!"}
]
}
}
media_patch_empty = {
"template" : {
"data" : []
}
}
def setUp(self):
super(MediaItemTestCase, self).setUp()
self.url = resources.api.url_for(resources.Media_item, media_id = 1, _external = False)
self.url_wrong = resources.api.url_for(resources.Media_item, media_id = 999, _external = False)
#TEST URL
def test_url(self):
#Checks that the URL points to the right resource
_url = '/friendsNet/api/media/1/'
print '('+self.test_url.__name__+')', self.test_url.__doc__
with resources.app.test_request_context(_url):
rule = flask.request.url_rule
view_point = resources.app.view_functions[rule.endpoint].view_class
self.assertEquals(view_point, resources.Media_item)
def test_wrong_url(self):
        #Checks that GET Media item returns correct status code if given a wrong id
resp = self.client.get(self.url_wrong, headers = {"Accept" : HAL_JSON})
self.assertEquals(resp.status_code, 404)
data = json.loads(resp.data)
href = data["resource_url"] #test HREF
self.assertEquals(href, self.url_wrong)
error = data["code"]
self.assertEquals(error, 404)
#TEST GET
#200 + MIMETYPE & PROFILE
def test_get_media_item(self):
print '('+self.test_get_media_item.__name__+')', self.test_get_media_item.__doc__
with resources.app.test_client() as client:
resp = client.get(self.url, headers = {"Accept" : HAL_JSON})
self.assertEquals(resp.status_code, 200)
data = json.loads(resp.data)
self.assertEquals(self.resp_get, data)
self.assertEqual(resp.headers.get("Content-Type", None), HAL_JSON)
#404
def test_get_not_existing_media_item(self):
print '('+self.test_get_not_existing_media_item.__name__+')', self.test_get_not_existing_media_item.__doc__
with resources.app.test_client() as client:
resp = client.get(self.url_wrong, headers = {"Accept" : HAL_JSON})
self.assertEquals(resp.status_code, 404)
#TEST PATCH
#204
def test_patch_media_item(self):
print '('+self.test_patch_media_item.__name__+')', self.test_patch_media_item.__doc__
resp = self.client.patch(self.url, data = json.dumps(self.media_patch_correct), headers = {"Content-Type" : COLLECTION_JSON})
self.assertEquals(resp.status_code, 204)
resp2 = self.client.get(self.url, headers = {"Accept" : HAL_JSON})
self.assertEquals(resp2.status_code, 200)
data = json.loads(resp2.data)
new_value = data["description"]
self.assertEquals(new_value, self.media_patch_correct["template"]["data"][0]["value"])
#PATCH EMPTY
def test_patch_empty_media_item(self):
print '('+self.test_patch_empty_media_item.__name__+')', self.test_patch_empty_media_item.__doc__
resp = self.client.patch(self.url, data = json.dumps(self.media_patch_empty), headers = {"Content-Type" : COLLECTION_JSON})
self.assertEquals(resp.status_code, 204)
#404
def test_patch_not_existing_media_item(self):
print '('+self.test_patch_not_existing_media_item.__name__+')', self.test_patch_not_existing_media_item.__doc__
resp = self.client.patch(self.url_wrong, data = json.dumps(self.media_patch_correct), headers = {"Content-Type" : COLLECTION_JSON})
self.assertEquals(resp.status_code, 404)
#415
def test_patch_wrong_header_media_item(self):
print '('+self.test_patch_wrong_header_media_item.__name__+')', self.test_patch_wrong_header_media_item.__doc__
resp = self.client.patch(self.url, data = json.dumps(self.media_patch_correct))
self.assertEquals(resp.status_code, 415)
#TEST DELETE
#204
def test_delete_existing_media_item(self):
print '('+self.test_delete_existing_media_item.__name__+')', self.test_delete_existing_media_item.__doc__
resp = self.client.delete(self.url, headers = {"Accept" : COLLECTION_JSON})
self.assertEquals(resp.status_code, 204)
#404
def test_delete_not_existing_media_item(self):
print '('+self.test_delete_not_existing_media_item.__name__+')', self.test_delete_not_existing_media_item.__doc__
resp = self.client.delete(self.url_wrong, headers = {"Accept" : COLLECTION_JSON})
self.assertEquals(resp.status_code, 404)
if __name__ == '__main__':
unittest.main()
print 'Start running tests' | gpl-3.0 |
kxz/waapuro | setup.py | 1 | 1353 | #!/usr/bin/env python
import os
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test
class Tox(test):
def finalize_options(self):
test.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import tox
errno = tox.cmdline(self.test_args)
sys.exit(errno)
setup(
name='waapuro',
description='A dead-simple hiragana and katakana romanization library',
version='1.0.1',
author='Kevin Xiwei Zheng',
author_email='[email protected]',
url='https://github.com/kxz/waapuro',
license='X11',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: Japanese',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries'],
keywords='japanese kana hiragana katakana romanization',
packages=find_packages(),
install_requires=[
'future'],
tests_require=[
'tox'],
cmdclass={
'test': Tox})
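# Illustrative usage note: with cmdclass={'test': Tox}, running
# 'python setup.py test' delegates to tox rather than the default
# unittest runner.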
| mit |
LeandroRoberto/acrobatasdovento | node_modules/node-gyp/gyp/pylib/gyp/ordered_dict.py | 2354 | 10366 | # Unmodified from http://code.activestate.com/recipes/576693/
# other than to add MIT license header (as specified on page, but not in code).
# Linked from Python documentation here:
# http://docs.python.org/2/library/collections.html#collections.OrderedDict
#
# This should be deleted once Py2.7 is available on all bots, see
# http://crbug.com/241769.
#
# Copyright (c) 2009 Raymond Hettinger.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
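    # Illustrative layout: after inserting keys 'a' then 'b', the circular
    # list is root <-> link_a <-> link_b <-> root, where each link is
    # [PREV, NEXT, KEY] and root holds [last_link, first_link, None].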
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
# Suppress 'OrderedDict.update: Method has no argument':
# pylint: disable=E0211
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
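    # Illustrative behavior: updating an existing key overwrites its value
    # but keeps its original position, e.g.
    #   od = OrderedDict([('a', 1)]); od.update([('b', 2), ('a', 3)])
    #   list(od.items())  # -> [('a', 3), ('b', 2)]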
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
| mit |
grow/pygrow | grow/documents/document_fields_test.py | 1 | 1484 | """Tests for the document fields."""
import copy
import unittest
from grow.documents import document_fields
class DocumentFieldsTestCase(unittest.TestCase):
def testContains(self):
doc_fields = document_fields.DocumentFields({
'foo': 'bar',
}, None)
self.assertEquals(True, 'foo' in doc_fields)
self.assertEquals(False, 'bar' in doc_fields)
def testGet(self):
doc_fields = document_fields.DocumentFields({
'foo': 'bar',
}, None)
self.assertEquals('bar', doc_fields['foo'])
self.assertEquals('baz', doc_fields.get('bar', 'baz'))
def testGetItem(self):
doc_fields = document_fields.DocumentFields({
'foo': 'bar',
}, None)
self.assertEquals('bar', doc_fields['foo'])
with self.assertRaises(KeyError):
doc_fields['bar']
def testLen(self):
doc_fields = document_fields.DocumentFields({
'foo': 'bar',
'bar': 'baz',
}, None)
self.assertEquals(2, len(doc_fields))
def test_update(self):
"""Test that updates properly overwrite and are untagged."""
doc_fields = document_fields.DocumentFields({
'foo@': 'bar',
})
self.assertEquals('bar', doc_fields['foo'])
doc_fields.update({
'foo@': 'bbq',
})
self.assertEquals('bbq', doc_fields['foo'])
if __name__ == '__main__':
unittest.main()
| mit |
kennethgillen/ansible | lib/ansible/plugins/cache/pickle.py | 27 | 1645 | # (c) 2017, Brian Coca
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
'''
DOCUMENTATION:
    cache: pickle
    short_description: File backed, using Python's pickle.
    description:
        - File backed cache that uses Python's pickle serialization as a format; the files are per host.
version_added: "2.3"
author: Brian Coca (@bcoca)
'''
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
try:
import cPickle as pickle
except ImportError:
import pickle
from ansible.plugins.cache import BaseFileCacheModule
class CacheModule(BaseFileCacheModule):
"""
A caching module backed by pickle files.
"""
def _load(self, filepath):
# Pickle is a binary format
with open(filepath, 'rb') as f:
return pickle.load(f)
def _dump(self, value, filepath):
with open(filepath, 'wb') as f:
# Use pickle protocol 2 which is compatible with Python 2.3+.
pickle.dump(value, f, protocol=2)
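    # Illustrative round-trip of the on-disk format (plain pickle, protocol 2;
    # the path is hypothetical):
    #   with open('/tmp/example', 'wb') as f:
    #       pickle.dump({'facts': 1}, f, protocol=2)
    #   with open('/tmp/example', 'rb') as f:
    #       pickle.load(f)  # -> {'facts': 1}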
| gpl-3.0 |
imply/chuu | third_party/closure_linter/closure_linter/statetracker.py | 135 | 31214 | #!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Light weight EcmaScript state tracker that reads tokens and tracks state."""
__author__ = ('[email protected] (Robert Walker)',
'[email protected] (Andy Perelson)')
import re
from closure_linter import javascripttokenizer
from closure_linter import javascripttokens
from closure_linter import tokenutil
# Shorthand
Type = javascripttokens.JavaScriptTokenType
class DocFlag(object):
"""Generic doc flag object.
Attribute:
flag_type: param, return, define, type, etc.
flag_token: The flag token.
type_start_token: The first token specifying the flag type,
including braces.
type_end_token: The last token specifying the flag type,
including braces.
type: The type spec.
name_token: The token specifying the flag name.
name: The flag name
description_start_token: The first token in the description.
description_end_token: The end token in the description.
description: The description.
"""
# Please keep these lists alphabetized.
# The list of standard jsdoc tags is from
STANDARD_DOC = frozenset([
'author',
'bug',
'const',
'constructor',
'define',
'deprecated',
'enum',
'export',
'extends',
'externs',
'fileoverview',
'implements',
'implicitCast',
'interface',
'lends',
'license',
'noalias',
'nocompile',
'nosideeffects',
'override',
'owner',
'param',
'preserve',
'private',
'return',
'see',
'supported',
'template',
'this',
'type',
'typedef',
])
ANNOTATION = frozenset(['preserveTry', 'suppress'])
LEGAL_DOC = STANDARD_DOC | ANNOTATION
# Includes all Closure Compiler @suppress types.
# Not all of these annotations are interpreted by Closure Linter.
#
# Specific cases:
# - accessControls is supported by the compiler at the expression
# and method level to suppress warnings about private/protected
# access (method level applies to all references in the method).
# The linter mimics the compiler behavior.
SUPPRESS_TYPES = frozenset([
'accessControls',
'ambiguousFunctionDecl',
'checkRegExp',
'checkTypes',
'checkVars',
'const',
'constantProperty',
'deprecated',
'duplicate',
'es5Strict',
'externsValidation',
'extraProvide',
'extraRequire',
'fileoverviewTags',
'globalThis',
'internetExplorerChecks',
'invalidCasts',
'missingProperties',
'missingProvide',
'missingRequire',
'nonStandardJsDocs',
'strictModuleDepCheck',
'tweakValidation',
'typeInvalidation',
'undefinedNames',
'undefinedVars',
'underscore',
'unknownDefines',
'uselessCode',
'visibility',
'with'])
HAS_DESCRIPTION = frozenset([
'define', 'deprecated', 'desc', 'fileoverview', 'license', 'param',
'preserve', 'return', 'supported'])
HAS_TYPE = frozenset([
'define', 'enum', 'extends', 'implements', 'param', 'return', 'type',
'suppress'])
TYPE_ONLY = frozenset(['enum', 'extends', 'implements', 'suppress', 'type'])
HAS_NAME = frozenset(['param'])
EMPTY_COMMENT_LINE = re.compile(r'^\s*\*?\s*$')
EMPTY_STRING = re.compile(r'^\s*$')
def __init__(self, flag_token):
"""Creates the DocFlag object and attaches it to the given start token.
Args:
flag_token: The starting token of the flag.
"""
self.flag_token = flag_token
self.flag_type = flag_token.string.strip().lstrip('@')
# Extract type, if applicable.
self.type = None
self.type_start_token = None
self.type_end_token = None
if self.flag_type in self.HAS_TYPE:
brace = tokenutil.SearchUntil(flag_token, [Type.DOC_START_BRACE],
Type.FLAG_ENDING_TYPES)
if brace:
end_token, contents = _GetMatchingEndBraceAndContents(brace)
self.type = contents
self.type_start_token = brace
self.type_end_token = end_token
elif (self.flag_type in self.TYPE_ONLY and
flag_token.next.type not in Type.FLAG_ENDING_TYPES):
self.type_start_token = flag_token.next
self.type_end_token, self.type = _GetEndTokenAndContents(
self.type_start_token)
if self.type is not None:
self.type = self.type.strip()
# Extract name, if applicable.
self.name_token = None
self.name = None
if self.flag_type in self.HAS_NAME:
# Handle bad case, name could be immediately after flag token.
self.name_token = _GetNextIdentifierToken(flag_token)
# Handle good case, if found token is after type start, look for
# identifier after type end, since types contain identifiers.
if (self.type and self.name_token and
tokenutil.Compare(self.name_token, self.type_start_token) > 0):
self.name_token = _GetNextIdentifierToken(self.type_end_token)
if self.name_token:
self.name = self.name_token.string
# Extract description, if applicable.
self.description_start_token = None
self.description_end_token = None
self.description = None
if self.flag_type in self.HAS_DESCRIPTION:
search_start_token = flag_token
if self.name_token and self.type_end_token:
if tokenutil.Compare(self.type_end_token, self.name_token) > 0:
search_start_token = self.type_end_token
else:
search_start_token = self.name_token
elif self.name_token:
search_start_token = self.name_token
elif self.type:
search_start_token = self.type_end_token
interesting_token = tokenutil.Search(search_start_token,
Type.FLAG_DESCRIPTION_TYPES | Type.FLAG_ENDING_TYPES)
if interesting_token.type in Type.FLAG_DESCRIPTION_TYPES:
self.description_start_token = interesting_token
self.description_end_token, self.description = (
_GetEndTokenAndContents(interesting_token))
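  # Illustrative (assumed token stream): for a line such as
  # ' * @param {string} name The name to use.', the resulting DocFlag has
  # flag_type 'param', type 'string', name 'name', and description
  # 'The name to use.'.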
class DocComment(object):
"""JavaScript doc comment object.
Attributes:
ordered_params: Ordered list of parameters documented.
start_token: The token that starts the doc comment.
end_token: The token that ends the doc comment.
suppressions: Map of suppression type to the token that added it.
"""
def __init__(self, start_token):
"""Create the doc comment object.
Args:
start_token: The first token in the doc comment.
"""
self.__params = {}
self.ordered_params = []
self.__flags = {}
self.start_token = start_token
self.end_token = None
self.suppressions = {}
self.invalidated = False
def Invalidate(self):
"""Indicate that the JSDoc is well-formed but we had problems parsing it.
This is a short-circuiting mechanism so that we don't emit false
positives about well-formed doc comments just because we don't support
hot new syntaxes.
"""
self.invalidated = True
def IsInvalidated(self):
"""Test whether Invalidate() has been called."""
return self.invalidated
def AddParam(self, name, param_type):
"""Add a new documented parameter.
Args:
name: The name of the parameter to document.
param_type: The parameter's declared JavaScript type.
"""
self.ordered_params.append(name)
self.__params[name] = param_type
def AddSuppression(self, token):
"""Add a new error suppression flag.
Args:
token: The suppression flag token.
"""
#TODO(user): Error if no braces
brace = tokenutil.SearchUntil(token, [Type.DOC_START_BRACE],
[Type.DOC_FLAG])
if brace:
end_token, contents = _GetMatchingEndBraceAndContents(brace)
for suppression in contents.split('|'):
self.suppressions[suppression] = token
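    # Illustrative: an '@suppress {visibility|underscore}' flag records two
    # entries in self.suppressions, 'visibility' and 'underscore', both
    # mapped to the flag token.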
def SuppressionOnly(self):
"""Returns whether this comment contains only suppression flags."""
for flag_type in self.__flags.keys():
if flag_type != 'suppress':
return False
return True
def AddFlag(self, flag):
"""Add a new document flag.
Args:
flag: DocFlag object.
"""
self.__flags[flag.flag_type] = flag
def InheritsDocumentation(self):
"""Test if the jsdoc implies documentation inheritance.
Returns:
True if documentation may be pulled off the superclass.
"""
return self.HasFlag('inheritDoc') or self.HasFlag('override')
def HasFlag(self, flag_type):
"""Test if the given flag has been set.
Args:
flag_type: The type of the flag to check.
Returns:
True if the flag is set.
"""
return flag_type in self.__flags
def GetFlag(self, flag_type):
"""Gets the last flag of the given type.
Args:
flag_type: The type of the flag to get.
Returns:
The last instance of the given flag type in this doc comment.
"""
return self.__flags[flag_type]
def CompareParameters(self, params):
"""Computes the edit distance and list from the function params to the docs.
Uses the Levenshtein edit distance algorithm, with code modified from
http://en.wikibooks.org/wiki/Algorithm_implementation/Strings/Levenshtein_distance#Python
Args:
params: The parameter list for the function declaration.
Returns:
The edit distance, the edit list.
"""
source_len, target_len = len(self.ordered_params), len(params)
edit_lists = [[]]
distance = [[]]
for i in range(target_len+1):
edit_lists[0].append(['I'] * i)
distance[0].append(i)
for j in range(1, source_len+1):
edit_lists.append([['D'] * j])
distance.append([j])
for i in range(source_len):
for j in range(target_len):
cost = 1
if self.ordered_params[i] == params[j]:
cost = 0
deletion = distance[i][j+1] + 1
insertion = distance[i+1][j] + 1
substitution = distance[i][j] + cost
edit_list = None
best = None
        if deletion <= insertion and deletion <= substitution:
          # Deletion is best.
          best = deletion
          edit_list = list(edit_lists[i][j+1])
          edit_list.append('D')
          edit_lists[i+1].append(edit_list)
elif insertion <= substitution:
# Insertion is best.
best = insertion
edit_list = list(edit_lists[i+1][j])
edit_list.append('I')
edit_lists[i+1].append(edit_list)
else:
# Substitution is best.
best = substitution
edit_list = list(edit_lists[i][j])
if cost:
edit_list.append('S')
else:
edit_list.append('=')
edit_lists[i+1].append(edit_list)
distance[i+1].append(best)
return distance[source_len][target_len], edit_lists[source_len][target_len]
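  # Illustrative: comparing documented params ['a', 'b'] against declared
  # params ['a', 'c'] yields (1, ['=', 'S']), i.e. one substitution.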
def __repr__(self):
"""Returns a string representation of this object.
Returns:
A string representation of this object.
"""
return '<DocComment: %s, %s>' % (str(self.__params), str(self.__flags))
#
# Helper methods used by DocFlag and DocComment to parse out flag information.
#
def _GetMatchingEndBraceAndContents(start_brace):
"""Returns the matching end brace and contents between the two braces.
If any FLAG_ENDING_TYPE token is encountered before a matching end brace, then
that token is used as the matching ending token. Contents will have all
comment prefixes stripped out of them, and all comment prefixes in between the
start and end tokens will be split out into separate DOC_PREFIX tokens.
Args:
start_brace: The DOC_START_BRACE token immediately before desired contents.
Returns:
The matching ending token (DOC_END_BRACE or FLAG_ENDING_TYPE) and a string
of the contents between the matching tokens, minus any comment prefixes.
"""
open_count = 1
close_count = 0
contents = []
# We don't consider the start brace part of the type string.
token = start_brace.next
while open_count != close_count:
if token.type == Type.DOC_START_BRACE:
open_count += 1
elif token.type == Type.DOC_END_BRACE:
close_count += 1
if token.type != Type.DOC_PREFIX:
contents.append(token.string)
if token.type in Type.FLAG_ENDING_TYPES:
break
token = token.next
#Don't include the end token (end brace, end doc comment, etc.) in type.
token = token.previous
contents = contents[:-1]
return token, ''.join(contents)
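# Illustrative (assumed tokenization): given the DOC_START_BRACE token of
# '{Array.<string>}', this returns the matching DOC_END_BRACE token and the
# contents string 'Array.<string>'.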
def _GetNextIdentifierToken(start_token):
"""Searches for and returns the first identifier at the beginning of a token.
Searches each token after the start to see if it starts with an identifier.
  If found, will split the token into at most 3 pieces: leading whitespace,
identifier, rest of token, returning the identifier token. If no identifier is
found returns None and changes no tokens. Search is abandoned when a
FLAG_ENDING_TYPE token is found.
Args:
start_token: The token to start searching after.
Returns:
The identifier token is found, None otherwise.
"""
token = start_token.next
while token and not token.type in Type.FLAG_ENDING_TYPES:
match = javascripttokenizer.JavaScriptTokenizer.IDENTIFIER.match(
token.string)
if (match is not None and token.type == Type.COMMENT and
len(token.string) == len(match.group(0))):
return token
token = token.next
return None
def _GetEndTokenAndContents(start_token):
"""Returns last content token and all contents before FLAG_ENDING_TYPE token.
Comment prefixes are split into DOC_PREFIX tokens and stripped from the
returned contents.
Args:
start_token: The token immediately before the first content token.
Returns:
The last content token and a string of all contents including start and
end tokens, with comment prefixes stripped.
"""
iterator = start_token
last_line = iterator.line_number
last_token = None
contents = ''
doc_depth = 0
while not iterator.type in Type.FLAG_ENDING_TYPES or doc_depth > 0:
if (iterator.IsFirstInLine() and
DocFlag.EMPTY_COMMENT_LINE.match(iterator.line)):
# If we have a blank comment line, consider that an implicit
# ending of the description. This handles a case like:
#
# * @return {boolean} True
# *
# * Note: This is a sentence.
#
# The note is not part of the @return description, but there was
# no definitive ending token. Rather there was a line containing
# only a doc comment prefix or whitespace.
break
# b/2983692
# don't prematurely match against a @flag if inside a doc flag
# need to think about what is the correct behavior for unterminated
# inline doc flags
if (iterator.type == Type.DOC_START_BRACE and
iterator.next.type == Type.DOC_INLINE_FLAG):
doc_depth += 1
elif (iterator.type == Type.DOC_END_BRACE and
doc_depth > 0):
doc_depth -= 1
if iterator.type in Type.FLAG_DESCRIPTION_TYPES:
contents += iterator.string
last_token = iterator
iterator = iterator.next
if iterator.line_number != last_line:
contents += '\n'
last_line = iterator.line_number
end_token = last_token
if DocFlag.EMPTY_STRING.match(contents):
contents = None
else:
# Strip trailing newline.
contents = contents[:-1]
return end_token, contents
class Function(object):
"""Data about a JavaScript function.
Attributes:
block_depth: Block depth the function began at.
doc: The DocComment associated with the function.
has_return: If the function has a return value.
has_this: If the function references the 'this' object.
is_assigned: If the function is part of an assignment.
is_constructor: If the function is a constructor.
name: The name of the function, whether given in the function keyword or
as the lvalue the function is assigned to.
"""
def __init__(self, block_depth, is_assigned, doc, name):
self.block_depth = block_depth
self.is_assigned = is_assigned
self.is_constructor = doc and doc.HasFlag('constructor')
self.is_interface = doc and doc.HasFlag('interface')
self.has_return = False
self.has_throw = False
self.has_this = False
self.name = name
self.doc = doc
class StateTracker(object):
"""EcmaScript state tracker.
Tracks block depth, function names, etc. within an EcmaScript token stream.
"""
OBJECT_LITERAL = 'o'
CODE = 'c'
def __init__(self, doc_flag=DocFlag):
"""Initializes a JavaScript token stream state tracker.
Args:
doc_flag: An optional custom DocFlag used for validating
documentation flags.
"""
self._doc_flag = doc_flag
self.Reset()
def Reset(self):
"""Resets the state tracker to prepare for processing a new page."""
self._block_depth = 0
self._is_block_close = False
self._paren_depth = 0
self._functions = []
self._functions_by_name = {}
self._last_comment = None
self._doc_comment = None
self._cumulative_params = None
self._block_types = []
self._last_non_space_token = None
self._last_line = None
self._first_token = None
self._documented_identifiers = set()
def InFunction(self):
"""Returns true if the current token is within a function.
Returns:
True if the current token is within a function.
"""
return bool(self._functions)
def InConstructor(self):
"""Returns true if the current token is within a constructor.
Returns:
True if the current token is within a constructor.
"""
return self.InFunction() and self._functions[-1].is_constructor
def InInterfaceMethod(self):
"""Returns true if the current token is within an interface method.
Returns:
True if the current token is within an interface method.
"""
if self.InFunction():
if self._functions[-1].is_interface:
return True
else:
name = self._functions[-1].name
prototype_index = name.find('.prototype.')
if prototype_index != -1:
class_function_name = name[0:prototype_index]
if (class_function_name in self._functions_by_name and
self._functions_by_name[class_function_name].is_interface):
return True
return False
def InTopLevelFunction(self):
"""Returns true if the current token is within a top level function.
Returns:
True if the current token is within a top level function.
"""
return len(self._functions) == 1 and self.InTopLevel()
def InAssignedFunction(self):
"""Returns true if the current token is within a function variable.
Returns:
      True if the current token is within a function variable.
"""
return self.InFunction() and self._functions[-1].is_assigned
def IsFunctionOpen(self):
"""Returns true if the current token is a function block open.
Returns:
True if the current token is a function block open.
"""
return (self._functions and
self._functions[-1].block_depth == self._block_depth - 1)
def IsFunctionClose(self):
"""Returns true if the current token is a function block close.
Returns:
True if the current token is a function block close.
"""
return (self._functions and
self._functions[-1].block_depth == self._block_depth)
def InBlock(self):
"""Returns true if the current token is within a block.
Returns:
True if the current token is within a block.
"""
return bool(self._block_depth)
def IsBlockClose(self):
"""Returns true if the current token is a block close.
Returns:
True if the current token is a block close.
"""
return self._is_block_close
def InObjectLiteral(self):
"""Returns true if the current token is within an object literal.
Returns:
True if the current token is within an object literal.
"""
return self._block_depth and self._block_types[-1] == self.OBJECT_LITERAL
def InObjectLiteralDescendant(self):
"""Returns true if the current token has an object literal ancestor.
Returns:
True if the current token has an object literal ancestor.
"""
return self.OBJECT_LITERAL in self._block_types
def InParentheses(self):
"""Returns true if the current token is within parentheses.
Returns:
True if the current token is within parentheses.
"""
return bool(self._paren_depth)
def InTopLevel(self):
"""Whether we are at the top level in the class.
This function call is language specific. In some languages like
JavaScript, a function is top level if it is not inside any parenthesis.
In languages such as ActionScript, a function is top level if it is directly
within a class.
"""
raise TypeError('Abstract method InTopLevel not implemented')
def GetBlockType(self, token):
"""Determine the block type given a START_BLOCK token.
Code blocks come after parameters, keywords like else, and closing parens.
Args:
token: The current token. Can be assumed to be type START_BLOCK.
Returns:
Code block type for current token.
"""
raise TypeError('Abstract method GetBlockType not implemented')
def GetParams(self):
"""Returns the accumulated input params as an array.
    In some EcmaScript languages, input params are specified like
(param:Type, param2:Type2, ...)
in other they are specified just as
(param, param2)
We handle both formats for specifying parameters here and leave
it to the compilers for each language to detect compile errors.
This allows more code to be reused between lint checkers for various
EcmaScript languages.
Returns:
The accumulated input params as an array.
"""
params = []
if self._cumulative_params:
params = re.compile(r'\s+').sub('', self._cumulative_params).split(',')
# Strip out the type from parameters of the form name:Type.
params = map(lambda param: param.split(':')[0], params)
return params
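  # Illustrative: if the accumulated parameter text is 'a:Number, b', the
  # returned list is ['a', 'b']; the ':Number' type annotation is stripped.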
def GetLastComment(self):
"""Return the last plain comment that could be used as documentation.
Returns:
The last plain comment that could be used as documentation.
"""
return self._last_comment
def GetDocComment(self):
"""Return the most recent applicable documentation comment.
Returns:
The last applicable documentation comment.
"""
return self._doc_comment
def HasDocComment(self, identifier):
"""Returns whether the identifier has been documented yet.
Args:
identifier: The identifier.
Returns:
Whether the identifier has been documented yet.
"""
return identifier in self._documented_identifiers
def InDocComment(self):
"""Returns whether the current token is in a doc comment.
Returns:
Whether the current token is in a doc comment.
"""
return self._doc_comment and self._doc_comment.end_token is None
def GetDocFlag(self):
"""Returns the current documentation flags.
Returns:
The current documentation flags.
"""
return self._doc_flag
def IsTypeToken(self, t):
if self.InDocComment() and t.type not in (Type.START_DOC_COMMENT,
Type.DOC_FLAG, Type.DOC_INLINE_FLAG, Type.DOC_PREFIX):
f = tokenutil.SearchUntil(t, [Type.DOC_FLAG], [Type.START_DOC_COMMENT],
None, True)
if f and f.attached_object.type_start_token is not None:
return (tokenutil.Compare(t, f.attached_object.type_start_token) > 0 and
tokenutil.Compare(t, f.attached_object.type_end_token) < 0)
return False
def GetFunction(self):
"""Return the function the current code block is a part of.
Returns:
The current Function object.
"""
if self._functions:
return self._functions[-1]
def GetBlockDepth(self):
"""Return the block depth.
Returns:
The current block depth.
"""
return self._block_depth
def GetLastNonSpaceToken(self):
"""Return the last non whitespace token."""
return self._last_non_space_token
def GetLastLine(self):
"""Return the last line."""
return self._last_line
def GetFirstToken(self):
"""Return the very first token in the file."""
return self._first_token
def HandleToken(self, token, last_non_space_token):
"""Handles the given token and updates state.
Args:
token: The token to handle.
      last_non_space_token: The last non-whitespace token processed.
"""
self._is_block_close = False
if not self._first_token:
self._first_token = token
# Track block depth.
type = token.type
if type == Type.START_BLOCK:
self._block_depth += 1
# Subclasses need to handle block start very differently because
# whether a block is a CODE or OBJECT_LITERAL block varies significantly
# by language.
self._block_types.append(self.GetBlockType(token))
# Track block depth.
elif type == Type.END_BLOCK:
self._is_block_close = not self.InObjectLiteral()
self._block_depth -= 1
self._block_types.pop()
# Track parentheses depth.
elif type == Type.START_PAREN:
self._paren_depth += 1
# Track parentheses depth.
elif type == Type.END_PAREN:
self._paren_depth -= 1
elif type == Type.COMMENT:
self._last_comment = token.string
elif type == Type.START_DOC_COMMENT:
self._last_comment = None
self._doc_comment = DocComment(token)
elif type == Type.END_DOC_COMMENT:
self._doc_comment.end_token = token
elif type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):
flag = self._doc_flag(token)
token.attached_object = flag
self._doc_comment.AddFlag(flag)
if flag.flag_type == 'param' and flag.name:
self._doc_comment.AddParam(flag.name, flag.type)
elif flag.flag_type == 'suppress':
self._doc_comment.AddSuppression(token)
elif type == Type.FUNCTION_DECLARATION:
last_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES, None,
True)
doc = None
# Only functions outside of parens are eligible for documentation.
if not self._paren_depth:
doc = self._doc_comment
name = ''
is_assigned = last_code and (last_code.IsOperator('=') or
last_code.IsOperator('||') or last_code.IsOperator('&&') or
(last_code.IsOperator(':') and not self.InObjectLiteral()))
if is_assigned:
# TODO(robbyw): This breaks for x[2] = ...
# Must use loop to find full function name in the case of line-wrapped
# declarations (bug 1220601) like:
# my.function.foo.
# bar = function() ...
identifier = tokenutil.Search(last_code, Type.SIMPLE_LVALUE, None, True)
while identifier and identifier.type in (
Type.IDENTIFIER, Type.SIMPLE_LVALUE):
name = identifier.string + name
# Traverse behind us, skipping whitespace and comments.
while True:
identifier = identifier.previous
if not identifier or not identifier.type in Type.NON_CODE_TYPES:
break
else:
next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
while next_token and next_token.IsType(Type.FUNCTION_NAME):
name += next_token.string
next_token = tokenutil.Search(next_token, Type.FUNCTION_NAME, 2)
function = Function(self._block_depth, is_assigned, doc, name)
self._functions.append(function)
self._functions_by_name[name] = function
elif type == Type.START_PARAMETERS:
self._cumulative_params = ''
elif type == Type.PARAMETERS:
self._cumulative_params += token.string
elif type == Type.KEYWORD and token.string == 'return':
next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
if not next_token.IsType(Type.SEMICOLON):
function = self.GetFunction()
if function:
function.has_return = True
elif type == Type.KEYWORD and token.string == 'throw':
function = self.GetFunction()
if function:
function.has_throw = True
elif type == Type.SIMPLE_LVALUE:
identifier = token.values['identifier']
jsdoc = self.GetDocComment()
if jsdoc:
self._documented_identifiers.add(identifier)
self._HandleIdentifier(identifier, True)
elif type == Type.IDENTIFIER:
self._HandleIdentifier(token.string, False)
# Detect documented non-assignments.
next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
if next_token.IsType(Type.SEMICOLON):
if (self._last_non_space_token and
self._last_non_space_token.IsType(Type.END_DOC_COMMENT)):
self._documented_identifiers.add(token.string)
def _HandleIdentifier(self, identifier, is_assignment):
"""Process the given identifier.
Currently checks if it references 'this' and annotates the function
accordingly.
Args:
      identifier: The identifier to process.
      is_assignment: Whether the identifier is being written to.
"""
if identifier == 'this' or identifier.startswith('this.'):
function = self.GetFunction()
if function:
function.has_this = True
def HandleAfterToken(self, token):
"""Handle updating state after a token has been checked.
This function should be used for destructive state changes such as
deleting a tracked object.
Args:
token: The token to handle.
"""
type = token.type
if type == Type.SEMICOLON or type == Type.END_PAREN or (
type == Type.END_BRACKET and
self._last_non_space_token.type not in (
Type.SINGLE_QUOTE_STRING_END, Type.DOUBLE_QUOTE_STRING_END)):
# We end on any numeric array index, but keep going for string based
# array indices so that we pick up manually exported identifiers.
self._doc_comment = None
self._last_comment = None
elif type == Type.END_BLOCK:
self._doc_comment = None
self._last_comment = None
if self.InFunction() and self.IsFunctionClose():
# TODO(robbyw): Detect the function's name for better errors.
self._functions.pop()
elif type == Type.END_PARAMETERS and self._doc_comment:
self._doc_comment = None
self._last_comment = None
if not token.IsAnyType(Type.WHITESPACE, Type.BLANK_LINE):
self._last_non_space_token = token
self._last_line = token.line
| bsd-3-clause |
sileht/gnocchi | gnocchi/tests/functional/fixtures.py | 1 | 9173 | #
# Copyright 2015-2017 Red Hat. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fixtures for use with gabbi tests."""
from __future__ import absolute_import
import logging
import os
import shutil
import subprocess
import tempfile
import threading
import time
from unittest import case
import uuid
import warnings
import fixtures
from gabbi import fixture
import numpy
from oslo_config import cfg
from oslo_middleware import cors
import sqlalchemy_utils
import yaml
from gnocchi import chef
from gnocchi.cli import metricd
from gnocchi import incoming
from gnocchi import indexer
from gnocchi.indexer import sqlalchemy
from gnocchi.rest import app
from gnocchi import service
from gnocchi import storage
from gnocchi.tests import base
from gnocchi.tests import utils
# NOTE(chdent): Hack to restore semblance of global configuration to
# pass to the WSGI app used per test suite. LOAD_APP_KWARGS holds the oslo
# configuration and the pecan application configuration, the critical part of
# which is a reference to the current indexer.
LOAD_APP_KWARGS = None
def setup_app():
global LOAD_APP_KWARGS
return app.load_app(**LOAD_APP_KWARGS)
class AssertNAN(yaml.YAMLObject):
def __eq__(self, other):
try:
return numpy.isnan(other)
except TypeError:
return False
yaml.add_constructor(u'!AssertNAN', lambda loader, node: AssertNAN())
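# Illustrative: a gabbi expectation like 'value: !AssertNAN' compares equal
# to any NaN the API returns, since AssertNAN.__eq__ defers to numpy.isnan.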
class ConfigFixture(fixture.GabbiFixture):
"""Establish the relevant configuration fixture, per test file.
Each test file gets its own oslo config and its own indexer and storage
instance. The indexer is based on the current database url. The storage
uses a temporary directory.
To use this fixture in a gabbit add::
fixtures:
- ConfigFixture
"""
def __init__(self):
self.conf = None
self.tmp_dir = None
def start_fixture(self):
"""Create necessary temp files and do the config dance."""
global LOAD_APP_KWARGS
if not os.getenv("GNOCCHI_TEST_DEBUG"):
self.output = base.CaptureOutput()
self.output.setUp()
data_tmp_dir = tempfile.mkdtemp(prefix='gnocchi')
if os.getenv("GABBI_LIVE"):
dcf = None
else:
dcf = []
conf = service.prepare_service([], conf=utils.prepare_conf(),
default_config_files=dcf,
logging_level=logging.DEBUG,
skip_log_opts=True)
py_root = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', '..',))
conf.set_override('paste_config',
os.path.join(py_root, 'rest', 'api-paste.ini'),
group="api")
conf.set_override('policy_file',
os.path.join(py_root, 'rest', 'policy.json'),
group="oslo_policy")
        # NOTE(sileht): This is not concurrency safe, but only this test file
        # deals with cors, so we are fine. set_override doesn't work because
        # the cors group doesn't exist yet, and when the CORS middleware is
        # created it registers the options and directly copies the values of
        # all configuration options, making it impossible to override them
        # properly...
cfg.set_defaults(cors.CORS_OPTS, allowed_origin="http://foobar.com")
self.conf = conf
self.tmp_dir = data_tmp_dir
if conf.indexer.url is None:
raise case.SkipTest("No indexer configured")
storage_driver = os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "file")
conf.set_override('driver', storage_driver, 'storage')
if conf.storage.driver == 'file':
conf.set_override('file_basepath', data_tmp_dir, 'storage')
elif conf.storage.driver == 'ceph':
conf.set_override('ceph_conffile', os.getenv("CEPH_CONF"),
'storage')
pool_name = uuid.uuid4().hex
with open(os.devnull, 'w') as f:
subprocess.call("rados -c %s mkpool %s" % (
os.getenv("CEPH_CONF"), pool_name), shell=True,
stdout=f, stderr=subprocess.STDOUT)
conf.set_override('ceph_pool', pool_name, 'storage')
elif conf.storage.driver == "s3":
conf.set_override('s3_endpoint_url',
os.getenv("GNOCCHI_STORAGE_HTTP_URL"),
group="storage")
conf.set_override('s3_access_key_id', "gnocchi", group="storage")
conf.set_override('s3_secret_access_key', "anythingworks",
group="storage")
conf.set_override("s3_bucket_prefix", str(uuid.uuid4())[:26],
"storage")
elif conf.storage.driver == "swift":
# NOTE(sileht): This fixture must start before any driver stuff
swift_fixture = fixtures.MockPatch(
'swiftclient.client.Connection',
base.FakeSwiftClient)
swift_fixture.setUp()
# NOTE(jd) All of that is still very SQL centric but we only support
# SQL for now so let's say it's good enough.
conf.set_override(
'url',
sqlalchemy.SQLAlchemyIndexer._create_new_database(
conf.indexer.url),
'indexer')
index = indexer.get_driver(conf)
index.upgrade()
# Set pagination to a testable value
conf.set_override('max_limit', 7, 'api')
conf.set_override('enable_proxy_headers_parsing', True, group="api")
self.index = index
self.coord = metricd.get_coordinator_and_start(str(uuid.uuid4()),
conf.coordination_url)
s = storage.get_driver(conf)
i = incoming.get_driver(conf)
if conf.storage.driver == 'redis':
# Create one prefix per test
s.STORAGE_PREFIX = str(uuid.uuid4()).encode()
if conf.incoming.driver == 'redis':
i.SACK_NAME_FORMAT = (
str(uuid.uuid4()) + incoming.IncomingDriver.SACK_NAME_FORMAT
)
self.fixtures = [
fixtures.MockPatch("gnocchi.storage.get_driver",
return_value=s),
fixtures.MockPatch("gnocchi.incoming.get_driver",
return_value=i),
fixtures.MockPatch("gnocchi.indexer.get_driver",
return_value=self.index),
fixtures.MockPatch(
"gnocchi.cli.metricd.get_coordinator_and_start",
return_value=self.coord),
]
for f in self.fixtures:
f.setUp()
if conf.storage.driver == 'swift':
self.fixtures.append(swift_fixture)
LOAD_APP_KWARGS = {
'conf': conf,
}
s.upgrade()
i.upgrade(128)
# start up a thread to async process measures
self.metricd_thread = MetricdThread(chef.Chef(self.coord, i, index, s))
self.metricd_thread.start()
def stop_fixture(self):
"""Clean up the config fixture and storage artifacts."""
if hasattr(self, 'metricd_thread'):
self.metricd_thread.stop()
self.metricd_thread.join()
if hasattr(self, 'fixtures'):
for f in reversed(self.fixtures):
f.cleanUp()
if hasattr(self, 'index'):
self.index.disconnect()
# Swallow noise from missing tables when dropping
# database.
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
module='sqlalchemy.engine.default')
sqlalchemy_utils.drop_database(self.conf.indexer.url)
if self.tmp_dir:
shutil.rmtree(self.tmp_dir)
if hasattr(self, 'coord'):
self.coord.stop()
self.conf.reset()
if not os.getenv("GNOCCHI_TEST_DEBUG"):
self.output.cleanUp()
class MetricdThread(threading.Thread):
"""Run metricd in a naive thread to process measures."""
def __init__(self, chef, name='metricd'):
super(MetricdThread, self).__init__(name=name)
self.chef = chef
self.flag = True
def run(self):
while self.flag:
for sack in self.chef.incoming.iter_sacks():
self.chef.process_new_measures_for_sack(sack, blocking=True)
time.sleep(0.1)
def stop(self):
self.flag = False
| apache-2.0 |
meabsence/python-for-android | python3-alpha/python3-src/Lib/encodings/iso8859_3.py | 272 | 13089 | """ Python Character Mapping Codec iso8859_3 generated from 'MAPPINGS/ISO8859/8859-3.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-3',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u0126' # 0xA1 -> LATIN CAPITAL LETTER H WITH STROKE
'\u02d8' # 0xA2 -> BREVE
'\xa3' # 0xA3 -> POUND SIGN
'\xa4' # 0xA4 -> CURRENCY SIGN
    '\ufffe'   # 0xA5 -> UNDEFINED
'\u0124' # 0xA6 -> LATIN CAPITAL LETTER H WITH CIRCUMFLEX
'\xa7' # 0xA7 -> SECTION SIGN
'\xa8' # 0xA8 -> DIAERESIS
'\u0130' # 0xA9 -> LATIN CAPITAL LETTER I WITH DOT ABOVE
'\u015e' # 0xAA -> LATIN CAPITAL LETTER S WITH CEDILLA
'\u011e' # 0xAB -> LATIN CAPITAL LETTER G WITH BREVE
'\u0134' # 0xAC -> LATIN CAPITAL LETTER J WITH CIRCUMFLEX
'\xad' # 0xAD -> SOFT HYPHEN
    '\ufffe'   # 0xAE -> UNDEFINED
'\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
'\xb0' # 0xB0 -> DEGREE SIGN
'\u0127' # 0xB1 -> LATIN SMALL LETTER H WITH STROKE
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\xb4' # 0xB4 -> ACUTE ACCENT
'\xb5' # 0xB5 -> MICRO SIGN
'\u0125' # 0xB6 -> LATIN SMALL LETTER H WITH CIRCUMFLEX
'\xb7' # 0xB7 -> MIDDLE DOT
'\xb8' # 0xB8 -> CEDILLA
'\u0131' # 0xB9 -> LATIN SMALL LETTER DOTLESS I
'\u015f' # 0xBA -> LATIN SMALL LETTER S WITH CEDILLA
'\u011f' # 0xBB -> LATIN SMALL LETTER G WITH BREVE
'\u0135' # 0xBC -> LATIN SMALL LETTER J WITH CIRCUMFLEX
'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
    '\ufffe'   # 0xBE -> UNDEFINED
'\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE
'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
    '\ufffe'   # 0xC3 -> UNDEFINED
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\u010a' # 0xC5 -> LATIN CAPITAL LETTER C WITH DOT ABOVE
'\u0108' # 0xC6 -> LATIN CAPITAL LETTER C WITH CIRCUMFLEX
'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
    '\ufffe'   # 0xD0 -> UNDEFINED
'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\u0120' # 0xD5 -> LATIN CAPITAL LETTER G WITH DOT ABOVE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd7' # 0xD7 -> MULTIPLICATION SIGN
'\u011c' # 0xD8 -> LATIN CAPITAL LETTER G WITH CIRCUMFLEX
'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\u016c' # 0xDD -> LATIN CAPITAL LETTER U WITH BREVE
'\u015c' # 0xDE -> LATIN CAPITAL LETTER S WITH CIRCUMFLEX
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
    '\ufffe'   # 0xE3 -> UNDEFINED
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\u010b' # 0xE5 -> LATIN SMALL LETTER C WITH DOT ABOVE
'\u0109' # 0xE6 -> LATIN SMALL LETTER C WITH CIRCUMFLEX
'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
    '\ufffe'   # 0xF0 -> UNDEFINED
'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\u0121' # 0xF5 -> LATIN SMALL LETTER G WITH DOT ABOVE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0xF7 -> DIVISION SIGN
'\u011d' # 0xF8 -> LATIN SMALL LETTER G WITH CIRCUMFLEX
'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\u016d' # 0xFD -> LATIN SMALL LETTER U WITH BREVE
'\u015d' # 0xFE -> LATIN SMALL LETTER S WITH CIRCUMFLEX
'\u02d9' # 0xFF -> DOT ABOVE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 |
asm666/sympy | sympy/utilities/pytest.py | 78 | 4728 | """py.test hacks to support XFAIL/XPASS"""
from __future__ import print_function, division
import sys
import functools
import os
from sympy.core.compatibility import get_function_name
try:
import py
from py.test import skip, raises
USE_PYTEST = getattr(sys, '_running_pytest', False)
except ImportError:
USE_PYTEST = False
ON_TRAVIS = os.getenv('TRAVIS_BUILD_NUMBER', None)
if not USE_PYTEST:
def raises(expectedException, code=None):
"""
Tests that ``code`` raises the exception ``expectedException``.
``code`` may be a callable, such as a lambda expression or function
name.
If ``code`` is not given or None, ``raises`` will return a context
manager for use in ``with`` statements; the code to execute then
comes from the scope of the ``with``.
``raises()`` does nothing if the callable raises the expected exception,
otherwise it raises an AssertionError.
Examples
========
>>> from sympy.utilities.pytest import raises
>>> raises(ZeroDivisionError, lambda: 1/0)
>>> raises(ZeroDivisionError, lambda: 1/2)
Traceback (most recent call last):
...
AssertionError: DID NOT RAISE
>>> with raises(ZeroDivisionError):
... n = 1/0
>>> with raises(ZeroDivisionError):
... n = 1/2
Traceback (most recent call last):
...
AssertionError: DID NOT RAISE
Note that you cannot test multiple statements via
``with raises``:
>>> with raises(ZeroDivisionError):
... n = 1/0 # will execute and raise, aborting the ``with``
... n = 9999/0 # never executed
This is just what ``with`` is supposed to do: abort the
contained statement sequence at the first exception and let
the context manager deal with the exception.
To test multiple statements, you'll need a separate ``with``
for each:
>>> with raises(ZeroDivisionError):
... n = 1/0 # will execute and raise
>>> with raises(ZeroDivisionError):
... n = 9999/0 # will also execute and raise
"""
if code is None:
return RaisesContext(expectedException)
elif callable(code):
try:
code()
except expectedException:
return
raise AssertionError("DID NOT RAISE")
elif isinstance(code, str):
raise TypeError(
'\'raises(xxx, "code")\' has been phased out; '
'change \'raises(xxx, "expression")\' '
'to \'raises(xxx, lambda: expression)\', '
'\'raises(xxx, "statement")\' '
'to \'with raises(xxx): statement\'')
else:
raise TypeError(
'raises() expects a callable for the 2nd argument.')
class RaisesContext(object):
def __init__(self, expectedException):
self.expectedException = expectedException
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
raise AssertionError("DID NOT RAISE")
return issubclass(exc_type, self.expectedException)
class XFail(Exception):
pass
class XPass(Exception):
pass
class Skipped(Exception):
pass
def XFAIL(func):
def wrapper():
try:
func()
except Exception as e:
message = str(e)
if message != "Timeout":
raise XFail(get_function_name(func))
else:
raise Skipped("Timeout")
raise XPass(get_function_name(func))
wrapper = functools.update_wrapper(wrapper, func)
return wrapper
def skip(str):
raise Skipped(str)
def SKIP(reason):
"""Similar to :func:`skip`, but this is a decorator. """
def wrapper(func):
def func_wrapper():
raise Skipped(reason)
func_wrapper = functools.update_wrapper(func_wrapper, func)
return func_wrapper
return wrapper
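    # Illustrative usage sketch (not from the original file): with this
    # fallback active, the decorators mimic py.test's markers.
    #
    #     @XFAIL
    #     def test_known_bug():
    #         assert False            # raises XFail('test_known_bug')
    #
    #     @SKIP("requires network access")
    #     def test_remote():
    #         pass                    # raises Skipped('requires network access')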
def slow(func):
func._slow = True
def func_wrapper():
func()
func_wrapper = functools.update_wrapper(func_wrapper, func)
return func_wrapper
else:
XFAIL = py.test.mark.xfail
slow = py.test.mark.slow
def SKIP(reason):
def skipping(func):
@functools.wraps(func)
def inner(*args, **kwargs):
skip(reason)
return inner
return skipping
| bsd-3-clause |
ViennaChen/mysql-connector-python | setupinfo.py | 7 | 4296 | # MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from distutils.core import Extension
import os
import sys
from lib.cpy_distutils import (
Install, InstallLib, BuildExtDynamic, BuildExtStatic
)
# Development Status Trove Classifiers significant for Connector/Python
DEVELOPMENT_STATUSES = {
'a': '3 - Alpha',
'b': '4 - Beta',
'rc': '4 - Beta', # There is no Classifier for Release Candidates
'': '5 - Production/Stable'
}
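# Illustrative note (not part of the original file): VERSION[3] holds the
# pre-release suffix, so a hypothetical VERSION = (2, 0, 3, 'a', 1) selects
# DEVELOPMENT_STATUSES['a'] == '3 - Alpha', while a final release ('')
# selects '5 - Production/Stable' (see the classifiers list below).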
if not (((2, 6) <= sys.version_info < (3, 0)) or sys.version_info >= (3, 3)):
raise RuntimeError("Python v{major}.{minor} is not supported".format(
major=sys.version_info[0], minor=sys.version_info[1]
))
# Load version information
VERSION = [999, 0, 0, 'a', 0] # Set correct after version.py is loaded
version_py = os.path.join('lib', 'mysql', 'connector', 'version.py')
with open(version_py, 'rb') as fp:
exec(compile(fp.read(), version_py, 'exec'))
BuildExtDynamic.min_connector_c_version = (5, 5, 8)
command_classes = {
'build_ext': BuildExtDynamic,
'build_ext_static': BuildExtStatic,
'install_lib': InstallLib,
'install': Install,
}
package_dir = {'': 'lib'}
name = 'mysql-connector-python'
version = '{0}.{1}.{2}'.format(*VERSION[0:3])
extensions = [
Extension("_mysql_connector",
sources=[
"src/exceptions.c",
"src/mysql_capi.c",
"src/mysql_capi_conversion.c",
"src/mysql_connector.c",
"src/force_cpp_linkage.cc",
],
include_dirs=['src/include'],
)
]
packages = [
'mysql',
'mysql.connector',
'mysql.connector.locales',
'mysql.connector.locales.eng',
'mysql.connector.django',
'mysql.connector.fabric',
]
description = "MySQL driver written in Python"
long_description = """
MySQL driver written in Python which does not depend on MySQL C client
libraries and implements the DB API v2.0 specification (PEP-249).
"""
author = 'Oracle and/or its affiliates'
author_email = ''
maintainer = 'Geert Vanderkelen'
maintainer_email = '[email protected]'
cpy_gpl_license = "GNU GPLv2 (with FOSS License Exception)"
keywords = "mysql db"
url = 'http://dev.mysql.com/doc/connector-python/en/index.html'
download_url = 'http://dev.mysql.com/downloads/connector/python/'
classifiers = [
'Development Status :: %s' % (DEVELOPMENT_STATUSES[VERSION[3]]),
'Environment :: Other Environment',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Topic :: Database',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules'
]
| gpl-2.0 |
kustodian/ansible | lib/ansible/modules/cloud/amazon/aws_batch_compute_environment.py | 11 | 17073 | #!/usr/bin/python
# Copyright (c) 2017 Jon Meran <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_batch_compute_environment
short_description: Manage AWS Batch Compute Environments
description:
- This module allows the management of AWS Batch Compute Environments.
It is idempotent and supports "Check" mode. Use module M(aws_batch_compute_environment) to manage the compute
environment, M(aws_batch_job_queue) to manage job queues, M(aws_batch_job_definition) to manage job definitions.
version_added: "2.5"
author: Jon Meran (@jonmer85)
options:
compute_environment_name:
description:
- The name for your compute environment. Up to 128 letters (uppercase and lowercase), numbers, and underscores
are allowed.
required: true
type: str
type:
description:
- The type of the compute environment.
required: true
choices: ["MANAGED", "UNMANAGED"]
type: str
state:
description:
- Describes the desired state.
default: "present"
choices: ["present", "absent"]
type: str
compute_environment_state:
description:
- The state of the compute environment. If the state is ENABLED, then the compute environment accepts jobs
from a queue and can scale out automatically based on queues.
default: "ENABLED"
choices: ["ENABLED", "DISABLED"]
type: str
service_role:
description:
- The full Amazon Resource Name (ARN) of the IAM role that allows AWS Batch to make calls to other AWS
services on your behalf.
required: true
type: str
compute_resource_type:
description:
- The type of compute resource.
required: true
choices: ["EC2", "SPOT"]
type: str
minv_cpus:
description:
- The minimum number of EC2 vCPUs that an environment should maintain.
required: true
type: int
maxv_cpus:
description:
- The maximum number of EC2 vCPUs that an environment can reach.
required: true
type: int
desiredv_cpus:
description:
- The desired number of EC2 vCPUS in the compute environment.
type: int
instance_types:
description:
- The instance types that may be launched.
required: true
type: list
elements: str
image_id:
description:
- The Amazon Machine Image (AMI) ID used for instances launched in the compute environment.
type: str
subnets:
description:
- The VPC subnets into which the compute resources are launched.
required: true
type: list
elements: str
security_group_ids:
description:
- The EC2 security groups that are associated with instances launched in the compute environment.
required: true
type: list
elements: str
ec2_key_pair:
description:
- The EC2 key pair that is used for instances launched in the compute environment.
type: str
instance_role:
description:
- The Amazon ECS instance role applied to Amazon EC2 instances in a compute environment.
required: true
type: str
tags:
description:
- Key-value pair tags to be applied to resources that are launched in the compute environment.
type: dict
bid_percentage:
description:
- The minimum percentage that a Spot Instance price must be when compared with the On-Demand price for that
instance type before instances are launched. For example, if your bid percentage is 20%, then the Spot price
must be below 20% of the current On-Demand price for that EC2 instance.
type: int
spot_iam_fleet_role:
description:
- The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied to a SPOT compute environment.
type: str
requirements:
- boto3
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
---
- hosts: localhost
gather_facts: no
vars:
state: present
tasks:
- name: My Batch Compute Environment
aws_batch_compute_environment:
compute_environment_name: computeEnvironmentName
state: present
region: us-east-1
compute_environment_state: ENABLED
type: MANAGED
compute_resource_type: EC2
minv_cpus: 0
maxv_cpus: 2
desiredv_cpus: 1
instance_types:
- optimal
subnets:
- my-subnet1
- my-subnet2
security_group_ids:
- my-sg1
- my-sg2
instance_role: arn:aws:iam::<account>:instance-profile/<role>
tags:
tag1: value1
tag2: value2
service_role: arn:aws:iam::<account>:role/service-role/<role>
register: aws_batch_compute_environment_action
- name: show results
debug:
var: aws_batch_compute_environment_action
'''
RETURN = '''
---
output:
description: "returns what action was taken, whether something was changed, invocation and response"
returned: always
sample:
batch_compute_environment_action: none
changed: false
invocation:
module_args:
aws_access_key: ~
aws_secret_key: ~
bid_percentage: ~
compute_environment_name: <name>
compute_environment_state: ENABLED
compute_resource_type: EC2
desiredv_cpus: 0
ec2_key_pair: ~
ec2_url: ~
image_id: ~
instance_role: "arn:aws:iam::..."
instance_types:
- optimal
maxv_cpus: 8
minv_cpus: 0
profile: ~
region: us-east-1
security_group_ids:
- "*******"
security_token: ~
service_role: "arn:aws:iam::...."
spot_iam_fleet_role: ~
state: present
subnets:
- "******"
tags:
Environment: <name>
Name: <name>
type: MANAGED
validate_certs: true
response:
computeEnvironmentArn: "arn:aws:batch:...."
computeEnvironmentName: <name>
computeResources:
desiredvCpus: 0
instanceRole: "arn:aws:iam::..."
instanceTypes:
- optimal
maxvCpus: 8
minvCpus: 0
securityGroupIds:
- "******"
subnets:
- "*******"
tags:
Environment: <name>
Name: <name>
type: EC2
ecsClusterArn: "arn:aws:ecs:....."
serviceRole: "arn:aws:iam::..."
state: ENABLED
status: VALID
statusReason: "ComputeEnvironment Healthy"
type: MANAGED
type: dict
'''
from ansible.module_utils._text import to_native
from ansible.module_utils.aws.batch import AWSConnection
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import ec2_argument_spec, HAS_BOTO3
from ansible.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict
import re
import traceback
try:
from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError
except ImportError:
pass # Handled by HAS_BOTO3
# ---------------------------------------------------------------------------------------------------
#
# Helper Functions & classes
#
# ---------------------------------------------------------------------------------------------------
def set_api_params(module, module_params):
"""
Sets module parameters to those expected by the boto3 API.
:param module:
:param module_params:
:return:
"""
api_params = dict((k, v) for k, v in dict(module.params).items() if k in module_params and v is not None)
return snake_dict_to_camel_dict(api_params)
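# Illustrative sketch (not from the original module): with hypothetical
# module.params = {'minv_cpus': 0, 'image_id': None, 'state': 'present'},
# set_api_params(module, ('minv_cpus', 'image_id')) returns {'minvCpus': 0}.
# Keys outside module_params and keys whose value is None are dropped, and
# the survivors are camelCased by snake_dict_to_camel_dict.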
def validate_params(module, aws):
"""
Performs basic parameter validation.
:param module:
:param aws:
:return:
"""
compute_environment_name = module.params['compute_environment_name']
# validate compute environment name
if not re.search(r'^[\w\_:]+$', compute_environment_name):
module.fail_json(
msg="Function compute_environment_name {0} is invalid. Names must contain only alphanumeric characters "
"and underscores.".format(compute_environment_name)
)
if not compute_environment_name.startswith('arn:aws:batch:'):
if len(compute_environment_name) > 128:
module.fail_json(msg='compute_environment_name "{0}" exceeds 128 character limit'
.format(compute_environment_name))
return
# ---------------------------------------------------------------------------------------------------
#
# Batch Compute Environment functions
#
# ---------------------------------------------------------------------------------------------------
def get_current_compute_environment(module, connection):
try:
environments = connection.client().describe_compute_environments(
computeEnvironments=[module.params['compute_environment_name']]
)
if len(environments['computeEnvironments']) > 0:
return environments['computeEnvironments'][0]
else:
return None
except ClientError:
return None
def create_compute_environment(module, aws):
"""
Adds a Batch compute environment
:param module:
:param aws:
:return:
"""
client = aws.client('batch')
changed = False
# set API parameters
params = (
'compute_environment_name', 'type', 'service_role')
api_params = set_api_params(module, params)
if module.params['compute_environment_state'] is not None:
api_params['state'] = module.params['compute_environment_state']
compute_resources_param_list = ('minv_cpus', 'maxv_cpus', 'desiredv_cpus', 'instance_types', 'image_id', 'subnets',
'security_group_ids', 'ec2_key_pair', 'instance_role', 'tags', 'bid_percentage',
'spot_iam_fleet_role')
compute_resources_params = set_api_params(module, compute_resources_param_list)
if module.params['compute_resource_type'] is not None:
compute_resources_params['type'] = module.params['compute_resource_type']
# if module.params['minv_cpus'] is not None:
# compute_resources_params['minvCpus'] = module.params['minv_cpus']
api_params['computeResources'] = compute_resources_params
try:
if not module.check_mode:
client.create_compute_environment(**api_params)
changed = True
except (ClientError, ParamValidationError, MissingParametersError) as e:
module.fail_json(msg='Error creating compute environment: {0}'.format(to_native(e)),
exception=traceback.format_exc())
return changed
def remove_compute_environment(module, aws):
"""
Remove a Batch compute environment
:param module:
:param aws:
:return:
"""
client = aws.client('batch')
changed = False
# set API parameters
api_params = {'computeEnvironment': module.params['compute_environment_name']}
try:
if not module.check_mode:
client.delete_compute_environment(**api_params)
changed = True
except (ClientError, ParamValidationError, MissingParametersError) as e:
module.fail_json(msg='Error removing compute environment: {0}'.format(to_native(e)),
exception=traceback.format_exc())
return changed
def manage_state(module, aws):
changed = False
current_state = 'absent'
state = module.params['state']
compute_environment_state = module.params['compute_environment_state']
compute_environment_name = module.params['compute_environment_name']
service_role = module.params['service_role']
minv_cpus = module.params['minv_cpus']
maxv_cpus = module.params['maxv_cpus']
desiredv_cpus = module.params['desiredv_cpus']
action_taken = 'none'
update_env_response = ''
check_mode = module.check_mode
# check if the compute environment exists
current_compute_environment = get_current_compute_environment(module, aws)
response = current_compute_environment
if current_compute_environment:
current_state = 'present'
if state == 'present':
if current_state == 'present':
updates = False
# Update Batch Compute Environment configuration
compute_kwargs = {'computeEnvironment': compute_environment_name}
# Update configuration if needed
compute_resources = {}
if compute_environment_state and current_compute_environment['state'] != compute_environment_state:
compute_kwargs.update({'state': compute_environment_state})
updates = True
if service_role and current_compute_environment['serviceRole'] != service_role:
compute_kwargs.update({'serviceRole': service_role})
updates = True
if minv_cpus is not None and current_compute_environment['computeResources']['minvCpus'] != minv_cpus:
compute_resources['minvCpus'] = minv_cpus
if maxv_cpus is not None and current_compute_environment['computeResources']['maxvCpus'] != maxv_cpus:
compute_resources['maxvCpus'] = maxv_cpus
if desiredv_cpus is not None and current_compute_environment['computeResources']['desiredvCpus'] != desiredv_cpus:
compute_resources['desiredvCpus'] = desiredv_cpus
if len(compute_resources) > 0:
compute_kwargs['computeResources'] = compute_resources
updates = True
if updates:
try:
if not check_mode:
update_env_response = aws.client().update_compute_environment(**compute_kwargs)
if not update_env_response:
module.fail_json(msg='Unable to get compute environment information after creating')
changed = True
action_taken = "updated"
except (ParamValidationError, ClientError) as e:
module.fail_json(msg="Unable to update environment: {0}".format(to_native(e)),
exception=traceback.format_exc())
else:
# Create Batch Compute Environment
changed = create_compute_environment(module, aws)
# Describe compute environment
action_taken = 'added'
response = get_current_compute_environment(module, aws)
if not response:
module.fail_json(msg='Unable to get compute environment information after creating')
else:
if current_state == 'present':
# remove the compute environment
changed = remove_compute_environment(module, aws)
action_taken = 'deleted'
return dict(changed=changed, batch_compute_environment_action=action_taken, response=response)
# ---------------------------------------------------------------------------------------------------
#
# MAIN
#
# ---------------------------------------------------------------------------------------------------
def main():
"""
Main entry point.
:return dict: changed, batch_compute_environment_action, response
"""
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
state=dict(default='present', choices=['present', 'absent']),
compute_environment_name=dict(required=True),
type=dict(required=True, choices=['MANAGED', 'UNMANAGED']),
compute_environment_state=dict(required=False, default='ENABLED', choices=['ENABLED', 'DISABLED']),
service_role=dict(required=True),
compute_resource_type=dict(required=True, choices=['EC2', 'SPOT']),
minv_cpus=dict(type='int', required=True),
maxv_cpus=dict(type='int', required=True),
desiredv_cpus=dict(type='int'),
instance_types=dict(type='list', required=True),
image_id=dict(),
subnets=dict(type='list', required=True),
security_group_ids=dict(type='list', required=True),
ec2_key_pair=dict(),
instance_role=dict(required=True),
tags=dict(type='dict'),
bid_percentage=dict(type='int'),
spot_iam_fleet_role=dict(),
region=dict(aliases=['aws_region', 'ec2_region'])
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
# validate dependencies
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required for this module.')
aws = AWSConnection(module, ['batch'])
validate_params(module, aws)
results = manage_state(module, aws)
module.exit_json(**camel_dict_to_snake_dict(results, ignore_list=['Tags']))
if __name__ == '__main__':
main()
| gpl-3.0 |
boundarydevices/android_external_chromium_org | tools/telemetry/telemetry/core/platform/profiler/java_heap_profiler.py | 8 | 3258 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import threading
from telemetry.core import util
from telemetry.core.backends.chrome import android_browser_finder
from telemetry.core.platform import profiler
util.AddDirToPythonPath(util.GetChromiumSrcDir(), 'build', 'android')
try:
from pylib import constants # pylint: disable=F0401
except Exception:
constants = None
class JavaHeapProfiler(profiler.Profiler):
"""Android-specific, trigger and fetch java heap dumps."""
_DEFAULT_DEVICE_DIR = '/data/local/tmp/javaheap'
# TODO(bulach): expose this as a command line option somehow.
_DEFAULT_INTERVAL = 20
def __init__(self, browser_backend, platform_backend, output_path, state):
super(JavaHeapProfiler, self).__init__(
browser_backend, platform_backend, output_path, state)
self._run_count = 1
self._DumpJavaHeap(False)
self._timer = threading.Timer(self._DEFAULT_INTERVAL, self._OnTimer)
self._timer.start()
@classmethod
def name(cls):
return 'java-heap'
@classmethod
def is_supported(cls, browser_type):
if browser_type == 'any':
return android_browser_finder.CanFindAvailableBrowsers()
return browser_type.startswith('android')
def CollectProfile(self):
self._timer.cancel()
self._DumpJavaHeap(True)
self._browser_backend.adb.device().old_interface.Adb().Pull(
self._DEFAULT_DEVICE_DIR, self._output_path)
self._browser_backend.adb.RunShellCommand(
'rm ' + os.path.join(self._DEFAULT_DEVICE_DIR, '*'))
output_files = []
for f in os.listdir(self._output_path):
if os.path.splitext(f)[1] == '.aprof':
input_file = os.path.join(self._output_path, f)
output_file = input_file.replace('.aprof', '.hprof')
hprof_conv = os.path.join(constants.ANDROID_SDK_ROOT,
'tools', 'hprof-conv')
subprocess.call([hprof_conv, input_file, output_file])
output_files.append(output_file)
return output_files
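  # Illustrative note (not part of the original file): each pulled dump is
  # converted with the Android SDK tool, roughly equivalent to running:
  #
  #   hprof-conv <pid>.<run>.aprof <pid>.<run>.hprof
  #
  # so the output can be opened by standard Java heap analysis tools.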
def _OnTimer(self):
self._DumpJavaHeap(False)
def _DumpJavaHeap(self, wait_for_completion):
if not self._browser_backend.adb.device().old_interface.FileExistsOnDevice(
self._DEFAULT_DEVICE_DIR):
self._browser_backend.adb.RunShellCommand(
'mkdir -p ' + self._DEFAULT_DEVICE_DIR)
self._browser_backend.adb.RunShellCommand(
'chmod 777 ' + self._DEFAULT_DEVICE_DIR)
device_dump_file = None
for pid in self._GetProcessOutputFileMap().iterkeys():
device_dump_file = '%s/%s.%s.aprof' % (self._DEFAULT_DEVICE_DIR, pid,
self._run_count)
self._browser_backend.adb.RunShellCommand('am dumpheap %s %s' %
(pid, device_dump_file))
if device_dump_file and wait_for_completion:
util.WaitFor(lambda: self._FileSize(device_dump_file) > 0, timeout=2)
self._run_count += 1
def _FileSize(self, file_name):
f = self._browser_backend.adb.device().old_interface.ListPathContents(
file_name)
return f.get(os.path.basename(file_name), (0, ))[0]
| bsd-3-clause |
arantebillywilson/python-snippets | microblog/flask/lib/python3.5/site-packages/whoosh/util/__init__.py | 52 | 4424 | # Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from __future__ import with_statement
import random, sys, time
from array import array
from bisect import insort, bisect_left
from functools import wraps
from whoosh.compat import xrange
# These must be valid separate characters in CASE-INSENSITIVE filenames
IDCHARS = "0123456789abcdefghijklmnopqrstuvwxyz"
if hasattr(time, "perf_counter"):
now = time.perf_counter
elif sys.platform == 'win32':
now = time.clock
else:
now = time.time
def random_name(size=28):
return "".join(random.choice(IDCHARS) for _ in xrange(size))
def random_bytes(size=28):
gen = (random.randint(0, 255) for _ in xrange(size))
if sys.version_info[0] >= 3:
return bytes(gen)
else:
return array("B", gen).tostring()
def make_binary_tree(fn, args, **kwargs):
"""Takes a function/class that takes two positional arguments and a list of
arguments and returns a binary tree of results/instances.
>>> make_binary_tree(UnionMatcher, [matcher1, matcher2, matcher3])
UnionMatcher(matcher1, UnionMatcher(matcher2, matcher3))
Any keyword arguments given to this function are passed to the class
initializer.
"""
count = len(args)
if not count:
raise ValueError("Called make_binary_tree with empty list")
elif count == 1:
return args[0]
half = count // 2
return fn(make_binary_tree(fn, args[:half], **kwargs),
make_binary_tree(fn, args[half:], **kwargs), **kwargs)
def make_weighted_tree(fn, ls, **kwargs):
"""Takes a function/class that takes two positional arguments and a list of
    (weight, argument) tuples and returns a Huffman-like weighted tree of
results/instances.
"""
if not ls:
raise ValueError("Called make_weighted_tree with empty list")
ls.sort()
while len(ls) > 1:
a = ls.pop(0)
b = ls.pop(0)
insort(ls, (a[0] + b[0], fn(a[1], b[1])))
return ls[0][1]
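# Illustrative sketch (not in the original module), using a hypothetical
# two-argument Union class:
#
#     tree = make_weighted_tree(Union, [(5, m1), (1, m2), (2, m3)])
#
# The two lightest entries merge first (weights 1 and 2 -> weight 3), and
# the result then combines with the heaviest, giving Union(Union(m2, m3), m1).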
# Fibonacci function
_fib_cache = {}
def fib(n):
"""Returns the nth value in the Fibonacci sequence.
"""
if n <= 2:
return n
if n in _fib_cache:
return _fib_cache[n]
result = fib(n - 1) + fib(n - 2)
_fib_cache[n] = result
return result
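# Illustrative note (not in the original module): results are memoized, so
# after one call to fib(100) the _fib_cache dict holds every intermediate
# value and any repeated call becomes a single dictionary lookup.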
# Decorators
def synchronized(func):
"""Decorator for storage-access methods, which synchronizes on a threading
lock. The parent object must have 'is_closed' and '_sync_lock' attributes.
"""
@wraps(func)
def synchronized_wrapper(self, *args, **kwargs):
with self._sync_lock:
return func(self, *args, **kwargs)
return synchronized_wrapper
def unclosed(method):
"""
Decorator to check if the object is closed.
"""
@wraps(method)
def unclosed_wrapper(self, *args, **kwargs):
if self.closed:
raise ValueError("Operation on a closed object")
return method(self, *args, **kwargs)
return unclosed_wrapper
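# Illustrative usage sketch (not part of the original module); the class
# below is hypothetical but supplies the attributes the decorators expect:
#
#     import threading
#
#     class Storage(object):
#         def __init__(self):
#             self.closed = False
#             self._sync_lock = threading.RLock()
#
#         @synchronized
#         @unclosed
#         def write(self, data):
#             pass   # runs holding self._sync_lock, only while not closed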
| mit |
ruzhytskyi/Koans | python3/koans/about_class_attributes.py | 97 | 4668 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based on AboutClassMethods in the Ruby Koans
#
from runner.koan import *
class AboutClassAttributes(Koan):
class Dog:
pass
def test_objects_are_objects(self):
fido = self.Dog()
self.assertEqual(__, isinstance(fido, object))
def test_classes_are_types(self):
self.assertEqual(__, self.Dog.__class__ == type)
def test_classes_are_objects_too(self):
self.assertEqual(__, issubclass(self.Dog, object))
def test_objects_have_methods(self):
fido = self.Dog()
self.assertEqual(__, len(dir(fido)))
def test_classes_have_methods(self):
self.assertEqual(__, len(dir(self.Dog)))
def test_creating_objects_without_defining_a_class(self):
singularity = object()
self.assertEqual(__, len(dir(singularity)))
def test_defining_attributes_on_individual_objects(self):
fido = self.Dog()
fido.legs = 4
self.assertEqual(__, fido.legs)
def test_defining_functions_on_individual_objects(self):
fido = self.Dog()
fido.wag = lambda : 'fidos wag'
self.assertEqual(__, fido.wag())
def test_other_objects_are_not_affected_by_these_singleton_functions(self):
fido = self.Dog()
rover = self.Dog()
def wag():
return 'fidos wag'
fido.wag = wag
with self.assertRaises(___): rover.wag()
# ------------------------------------------------------------------
class Dog2:
def wag(self):
return 'instance wag'
def bark(self):
return "instance bark"
def growl(self):
return "instance growl"
@staticmethod
def bark():
return "staticmethod bark, arg: None"
@classmethod
def growl(cls):
return "classmethod growl, arg: cls=" + cls.__name__
def test_since_classes_are_objects_you_can_define_singleton_methods_on_them_too(self):
self.assertRegexpMatches(self.Dog2.growl(), __)
def test_classmethods_are_not_independent_of_instance_methods(self):
fido = self.Dog2()
self.assertRegexpMatches(fido.growl(), __)
self.assertRegexpMatches(self.Dog2.growl(), __)
def test_staticmethods_are_unbound_functions_housed_in_a_class(self):
self.assertRegexpMatches(self.Dog2.bark(), __)
def test_staticmethods_also_overshadow_instance_methods(self):
fido = self.Dog2()
self.assertRegexpMatches(fido.bark(), __)
# ------------------------------------------------------------------
class Dog3:
def __init__(self):
self._name = None
def get_name_from_instance(self):
return self._name
def set_name_from_instance(self, name):
self._name = name
@classmethod
def get_name(cls):
return cls._name
@classmethod
def set_name(cls, name):
cls._name = name
name = property(get_name, set_name)
name_from_instance = property(get_name_from_instance, set_name_from_instance)
def test_classmethods_can_not_be_used_as_properties(self):
fido = self.Dog3()
with self.assertRaises(___): fido.name = "Fido"
def test_classes_and_instances_do_not_share_instance_attributes(self):
fido = self.Dog3()
fido.set_name_from_instance("Fido")
fido.set_name("Rover")
self.assertEqual(__, fido.get_name_from_instance())
self.assertEqual(__, self.Dog3.get_name())
def test_classes_and_instances_do_share_class_attributes(self):
fido = self.Dog3()
fido.set_name("Fido")
self.assertEqual(__, fido.get_name())
self.assertEqual(__, self.Dog3.get_name())
# ------------------------------------------------------------------
class Dog4:
def a_class_method(cls):
return 'dogs class method'
def a_static_method():
return 'dogs static method'
a_class_method = classmethod(a_class_method)
a_static_method = staticmethod(a_static_method)
def test_you_can_define_class_methods_without_using_a_decorator(self):
self.assertEqual(__, self.Dog4.a_class_method())
def test_you_can_define_static_methods_without_using_a_decorator(self):
self.assertEqual(__, self.Dog4.a_static_method())
# ------------------------------------------------------------------
def test_heres_an_easy_way_to_explicitly_call_class_methods_from_instance_methods(self):
fido = self.Dog4()
self.assertEqual(__, fido.__class__.a_class_method())
| mit |
savoirfairelinux/django | tests/admin_inlines/admin.py | 17 | 5776 | from django import forms
from django.contrib import admin
from .models import (
Author, BinaryTree, CapoFamiglia, Chapter, ChildModel1, ChildModel2,
Consigliere, EditablePKBook, ExtraTerrestrial, Fashionista, Holder,
Holder2, Holder3, Holder4, Inner, Inner2, Inner3, Inner4Stacked,
Inner4Tabular, NonAutoPKBook, NonAutoPKBookChild, Novel,
ParentModelWithCustomPk, Poll, Profile, ProfileCollection, Question,
ReadOnlyInline, ShoppingWeakness, Sighting, SomeChildModel,
SomeParentModel, SottoCapo, Title, TitleCollection,
)
site = admin.AdminSite(name="admin")
class BookInline(admin.TabularInline):
model = Author.books.through
class NonAutoPKBookTabularInline(admin.TabularInline):
model = NonAutoPKBook
classes = ('collapse',)
class NonAutoPKBookChildTabularInline(admin.TabularInline):
model = NonAutoPKBookChild
classes = ('collapse',)
class NonAutoPKBookStackedInline(admin.StackedInline):
model = NonAutoPKBook
classes = ('collapse',)
class EditablePKBookTabularInline(admin.TabularInline):
model = EditablePKBook
class EditablePKBookStackedInline(admin.StackedInline):
model = EditablePKBook
class AuthorAdmin(admin.ModelAdmin):
inlines = [
BookInline, NonAutoPKBookTabularInline, NonAutoPKBookStackedInline,
EditablePKBookTabularInline, EditablePKBookStackedInline,
NonAutoPKBookChildTabularInline,
]
class InnerInline(admin.StackedInline):
model = Inner
can_delete = False
readonly_fields = ('readonly',) # For bug #13174 tests.
class HolderAdmin(admin.ModelAdmin):
class Media:
js = ('my_awesome_admin_scripts.js',)
class ReadOnlyInlineInline(admin.TabularInline):
model = ReadOnlyInline
readonly_fields = ['name']
class InnerInline2(admin.StackedInline):
model = Inner2
class Media:
js = ('my_awesome_inline_scripts.js',)
class InnerInline3(admin.StackedInline):
model = Inner3
class Media:
js = ('my_awesome_inline_scripts.js',)
class TitleForm(forms.ModelForm):
title1 = forms.CharField(max_length=100)
def clean(self):
cleaned_data = self.cleaned_data
title1 = cleaned_data.get("title1")
title2 = cleaned_data.get("title2")
if title1 != title2:
raise forms.ValidationError("The two titles must be the same")
return cleaned_data
class TitleInline(admin.TabularInline):
model = Title
form = TitleForm
extra = 1
class Inner4StackedInline(admin.StackedInline):
model = Inner4Stacked
show_change_link = True
class Inner4TabularInline(admin.TabularInline):
model = Inner4Tabular
show_change_link = True
class Holder4Admin(admin.ModelAdmin):
inlines = [Inner4StackedInline, Inner4TabularInline]
class InlineWeakness(admin.TabularInline):
model = ShoppingWeakness
extra = 1
class QuestionInline(admin.TabularInline):
model = Question
readonly_fields = ['call_me']
def call_me(self, obj):
return 'Callable in QuestionInline'
class PollAdmin(admin.ModelAdmin):
inlines = [QuestionInline]
def call_me(self, obj):
return 'Callable in PollAdmin'
class ChapterInline(admin.TabularInline):
model = Chapter
readonly_fields = ['call_me']
def call_me(self, obj):
return 'Callable in ChapterInline'
class NovelAdmin(admin.ModelAdmin):
inlines = [ChapterInline]
class ConsigliereInline(admin.TabularInline):
model = Consigliere
class SottoCapoInline(admin.TabularInline):
model = SottoCapo
class ProfileInline(admin.TabularInline):
model = Profile
extra = 1
# admin for #18433
class ChildModel1Inline(admin.TabularInline):
model = ChildModel1
class ChildModel2Inline(admin.StackedInline):
model = ChildModel2
# admin for #19425 and #18388
class BinaryTreeAdmin(admin.TabularInline):
model = BinaryTree
def get_extra(self, request, obj=None, **kwargs):
extra = 2
if obj:
return extra - obj.binarytree_set.count()
return extra
def get_max_num(self, request, obj=None, **kwargs):
max_num = 3
if obj:
return max_num - obj.binarytree_set.count()
return max_num
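    # Illustrative note (not part of the original file): with extra = 2 and
    # max_num = 3, an object that already has two BinaryTree children gets
    # max_num - 2 = 1 total slot and extra - 2 = 0 blank extra forms.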
# admin for #19524
class SightingInline(admin.TabularInline):
model = Sighting
# admin and form for #18263
class SomeChildModelForm(forms.ModelForm):
class Meta:
fields = '__all__'
model = SomeChildModel
widgets = {
'position': forms.HiddenInput,
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['name'].label = 'new label'
class SomeChildModelInline(admin.TabularInline):
model = SomeChildModel
form = SomeChildModelForm
site.register(TitleCollection, inlines=[TitleInline])
# Test bug #12561 and #12778
# only ModelAdmin media
site.register(Holder, HolderAdmin, inlines=[InnerInline])
# ModelAdmin and Inline media
site.register(Holder2, HolderAdmin, inlines=[InnerInline2])
# only Inline media
site.register(Holder3, inlines=[InnerInline3])
site.register(Poll, PollAdmin)
site.register(Novel, NovelAdmin)
site.register(Fashionista, inlines=[InlineWeakness])
site.register(Holder4, Holder4Admin)
site.register(Author, AuthorAdmin)
site.register(CapoFamiglia, inlines=[ConsigliereInline, SottoCapoInline, ReadOnlyInlineInline])
site.register(ProfileCollection, inlines=[ProfileInline])
site.register(ParentModelWithCustomPk, inlines=[ChildModel1Inline, ChildModel2Inline])
site.register(BinaryTree, inlines=[BinaryTreeAdmin])
site.register(ExtraTerrestrial, inlines=[SightingInline])
site.register(SomeParentModel, inlines=[SomeChildModelInline])
site.register([Question, Inner4Stacked, Inner4Tabular])
| bsd-3-clause |
golismero/golismero | golismero/common.py | 8 | 38619 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Common constants, classes and functions used across GoLismero.
"""
__license__ = """
GoLismero 2.0 - The web knife - Copyright (C) 2011-2014
Golismero project site: https://github.com/golismero
Golismero project mail: [email protected]
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
__all__ = [
# Dynamically loaded modules, picks the fastest one available.
"pickle", "random", "json_encode", "json_decode",
# Helper functions.
"get_user_settings_folder", "get_default_config_file",
"get_default_user_config_file", "get_default_plugins_folder",
"get_data_folder", "get_wordlists_folder",
"get_install_folder", "get_tools_folder",
"get_profiles_folder", "get_profile", "get_available_profiles",
# Helper classes and decorators.
"Singleton", "decorator", "export_methods_as_functions",
"EmptyNewStyleClass",
# Configuration objects.
"OrchestratorConfig", "AuditConfig"
]
# Load the fast C version of pickle,
# if not available use the pure-Python version.
try:
import cPickle as pickle
except ImportError:
import pickle
# Import @decorator from the decorator module, if available.
# Otherwise define a simple but crude replacement.
try:
from decorator import decorator
except ImportError:
import functools
def decorator(w):
"""
The decorator module was not found. You can install it from:
http://pypi.python.org/pypi/decorator/
"""
def d(fn):
@functools.wraps(fn)
def x(*args, **kwargs):
return w(fn, *args, **kwargs)
return x
return d
try:
# The fastest JSON parser available for Python.
from cjson import decode as json_decode
from cjson import encode as json_encode
except ImportError:
try:
# Faster than the built-in module, usually found.
from simplejson import loads as json_decode
from simplejson import dumps as json_encode
except ImportError:
# Built-in module since Python 2.6, very very slow!
from json import loads as json_decode
from json import dumps as json_encode
# Other imports.
from netaddr import IPNetwork
from ConfigParser import RawConfigParser
from keyword import iskeyword
from os import path
import os
import random #noqa
import sys
# Remove the docstrings. This prevents errors when generating the API docs.
try:
json_encode.__doc__ = ""
except Exception:
_orig_json_encode = json_encode
def json_encode(*args, **kwargs):
return _orig_json_encode(*args, **kwargs)
try:
json_decode.__doc__ = ""
except Exception:
_orig_json_decode = json_decode
def json_decode(*args, **kwargs):
return _orig_json_decode(*args, **kwargs)
#------------------------------------------------------------------------------
# Helper class for instance creation without calling __init__().
class EmptyNewStyleClass (object):
pass
#------------------------------------------------------------------------------
_user_settings_folder = None
def get_user_settings_folder():
"""
Get the current user's GoLismero settings folder.
This folder will be used to store the various caches
and the user-defined plugins.
:returns: GoLismero settings folder.
:rtype: str
"""
# TODO: on Windows, use the roaming data folder instead.
# Return the cached value if available.
global _user_settings_folder
if _user_settings_folder:
return _user_settings_folder
# Get the user's home folder.
home = os.getenv("HOME") # Unix
if not home:
home = os.getenv("USERPROFILE") # Windows
# If all else fails, use the current directory.
if not home:
home = os.getcwd()
# Get the user settings folder.
folder = path.join(home, ".golismero")
# Make sure it ends with a slash.
if not folder.endswith(path.sep):
folder += path.sep
# Make sure it exists.
try:
os.makedirs(folder)
except Exception:
pass
# Cache the folder.
_user_settings_folder = folder
# Return the folder.
return folder
#------------------------------------------------------------------------------
def get_default_config_file():
"""
:returns:
Pathname of the default configuration file,
or None if it doesn't exist.
:rtype: str | None
"""
config_file = path.split(path.abspath(__file__))[0]
config_file = path.join(config_file, "..", "golismero.conf")
config_file = path.abspath(config_file)
if not path.isfile(config_file):
if path.sep == "/" and path.isfile("/etc/golismero.conf"):
config_file = "/etc/golismero.conf"
else:
config_file = None
return config_file
#------------------------------------------------------------------------------
def get_default_user_config_file():
"""
:returns:
Pathname of the default per-user configuration file,
or None if it doesn't exist.
:rtype: str | None
"""
config_file = path.join(get_user_settings_folder(), "user.conf")
if not path.isfile(config_file):
config_file = path.split(path.abspath(__file__))[0]
config_file = path.join(config_file, "..", "user.conf")
config_file = path.abspath(config_file)
if not path.isfile(config_file):
config_file = None
return config_file
#------------------------------------------------------------------------------
_install_folder = None
def get_install_folder():
"""
:returns: Pathname of the install folder.
:rtype: str
"""
global _install_folder
if not _install_folder:
pathname = path.split(path.abspath(__file__))[0]
pathname = path.join(pathname, "..")
pathname = path.abspath(pathname)
_install_folder = pathname
return _install_folder
#------------------------------------------------------------------------------
def get_tools_folder():
"""
:returns: Pathname of the bundled tools folder.
:rtype: str
"""
return path.join(get_install_folder(), "tools")
#------------------------------------------------------------------------------
def get_wordlists_folder():
"""
:returns: Pathname of the wordlists folder.
:rtype: str
"""
return path.join(get_install_folder(), "wordlist")
#------------------------------------------------------------------------------
def get_data_folder():
"""
:returns: Pathname of the data folder.
:rtype: str
"""
return path.join(get_install_folder(), "data")
#------------------------------------------------------------------------------
def get_default_plugins_folder():
"""
:returns: Default location for the plugins folder.
:rtype: str
"""
return path.join(get_install_folder(), "plugins")
#------------------------------------------------------------------------------
def get_profiles_folder():
"""
:returns: Pathname of the profiles folder.
:rtype: str
"""
return path.join(get_install_folder(), "profiles")
#------------------------------------------------------------------------------
def get_profile(name):
"""
Get the profile configuration file for the requested profile name.
:param name: Name of the profile.
:type name: str
:returns: Pathname of the profile configuration file.
:rtype: str
:raises ValueError: The name was invalid, or the profile was not found.
"""
# Trivial case.
if not name:
raise ValueError("No profile name given")
# Get the profiles folder.
profiles = get_profiles_folder()
# Get the filename for the requested profile.
filename = path.abspath(path.join(profiles, name + ".profile"))
# Check if it's outside the profiles folder or it doesn't exist.
if not profiles.endswith(path.sep):
profiles += path.sep
if not filename.startswith(profiles) or not path.isfile(filename):
raise ValueError("Profile not found: %r" % name)
# Return the filename.
return filename
#------------------------------------------------------------------------------
def get_available_profiles():
"""
:returns: Available profiles.
:rtype: set(str)
"""
profiles_folder = get_profiles_folder()
if not profiles_folder or not path.isdir(profiles_folder):
return set()
return {
path.splitext(name)[0]
for name in os.listdir(profiles_folder)
if name.endswith(".profile")
}
#------------------------------------------------------------------------------
class Singleton (object):
"""
Implementation of the Singleton pattern.
"""
# Variable where we keep the instance.
_instance = None
def __new__(cls):
# If the singleton has already been instanced, return it.
if cls._instance is not None:
return cls._instance
# Create the singleton's instance.
cls._instance = super(Singleton, cls).__new__(cls)
# Call the constructor.
cls.__init__(cls._instance)
# Delete the constructor so it won't be called again.
cls._instance.__init__ = object.__init__
cls.__init__ = object.__init__
# Return the instance.
return cls._instance
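    # Illustrative usage sketch (not part of the original file):
    #
    #     class Orchestrator(Singleton):
    #         def __init__(self):
    #             self.counter = 0
    #
    #     assert Orchestrator() is Orchestrator()  # always the same instance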
#------------------------------------------------------------------------------
def export_methods_as_functions(singleton, module):
"""
Export all methods from a Singleton instance as bare functions of a module.
:param singleton: Singleton instance to export.
:type singleton: Singleton
:param module: Target module name.
This would typically be \\_\\_name\\_\\_.
:type module: str
:raises KeyError: No module with that name is loaded.
"""
# TODO: maybe take the module name as input instead,
# and pull everything else from sys.modules.
clazz = singleton.__class__
module_obj = sys.modules[module]
try:
exports = module_obj.__all__
except AttributeError:
exports = module_obj.__all__ = []
for name in dir(clazz):
if name[0] != "_":
unbound = getattr(clazz, name)
if callable(unbound) and not isinstance(unbound, property):
bound = getattr(singleton, name)
setattr(module_obj, name, bound)
if name not in exports:
exports.append(name)
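# Illustrative sketch (not part of the original file): exporting a
# hypothetical singleton so its methods double as module functions.
#
#     class _Cache(Singleton):
#         def get(self, key):
#             return None
#
#     export_methods_as_functions(_Cache(), __name__)
#     # get() is now a bare function of this module, listed in __all__.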
#------------------------------------------------------------------------------
class Configuration (object):
"""
Generic configuration class.
"""
#--------------------------------------------------------------------------
# The logic in configuration classes is always:
# - Checking options without fixing them is done in check_params().
# - Sanitizing (fixing) options is done in parsers or in property setters.
# - For each source, there's a "from_*" method. They add to the
# current options rather than overwriting them completely.
# This allows options to be read from multiple sources.
#--------------------------------------------------------------------------
# Here's where subclasses define the options.
#
# It's a dictionary of tuples of the following format:
#
# name: ( parser, default )
#
# Where "name" is the option name, "parser" is an optional
# callback to parse the input values, and "default" is an
# optional default value.
#
# If no parser is given, the values are preserved when set.
#
# Example:
# class MySettings(Configuration):
# _settings_ = {
# "verbose": (int, 0), # A complete definition.
# "output_file": str, # Omitting the default value (None is used).
# "data": None, # Omitting the parser too.
# }
#
_settings_ = dict()
# This is a set of properties that may not be loaded from a config file.
# They will still be loaded from objects, dictionaries, JSON, etc.
_forbidden_ = set()
#--------------------------------------------------------------------------
# Some helper parsers.
@staticmethod
def string(x):
if x is None:
return None
if isinstance(x, unicode):
return x.encode("UTF-8")
return str(x)
@staticmethod
def integer(x):
if type(x) in (int, long):
return x
return int(x, 0) if x else 0
@staticmethod
def integer_or_none(x):
if x is None or (hasattr(x, "lower") and
x.lower() in ("", "none", "inf", "infinite")):
return None
return Configuration.integer(x)
@staticmethod
def float(x):
return float(x) if x else 0.0
@staticmethod
def comma_separated_list(x):
if not x:
return []
if isinstance(x, str):
return [t.strip() for t in x.split(",")]
if isinstance(x, unicode):
return [t.strip().encode("UTF-8") for t in x.split(u",")]
return list(x)
@staticmethod
def boolean(x):
if not x:
return False
if x is True:
return x
if hasattr(x, "lower"):
return {
"enabled": True, # True
"enable": True,
"true": True,
"yes": True,
"y": True,
"1": True,
"disabled": False, # False
"disable": False,
"false": False,
"no": False,
"f": False,
"0": False,
}.get(x.lower(), bool(x))
return bool(x)
@staticmethod
def trinary(x):
if x in (None, True, False):
return x
if not hasattr(x, "lower"):
raise ValueError(
"Trinary values only accept True, False and None")
try:
return {
"enabled": True, # True
"enable": True,
"true": True,
"yes": True,
"y": True,
"1": True,
"disabled": False, # False
"disable": False,
"false": False,
"no": False,
"f": False,
"0": False,
"default": None, # None
"def": None,
"none": None,
"maybe": None,
"?": None,
"-1": None,
}[x.lower()]
except KeyError:
raise ValueError("Unknown value: %r" % x)
#--------------------------------------------------------------------------
def __init__(self):
history = set()
for name, definition in self._settings_.iteritems():
if name in history:
raise SyntaxError("Duplicated option name: %r" % name)
history.add(name)
if type(definition) not in (tuple, list):
definition = (definition, None)
self.__init_option(name, *definition)
#--------------------------------------------------------------------------
def __init_option(self, name, parser = None, default = None):
if name.endswith("_") or not name.replace("_", "").isalnum():
msg = "Option name %r is not a valid Python identifier"
raise SyntaxError(msg % name)
if iskeyword(name):
msg = "Option name %r is a Python reserved keyword"
raise SyntaxError(msg % name)
if name.startswith("__"):
msg = "Option name %r is a private Python identifier"
raise SyntaxError(msg % name)
if name.startswith("_"):
msg = "Option name %r is a protected Python identifier"
raise SyntaxError(msg % name)
if parser is not None and not callable(parser):
msg = "Option parser cannot be of type %s"
raise SyntaxError(msg % type(parser))
setattr(self, name, default)
#--------------------------------------------------------------------------
def __setattr__(self, name, value):
if not name.startswith("_"):
definition = self._settings_.get(name, (None, None))
if type(definition) not in (tuple, list):
definition = (definition, None)
parser = definition[0]
if parser is not None:
value = parser(value)
object.__setattr__(self, name, value)
#--------------------------------------------------------------------------
def check_params(self):
"""
Check if parameters are valid. Raises an exception otherwise.
This method only checks the validity of the arguments,
it won't modify them.
:raises ValueError: The parameters are incorrect.
"""
return
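    # Override sketch (illustrative): a subclass validates its own options
    # here without mutating them.
    #
    #   class MySettings(Configuration):
    #       _settings_ = {"retries": (Configuration.integer, 3)}
    #       def check_params(self):
    #           if self.retries < 0:
    #               raise ValueError("retries cannot be negative")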
#--------------------------------------------------------------------------
def from_dictionary(self, args):
"""
Get the settings from a Python dictionary.
:param args: Settings.
:type args: dict(str -> \\*)
"""
for name, value in args.iteritems():
if name in self._settings_:
setattr(self, name, value)
#--------------------------------------------------------------------------
def from_object(self, args):
"""
Get the settings from the attributes of a Python object.
:param args:
Python object,
for example the command line arguments parsed by argparse.
:type args: object
"""
# Builds a dictionary with the object's public attributes.
args = {
k : getattr(args, k)
for k in dir(args) if not k.startswith("_")
}
# Remove all attributes whose values are None.
args = { k:v for k,v in args.iteritems() if v is not None }
# Extract the settings from the dictionary.
if args:
self.from_dictionary(args)
#--------------------------------------------------------------------------
def from_json(self, json_raw_data):
"""
Get the settings from a JSON encoded dictionary.
:param json_raw_data: JSON raw data.
:type json_raw_data: str
"""
# Converts the JSON data into a dictionary.
args = json_decode(json_raw_data)
if not isinstance(args, dict):
raise TypeError("Invalid JSON data")
# Extract the settings from the dictionary.
if args:
self.from_dictionary(args)
#--------------------------------------------------------------------------
def from_config_file(self, config_file, allow_profile = False):
"""
Get the settings from a configuration file.
:param config_file: Configuration file.
:type config_file: str
:param allow_profile: True to allow reading the profile name
from the config file, False to forbid it. Global config
files should allow setting a default profile, but profile
config files should not, as it wouldn't make sense.
"""
parser = RawConfigParser()
parser.read(config_file)
if parser.has_section("golismero"):
options = { k:v for k,v in parser.items("golismero") if v }
if "profile" in options:
if allow_profile:
self.profile = options["profile"]
self.profile_file = get_profile(self.profile)
else:
del options["profile"]
for k in self._forbidden_:
if k in options:
del options[k]
if options:
self.from_dictionary(options)
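    # Input sketch: a file consumed by from_config_file() would look roughly
    # like this (option names must match _settings_ keys; "profile" is only
    # honored when allow_profile is True):
    #
    #   [golismero]
    #   verbose = 3
    #   color = yes
    #   profile = my_profile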
#--------------------------------------------------------------------------
def to_dictionary(self):
"""
Copy the settings to a Python dictionary.
:returns: Dictionary that maps the setting names to their values.
:rtype: dict(str -> \\*)
"""
result = {}
for name, definition in self._settings_.iteritems():
default = None
if type(definition) in (tuple, list) and len(definition) > 1:
default = definition[1]
value = getattr(self, name, default)
result[name] = value
return result
#--------------------------------------------------------------------------
def to_json(self):
"""
Copy the settings to a JSON encoded dictionary.
:returns: Settings as a JSON encoded dictionary.
:rtype: str
"""
# Extract the settings to a dictionary and encode it with JSON.
return json_encode( self.to_dictionary() )
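    # Round-trip sketch (illustrative, using the OrchestratorConfig subclass
    # defined below): values pass through the parsers on the way in and come
    # back out via to_dictionary()/to_json().
    #
    #   cfg = OrchestratorConfig()
    #   cfg.from_dictionary({"verbose": "5", "color": "yes"})
    #   cfg.verbose   # -> 5    (Configuration.integer)
    #   cfg.color     # -> True (Configuration.boolean)
    #   blob = cfg.to_json()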
#------------------------------------------------------------------------------
class OrchestratorConfig (Configuration):
"""
Orchestrator configuration object.
"""
#--------------------------------------------------------------------------
# The options definitions, they will be read from the config file:
#
_forbidden_ = set(( # except for these:
"config_file", "user_config_file",
"profile_file", "plugin_args", "ui_mode",
))
_settings_ = {
#
# Main options.
#
# UI mode.
"ui_mode": (str, "console"),
# Verbosity level.
"verbose": (Configuration.integer, 2),
# Colorize console?
"color": (Configuration.boolean, False),
#
# Plugin options.
#
# Enabled plugins.
"enable_plugins": (Configuration.comma_separated_list, ["all"]),
# Disabled plugins.
"disable_plugins": (Configuration.comma_separated_list, []),
# Plugins folder.
"plugins_folder": Configuration.string,
# Maximum number plugins to execute concurrently.
"max_concurrent": (Configuration.integer, 4),
#
# Network options.
#
# Maximum number of connections per host.
"max_connections": (Configuration.integer, 20),
# Use persistent cache?
"use_cache_db": (Configuration.boolean, True),
# When run as a service.
"listen_address": Configuration.string,
"listen_port": Configuration.integer,
"server_push": Configuration.string,
}
#--------------------------------------------------------------------------
# Options that are only set in runtime, not loaded from the config file.
# Configuration files.
config_file = get_default_config_file()
user_config_file = get_default_user_config_file()
# Profile.
profile = None
profile_file = None
# Plugin arguments.
plugin_args = dict() # plugin_id -> key -> value
#--------------------------------------------------------------------------
@staticmethod
def _load_profile(self, args):
if "profile" in args:
self.profile = args["profile"]
if isinstance(self.profile, unicode):
self.profile = self.profile.encode("UTF-8")
self.profile_file = get_profile(self.profile)
@staticmethod
def _load_plugin_args(self, args):
if "plugin_args" in args:
plugin_args = {}
for (plugin_id, target_args) in args["plugin_args"].iteritems():
if isinstance(plugin_id, unicode):
plugin_id = plugin_id.encode("UTF-8")
if not plugin_id in plugin_args:
plugin_args[plugin_id] = {}
for (key, value) in target_args.iteritems():
if isinstance(key, unicode):
key = key.encode("UTF-8")
if isinstance(value, unicode):
value = value.encode("UTF-8")
plugin_args[plugin_id][key] = value
self.plugin_args = plugin_args
def from_dictionary(self, args):
# Security note: do not copy config filenames!
# See the _forbidden_ property.
super(OrchestratorConfig, self).from_dictionary(args)
self._load_profile(self, args) # "self" is twice on purpose!
self._load_plugin_args(self, args) # don't change it or it breaks
def to_dictionary(self):
result = super(OrchestratorConfig, self).to_dictionary()
result["config_file"] = self.config_file
result["user_config_file"] = self.user_config_file
result["profile"] = self.profile
result["profile_file"] = self.profile_file
result["plugin_args"] = self.plugin_args
return result
#--------------------------------------------------------------------------
def check_params(self):
# Validate the network connections limit.
if self.max_connections < 1:
raise ValueError(
"Number of connections must be greater than 0,"
" got %i." % self.max_connections)
# Validate the number of concurrent processes.
if self.max_concurrent < 0:
raise ValueError(
"Number of processes cannot be a negative number,"
" got %i." % self.max_concurrent)
# Validate the list of plugins.
if not self.enable_plugins:
raise ValueError("No plugins selected for execution.")
if set(self.enable_plugins).intersection(self.disable_plugins):
raise ValueError(
"Conflicting plugins selection, aborting execution.")
#------------------------------------------------------------------------------
class AuditConfig (Configuration):
"""
Audit configuration object.
"""
#--------------------------------------------------------------------------
# The options definitions, they will be read from the config file:
#
    _forbidden_ = set((  # except for these:
"config_file", "user_config_file", "profile_file", "plugin_args",
"plugin_load_overrides", "command",
))
_settings_ = {
#
# Main options
#
# Targets
"targets": (Configuration.comma_separated_list, []),
#
# Report options
#
# Output files
"reports": (Configuration.comma_separated_list, []),
# Only display vulnerabilities
"only_vulns": (Configuration.trinary, None),
#
# Audit options
#
# Audit name
"audit_name": Configuration.string,
# Audit database
"audit_db": (None, ":memory:"),
# Input files
"imports": (Configuration.comma_separated_list, []),
# Redo the audit?
"redo": (Configuration.boolean, False),
#
# Plugin options
#
# Enabled plugins
"enable_plugins": (Configuration.comma_separated_list, ["all"]),
# Disabled plugins
"disable_plugins": (Configuration.comma_separated_list, []),
# Plugin execution timeout
"plugin_timeout": (Configuration.float, 3600.0),
#
# Network options
#
# Include subdomains?
"include_subdomains": (Configuration.boolean, True),
# Include parent folders?
"allow_parent": (Configuration.boolean, True),
# Depth level for spider
"depth": (Configuration.integer_or_none, 1),
# Limits
"max_links" : (Configuration.integer, 0), # 0 -> infinite
# Follow redirects
"follow_redirects": (Configuration.boolean, True),
# Follow a redirection on the target URL itself,
# regardless of "follow_redirects"
"follow_first_redirect": (Configuration.boolean, True),
# Proxy options
"proxy_addr": Configuration.string,
"proxy_port": Configuration.integer,
"proxy_user": Configuration.string,
"proxy_pass": Configuration.string,
# Cookie
"cookie": Configuration.string,
# User Agent
"user_agent": Configuration.string,
}
#--------------------------------------------------------------------------
# Options that are only set in runtime, not loaded from the config file.
# Configuration files.
config_file = get_default_config_file()
user_config_file = get_default_user_config_file()
# Profiles.
profile = None
profile_file = None
# Plugin arguments.
plugin_args = None # list of (plugin_id, key, value)
# Plugin load overrides.
plugin_load_overrides = None
# Command to run.
command = "SCAN"
#--------------------------------------------------------------------------
def from_dictionary(self, args):
# Security note: do not copy config filenames!
# See the _forbidden_ property.
super(AuditConfig, self).from_dictionary(args)
OrchestratorConfig._load_profile(self, args) # not a filename
OrchestratorConfig._load_plugin_args(self, args)
# Load the "command" property.
if "command" in args:
self.command = args["command"]
if isinstance(self.command, unicode):
self.command = self.command.encode("UTF-8")
# Load the "plugin_load_overrides" property.
if "plugin_load_overrides" in args:
if not self.plugin_load_overrides:
self.plugin_load_overrides = []
for (val, plugin_id) in args["plugin_load_overrides"]:
self.plugin_load_overrides.append((bool(val), str(plugin_id)))
#--------------------------------------------------------------------------
def to_dictionary(self):
result = super(AuditConfig, self).to_dictionary()
result["config_file"] = self.config_file
result["user_config_file"] = self.user_config_file
result["profile"] = self.profile
result["profile_file"] = self.profile_file
result["plugin_args"] = self.plugin_args
result["command"] = self.command
result["plugin_load_overrides"] = self.plugin_load_overrides
return result
#--------------------------------------------------------------------------
@property
def targets(self):
return self._targets
@targets.setter
def targets(self, targets):
# Always append, never overwrite.
# Fix target URLs if the scheme part is missing.
# Make sure self._targets contains a list.
self._targets = getattr(self, "_targets", [])
# Ignore the trivial case.
if not targets:
return
# Strip whitespace.
targets = [
x.strip()
for x in targets
if x not in self._targets
]
# Remove duplicates.
targets = [
x
for x in set(targets)
if x not in self._targets
]
# Encode all Unicode strings as UTF-8.
targets = [
x.encode("UTF-8") if isinstance(x, unicode) else str(x)
for x in targets
if x not in self._targets
]
# Detect network ranges, like 30.30.30.0/24, and get all IPs on it.
parsed_targets = []
for host in targets:
# Try to parse the address as a network range.
try:
tmp_target = IPNetwork(host)
except:
parsed_targets.append(host)
continue
# If it's a range, iterate it and get all IP addresses.
# If it's a single IP address, just add it.
if tmp_target.size != 1:
parsed_targets.extend(
str(x) for x in tmp_target.iter_hosts()
)
else:
parsed_targets.append( str(tmp_target.ip) )
# Add the new targets.
self._targets.extend(parsed_targets)
@targets.deleter
def targets(self):
self._targets = []
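    # Behavior sketch for the setter above (illustrative; "cfg" stands for
    # an AuditConfig instance): assignments accumulate rather than overwrite,
    # and CIDR ranges are expanded into individual host addresses.
    #
    #   cfg.targets = ["example.com"]
    #   cfg.targets = ["10.0.0.0/30"]
    #   cfg.targets   # -> ["example.com", "10.0.0.1", "10.0.0.2"]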
#--------------------------------------------------------------------------
@property
def imports(self):
return self._imports
@imports.setter
def imports(self, imports):
# Always append, never overwrite.
self._imports = getattr(self, "_imports", [])
if imports:
self._imports.extend( (str(x) if x else None) for x in imports )
#--------------------------------------------------------------------------
@property
def reports(self):
return self._reports
@reports.setter
def reports(self, reports):
# Always append, never overwrite.
self._reports = getattr(self, "_reports", [])
if reports:
self._reports.extend( (str(x) if x else None) for x in reports )
#--------------------------------------------------------------------------
@property
def audit_db(self):
return self._audit_db
@audit_db.setter
def audit_db(self, audit_db):
if (
not audit_db or not audit_db.strip() or
audit_db.strip().lower() == ":auto:"
):
audit_db = ":auto:"
elif audit_db.strip().lower() == ":memory:":
audit_db = ":memory:"
self._audit_db = audit_db
#--------------------------------------------------------------------------
@property
def user_agent(self):
return self._user_agent
@user_agent.setter
def user_agent(self, user_agent):
if user_agent:
if isinstance(user_agent, unicode):
user_agent = user_agent.encode("UTF-8")
self._user_agent = user_agent
else:
self._user_agent = None
#--------------------------------------------------------------------------
@property
def cookie(self):
return self._cookie
@cookie.setter
def cookie(self, cookie):
if cookie:
# Parse the cookies argument.
try:
if isinstance(cookie, unicode):
cookie = cookie.encode("UTF-8")
# Prepare cookie.
cookie = cookie.replace(" ", "").replace("=", ":")
# Remove 'Cookie:' start, if exits.
if cookie.startswith("Cookie:"):
cookie = cookie[len("Cookie:"):]
# Split.
cookie = cookie.split(";")
# Parse.
cookie = { c.split(":")[0]:c.split(":")[1] for c in cookie}
except ValueError:
raise ValueError(
"Invalid cookie format specified."
" Use this format: 'Key=value; key=value'.")
else:
cookie = None
self._cookie = cookie
#--------------------------------------------------------------------------
@property
def proxy_addr(self):
return self._proxy_addr
@proxy_addr.setter
def proxy_addr(self, proxy_addr):
if proxy_addr:
proxy_addr = proxy_addr.strip()
if isinstance(proxy_addr, unicode):
proxy_addr = proxy_addr.encode("UTF-8")
if ":" in proxy_addr:
proxy_addr, proxy_port = proxy_addr.split(":", 1)
proxy_addr = proxy_addr.strip()
proxy_port = proxy_port.strip()
self.proxy_port = proxy_port
self._proxy_addr = proxy_addr
else:
self._proxy_addr = None
#--------------------------------------------------------------------------
@property
def proxy_port(self):
return self._proxy_port
@proxy_port.setter
def proxy_port(self, proxy_port):
if proxy_port:
self._proxy_port = int(proxy_port)
            if self._proxy_port < 1 or self._proxy_port > 65535:
raise ValueError(
"Invalid proxy port number: %d" % self._proxy_port)
else:
self._proxy_port = None
#--------------------------------------------------------------------------
def check_params(self):
# Validate the list of plugins.
if not self.enable_plugins:
raise ValueError(
"No plugins selected for execution.")
if set(self.enable_plugins).intersection(self.disable_plugins):
raise ValueError(
"Conflicting plugins selection, aborting execution.")
# Validate the recursion depth.
if self.depth is not None and self.depth < 0:
raise ValueError(
"Spidering depth can't be negative: %r" % self.depth)
if self.depth is not None and self.depth == 0:
raise ValueError(
"Spidering depth can't be zero (nothing would be done!)")
#--------------------------------------------------------------------------
def is_new_audit(self):
"""
Determine if this is a brand new audit.
:returns: True if this is a new audit, False if it's an old audit.
:rtype: bool
"""
# Memory databases are always new audits.
if (
not self.audit_db or not self.audit_db.strip() or
self.audit_db.strip().lower() == ":memory:"
):
self.audit_db = ":memory:"
return True
# SQLite databases are new audits if the file doesn't exist.
# If we have no filename, use the audit name.
# If we don't have that either it's a new audit.
filename = self.audit_db
if not filename:
filename = self.audit_name + ".db"
if not filename:
return True
return not path.exists(filename)
| gpl-2.0 |
dednal/chromium.src | build/android/pylib/remote/device/remote_device_test_run.py | 9 | 9695 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run specific test on specific environment."""
import logging
import os
import sys
import tempfile
import time
import zipfile
from pylib import constants
from pylib.base import test_run
from pylib.remote.device import appurify_sanitized
from pylib.remote.device import remote_device_helper
from pylib.utils import zip_utils
class RemoteDeviceTestRun(test_run.TestRun):
"""Run gtests and uirobot tests on a remote device."""
WAIT_TIME = 5
COMPLETE = 'complete'
HEARTBEAT_INTERVAL = 300
def __init__(self, env, test_instance):
"""Constructor.
Args:
env: Environment the tests will run in.
test_instance: The test that will be run.
"""
super(RemoteDeviceTestRun, self).__init__(env, test_instance)
self._env = env
self._test_instance = test_instance
self._app_id = ''
self._test_id = ''
self._results = ''
self._test_run_id = ''
#override
def RunTests(self):
"""Run the test."""
if self._env.trigger:
with appurify_sanitized.SanitizeLogging(self._env.verbose_count,
logging.WARNING):
test_start_res = appurify_sanitized.api.tests_run(
self._env.token, self._env.device, self._app_id, self._test_id)
remote_device_helper.TestHttpResponse(
test_start_res, 'Unable to run test.')
self._test_run_id = test_start_res.json()['response']['test_run_id']
logging.info('Test run id: %s' % self._test_run_id)
if not self._env.collect:
assert isinstance(self._env.trigger, basestring), (
'File for storing test_run_id must be a string.')
with open(self._env.trigger, 'w') as test_run_id_file:
test_run_id_file.write(self._test_run_id)
if self._env.collect:
if not self._env.trigger:
        assert isinstance(self._env.collect, basestring), (
            'File for storing test_run_id must be a string.')
with open(self._env.collect, 'r') as test_run_id_file:
self._test_run_id = test_run_id_file.read().strip()
current_status = ''
timeout_counter = 0
heartbeat_counter = 0
while self._GetTestStatus(self._test_run_id) != self.COMPLETE:
if self._results['detailed_status'] != current_status:
logging.info('Test status: %s', self._results['detailed_status'])
current_status = self._results['detailed_status']
timeout_counter = 0
heartbeat_counter = 0
if heartbeat_counter > self.HEARTBEAT_INTERVAL:
logging.info('Test status: %s', self._results['detailed_status'])
heartbeat_counter = 0
timeout = self._env.timeouts.get(
current_status, self._env.timeouts['unknown'])
if timeout_counter > timeout:
raise remote_device_helper.RemoteDeviceError(
'Timeout while in %s state for %s seconds'
% (current_status, timeout))
time.sleep(self.WAIT_TIME)
timeout_counter += self.WAIT_TIME
heartbeat_counter += self.WAIT_TIME
self._DownloadTestResults(self._env.results_path)
return self._ParseTestResults()
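  # Flow sketch (illustrative): a trigger-only invocation writes the
  # test_run_id to a file so that a later collect-only invocation, possibly
  # on another machine, can poll the same run to completion.
  #
  #   with ConcreteTestRun(env, test_instance) as test_run:  # hypothetical subclass
  #     results = test_run.RunTests()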
#override
def TearDown(self):
"""Tear down the test run."""
if (self._env.collect
and self._GetTestStatus(self._test_run_id) != self.COMPLETE):
with appurify_sanitized.SanitizeLogging(self._env.verbose_count,
logging.WARNING):
test_abort_res = appurify_sanitized.api.tests_abort(
self._env.token, self._test_run_id, reason='Test runner exiting.')
remote_device_helper.TestHttpResponse(test_abort_res,
'Unable to abort test.')
def __enter__(self):
"""Set up the test run when used as a context manager."""
self.SetUp()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Tear down the test run when used as a context manager."""
self.TearDown()
#override
def SetUp(self):
"""Set up a test run."""
if self._env.trigger:
self._TriggerSetUp()
def _TriggerSetUp(self):
"""Set up the triggering of a test run."""
raise NotImplementedError
def _ParseTestResults(self):
raise NotImplementedError
def _GetTestByName(self, test_name):
"""Gets test_id for specific test.
Args:
test_name: Test to find the ID of.
"""
with appurify_sanitized.SanitizeLogging(self._env.verbose_count,
logging.WARNING):
test_list_res = appurify_sanitized.api.tests_list(self._env.token)
remote_device_helper.TestHttpResponse(test_list_res,
'Unable to get tests list.')
for test in test_list_res.json()['response']:
if test['test_type'] == test_name:
return test['test_id']
raise remote_device_helper.RemoteDeviceError(
'No test found with name %s' % (test_name))
def _DownloadTestResults(self, results_path):
"""Download the test results from remote device service.
Args:
results_path: path to download results to.
"""
if results_path:
logging.info('Downloading results to %s.' % results_path)
      if not os.path.exists(os.path.dirname(results_path)):
        os.makedirs(os.path.dirname(results_path))
with appurify_sanitized.SanitizeLogging(self._env.verbose_count,
logging.WARNING):
appurify_sanitized.utils.wget(self._results['results']['url'],
results_path)
def _GetTestStatus(self, test_run_id):
"""Checks the state of the test, and sets self._results
Args:
test_run_id: Id of test on on remote service.
"""
with appurify_sanitized.SanitizeLogging(self._env.verbose_count,
logging.WARNING):
test_check_res = appurify_sanitized.api.tests_check_result(
self._env.token, test_run_id)
remote_device_helper.TestHttpResponse(test_check_res,
'Unable to get test status.')
self._results = test_check_res.json()['response']
return self._results['status']
def _AmInstrumentTestSetup(self, app_path, test_path, runner_package):
config = {'runner': runner_package}
self._app_id = self._UploadAppToDevice(app_path)
data_deps = self._test_instance.GetDataDependencies()
if data_deps:
with tempfile.NamedTemporaryFile(suffix='.zip') as test_with_deps:
sdcard_files = []
host_test = os.path.basename(test_path)
with zipfile.ZipFile(test_with_deps.name, 'w') as zip_file:
zip_file.write(test_path, host_test, zipfile.ZIP_DEFLATED)
for h, _ in data_deps:
zip_utils.WriteToZipFile(zip_file, h, '.')
if os.path.isdir(h):
sdcard_files.extend(os.listdir(h))
else:
              sdcard_files.append(h)  # h is a single path; append it whole
config['sdcard_files'] = ','.join(sdcard_files)
config['host_test'] = host_test
self._test_id = self._UploadTestToDevice(
'robotium', test_with_deps.name)
else:
self._test_id = self._UploadTestToDevice('robotium', test_path)
logging.info('Setting config: %s' % config)
self._SetTestConfig('robotium', config)
def _UploadAppToDevice(self, app_path):
"""Upload app to device."""
logging.info('Uploading %s to remote service.', app_path)
with open(app_path, 'rb') as apk_src:
with appurify_sanitized.SanitizeLogging(self._env.verbose_count,
logging.WARNING):
upload_results = appurify_sanitized.api.apps_upload(
self._env.token, apk_src, 'raw', name=self._test_instance.suite)
remote_device_helper.TestHttpResponse(
upload_results, 'Unable to upload %s.' % app_path)
return upload_results.json()['response']['app_id']
def _UploadTestToDevice(self, test_type, test_path):
"""Upload test to device
Args:
      test_type: Type of test that is being uploaded. Ex. uirobot, gtest.
      test_path: Path to the test package being uploaded.
"""
logging.info('Uploading %s to remote service.' % test_path)
with open(test_path, 'rb') as test_src:
with appurify_sanitized.SanitizeLogging(self._env.verbose_count,
logging.WARNING):
upload_results = appurify_sanitized.api.tests_upload(
self._env.token, test_src, 'raw', test_type)
remote_device_helper.TestHttpResponse(upload_results,
'Unable to upload %s.' % test_path)
return upload_results.json()['response']['test_id']
def _SetTestConfig(self, runner_type, body):
"""Generates and uploads config file for test.
Args:
      runner_type: Type of test runner the config is for. Ex. robotium.
      body: Mapping of config keys to values for that runner section.
"""
logging.info('Generating config file for test.')
with tempfile.TemporaryFile() as config:
config_data = ['[appurify]', '[%s]' % runner_type]
config_data.extend('%s=%s' % (k, v) for k, v in body.iteritems())
config.write(''.join('%s\n' % l for l in config_data))
config.flush()
config.seek(0)
with appurify_sanitized.SanitizeLogging(self._env.verbose_count,
logging.WARNING):
config_response = appurify_sanitized.api.config_upload(
self._env.token, config, self._test_id)
remote_device_helper.TestHttpResponse(
config_response, 'Unable to upload test config.')
| bsd-3-clause |
Tomsod/gemrb | gemrb/GUIScripts/bg2/GUICG9.py | 7 | 2316 | # GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
#character generation, proficiencies (GUICG9)
import GemRB
from GUIDefines import *
from ie_stats import *
import LUProfsSelection
SkillWindow = 0
DoneButton = 0
MyChar = 0
def RedrawSkills():
ProfsPointsLeft = GemRB.GetVar ("ProfsPointsLeft")
if not ProfsPointsLeft:
DoneButton.SetState(IE_GUI_BUTTON_ENABLED)
else:
DoneButton.SetState(IE_GUI_BUTTON_DISABLED)
return
def OnLoad():
global SkillWindow, DoneButton, MyChar
GemRB.LoadWindowPack("GUICG", 640, 480)
SkillWindow = GemRB.LoadWindow(9)
MyChar = GemRB.GetVar ("Slot")
Levels = [GemRB.GetPlayerStat (MyChar, IE_LEVEL), GemRB.GetPlayerStat (MyChar, IE_LEVEL2), \
GemRB.GetPlayerStat (MyChar, IE_LEVEL3)]
LUProfsSelection.SetupProfsWindow (MyChar, \
LUProfsSelection.LUPROFS_TYPE_CHARGEN, SkillWindow, RedrawSkills, [0,0,0], Levels)
BackButton = SkillWindow.GetControl(77)
BackButton.SetText(15416)
BackButton.SetFlags (IE_GUI_BUTTON_CANCEL, OP_OR)
BackButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, BackPress)
DoneButton = SkillWindow.GetControl(0)
DoneButton.SetText(11973)
DoneButton.SetFlags(IE_GUI_BUTTON_DEFAULT,OP_OR)
DoneButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, NextPress)
RedrawSkills()
SkillWindow.SetVisible(WINDOW_VISIBLE)
return
def BackPress():
if SkillWindow:
SkillWindow.Unload()
GemRB.SetNextScript("CharGen6")
#scrap skills
return
def NextPress():
if SkillWindow:
SkillWindow.Unload()
LUProfsSelection.ProfsSave (MyChar, LUProfsSelection.LUPROFS_TYPE_CHARGEN)
GemRB.SetNextScript("CharGen7") #appearance
return
| gpl-2.0 |
wanghuan1115/sdkbox-vungle-sample | cpp/cocos2d/tools/gen-prebuilt/gen_prebuilt_libs.py | 80 | 13293 | #!/usr/bin/python
# ----------------------------------------------------------------------------
# generate the prebuilt libs of engine
#
# Copyright 2014 (C) zhangbin
#
# License: MIT
# ----------------------------------------------------------------------------
'''
Generate the prebuilt libs of engine
'''
import os
import subprocess
import shutil
import sys
import excopy
import json
from argparse import ArgumentParser
if sys.platform == 'win32':
import _winreg
TESTS_PROJ_PATH = "tests/lua-tests"
ANDROID_SO_PATH = "project/proj.android/libs"
ANDROID_A_PATH = "project/proj.android/obj/local"
MK_PATH = "project/proj.android/jni/Application.mk"
CONSOLE_PATH = "tools/cocos2d-console/bin"
def os_is_win32():
return sys.platform == 'win32'
def os_is_mac():
return sys.platform == 'darwin'
def run_shell(cmd, cwd=None):
p = subprocess.Popen(cmd, shell=True, cwd=cwd)
p.wait()
if p.returncode:
raise subprocess.CalledProcessError(returncode=p.returncode, cmd=cmd)
return p.returncode
class Generator(object):
XCODE_CMD_FMT = "xcodebuild -project \"%s\" -configuration Release -target \"%s\" %s CONFIGURATION_BUILD_DIR=%s"
CONFIG_FILE = "build_config.json"
KEY_XCODE_PROJ_INFO = "xcode_proj_info"
KEY_WIN32_PROJ_INFO = "win32_proj_info"
KEY_OUTPUT_DIR = "outputdir"
KEY_TARGETS = "targets"
def __init__(self, args):
self.need_clean = args.need_clean
self.disable_strip = args.disable_strip
self.use_incredibuild = args.use_incredibuild
self.tool_dir = os.path.realpath(os.path.dirname(__file__))
self.no_android = args.no_android
self.engine_dir = os.path.join(self.tool_dir, os.path.pardir, os.path.pardir)
self.load_config()
def load_config(self):
cfg_json = os.path.join(self.tool_dir, Generator.CONFIG_FILE)
f = open(cfg_json)
cfg_info = json.load(f)
f.close()
self.xcode_proj_info = cfg_info[Generator.KEY_XCODE_PROJ_INFO]
self.win32_proj_info = cfg_info[Generator.KEY_WIN32_PROJ_INFO]
def modify_mk(self, mk_file):
if os.path.isfile(mk_file):
file_obj = open(mk_file, "a")
file_obj.write("\nAPP_ABI :=armeabi armeabi-v7a\n")
file_obj.close()
def build_android(self):
# build .a for android
console_dir = os.path.join(self.engine_dir, CONSOLE_PATH)
cmd_path = os.path.join(console_dir, "cocos")
proj_path = os.path.join(self.engine_dir, TESTS_PROJ_PATH)
# Add multi ABI in Application.mk
mk_file = os.path.join(proj_path, MK_PATH)
f = open(mk_file)
file_content = f.read()
f.close()
self.modify_mk(mk_file)
# build it
build_cmd = "%s compile -s %s -p android --ndk-mode release -j 4" % (cmd_path, proj_path)
run_shell(build_cmd)
f = open(mk_file, "w")
f.write(file_content)
f.close()
# copy .a to prebuilt dir
obj_dir = os.path.join(proj_path, ANDROID_A_PATH)
prebuilt_dir = os.path.join(self.tool_dir, "prebuilt", "android")
copy_cfg = {
"from": obj_dir,
"to": prebuilt_dir,
"include": [
"*.a$"
]
}
excopy.copy_files_with_config(copy_cfg, obj_dir, prebuilt_dir)
if not self.disable_strip:
# strip the android libs
ndk_root = os.environ["NDK_ROOT"]
if os_is_win32():
if self.is_32bit_windows():
bit_str = ""
else:
bit_str = "-x86_64"
sys_folder_name = "windows%s" % bit_str
elif os_is_mac():
sys_folder_name = "darwin-x86_64"
strip_cmd_path = os.path.join(ndk_root, "toolchains/arm-linux-androideabi-4.8/prebuilt/%s/arm-linux-androideabi/bin/strip" % sys_folder_name)
if os.path.exists(strip_cmd_path):
strip_cmd = "%s -S %s/armeabi*/*.a" % (strip_cmd_path, prebuilt_dir)
run_shell(strip_cmd)
def get_required_vs_version(self, proj_file):
# get the VS version required by the project
import re
file_obj = open(proj_file)
pattern = re.compile(r"^# Visual Studio.+(\d{4})")
num = None
for line in file_obj:
match = pattern.match(line)
if match is not None:
num = match.group(1)
                break
        file_obj.close()
if num is not None:
if num == "2012":
ret = "11.0"
elif num == "2013":
ret = "12.0"
else:
ret = None
else:
ret = None
return ret
def get_vs_cmd_path(self, require_version):
# find the VS in register, if system is 64bit, should find vs in both 32bit & 64bit register
if self.is_32bit_windows():
reg_flag_list = [ _winreg.KEY_WOW64_32KEY ]
else:
reg_flag_list = [ _winreg.KEY_WOW64_64KEY, _winreg.KEY_WOW64_32KEY ]
needUpgrade = False
vsPath = None
try:
for reg_flag in reg_flag_list:
print("find vs in reg : %s" % ("32bit" if reg_flag == _winreg.KEY_WOW64_32KEY else "64bit"))
vs = _winreg.OpenKey(
_winreg.HKEY_LOCAL_MACHINE,
r"SOFTWARE\Microsoft\VisualStudio",
0,
_winreg.KEY_READ | reg_flag
)
try:
i = 0
while True:
try:
# enum the keys in vs reg
version = _winreg.EnumKey(vs, i)
find_ver = float(version)
# find the vs which version >= required version
if find_ver >= float(require_version):
key = _winreg.OpenKey(vs, r"SxS\VS7")
vsPath, type = _winreg.QueryValueEx(key, version)
if os.path.exists(vsPath):
if float(version) > float(require_version):
needUpgrade = True
break
else:
vsPath = None
except:
continue
finally:
i += 1
except:
pass
# if find one right vs, break
if vsPath is not None:
break
except WindowsError as e:
message = "Visual Studio wasn't installed"
print(e)
raise Exception(message)
commandPath = os.path.join(vsPath, "Common7", "IDE", "devenv")
return (needUpgrade, commandPath)
def is_32bit_windows(self):
arch = os.environ['PROCESSOR_ARCHITECTURE'].lower()
archw = os.environ.has_key("PROCESSOR_ARCHITEW6432")
return (arch == "x86" and not archw)
def build_win32_proj(self, cmd_path, sln_path, proj_name, mode):
build_cmd = " ".join([
"\"%s\"" % cmd_path,
"\"%s\"" % sln_path,
"/%s \"Release|Win32\"" % mode,
"/Project \"%s\"" % proj_name
])
run_shell(build_cmd)
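    # Example of the assembled command (illustrative paths and project name):
    #
    #   "C:\...\Common7\IDE\devenv" "cocos2d-win32.sln"
    #       /build "Release|Win32" /Project "libcocos2d"
    #
    # build_win32 below retries with mode="rebuild" when the expected .lib
    # is missing after a plain build.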
def build_win32(self):
print("Building Win32")
for key in self.win32_proj_info.keys():
output_dir = self.win32_proj_info[key][Generator.KEY_OUTPUT_DIR]
proj_path = os.path.join(self.engine_dir, key)
require_vs_version = self.get_required_vs_version(proj_path)
needUpgrade, vs_command = self.get_vs_cmd_path(require_vs_version)
# get the build folder & win32 output folder
build_folder_path = os.path.join(os.path.dirname(proj_path), "Release.win32")
if os.path.exists(build_folder_path):
shutil.rmtree(build_folder_path)
os.makedirs(build_folder_path)
win32_output_dir = os.path.join(self.tool_dir, output_dir)
if os.path.exists(win32_output_dir):
shutil.rmtree(win32_output_dir)
os.makedirs(win32_output_dir)
# upgrade projects
if needUpgrade:
commandUpgrade = ' '.join([
"\"%s\"" % vs_command,
"\"%s\"" % proj_path,
"/Upgrade"
])
run_shell(commandUpgrade)
if self.use_incredibuild:
# use incredibuild, build whole sln
build_cmd = " ".join([
"BuildConsole",
"%s" % proj_path,
"/build",
"/cfg=\"Release|Win32\""
])
run_shell(build_cmd)
if not self.use_incredibuild:
# build the projects
for proj_name in self.win32_proj_info[key][Generator.KEY_TARGETS]:
self.build_win32_proj(vs_command, proj_path, proj_name, "build")
lib_file_path = os.path.join(build_folder_path, "%s.lib" % proj_name)
if not os.path.exists(lib_file_path):
# if the lib is not generated, rebuild the project
self.build_win32_proj(vs_command, proj_path, proj_name, "rebuild")
if not os.path.exists(lib_file_path):
raise Exception("Library %s not generated as expected!" % lib_file_path)
# copy the libs into prebuilt dir
for file_name in os.listdir(build_folder_path):
file_path = os.path.join(build_folder_path, file_name)
shutil.copy(file_path, win32_output_dir)
print("Win32 build succeeded.")
def build_ios_mac(self):
for key in self.xcode_proj_info.keys():
output_dir = self.xcode_proj_info[key][Generator.KEY_OUTPUT_DIR]
proj_path = os.path.join(self.engine_dir, key)
ios_out_dir = os.path.join(self.tool_dir, output_dir, "ios")
mac_out_dir = os.path.join(self.tool_dir, output_dir, "mac")
ios_sim_libs_dir = os.path.join(ios_out_dir, "simulator")
ios_dev_libs_dir = os.path.join(ios_out_dir, "device")
for target in self.xcode_proj_info[key][Generator.KEY_TARGETS]:
build_cmd = Generator.XCODE_CMD_FMT % (proj_path, "%s iOS" % target, "-sdk iphonesimulator", ios_sim_libs_dir)
run_shell(build_cmd, self.tool_dir)
build_cmd = Generator.XCODE_CMD_FMT % (proj_path, "%s iOS" % target, "-sdk iphoneos", ios_dev_libs_dir)
run_shell(build_cmd, self.tool_dir)
build_cmd = Generator.XCODE_CMD_FMT % (proj_path, "%s Mac" % target, "", mac_out_dir)
run_shell(build_cmd, self.tool_dir)
# generate fat libs for iOS
for lib in os.listdir(ios_sim_libs_dir):
sim_lib = os.path.join(ios_sim_libs_dir, lib)
dev_lib = os.path.join(ios_dev_libs_dir, lib)
output_lib = os.path.join(ios_out_dir, lib)
lipo_cmd = "lipo -create -output \"%s\" \"%s\" \"%s\"" % (output_lib, sim_lib, dev_lib)
run_shell(lipo_cmd)
# remove the simulator & device libs in iOS
shutil.rmtree(ios_sim_libs_dir)
shutil.rmtree(ios_dev_libs_dir)
if not self.disable_strip:
# strip the libs
ios_strip_cmd = "xcrun -sdk iphoneos strip -S %s/*.a" % ios_out_dir
run_shell(ios_strip_cmd)
mac_strip_cmd = "xcrun strip -S %s/*.a" % mac_out_dir
run_shell(mac_strip_cmd)
def build_all_libs(self):
if os_is_mac():
# build for iOS & Mac
self.build_ios_mac()
if os_is_win32():
# build for win32
self.build_win32()
if not self.no_android:
self.build_android()
def do_generate(self):
output_dir = os.path.join(self.tool_dir, "prebuilt")
if self.need_clean and os.path.exists(output_dir):
shutil.rmtree(output_dir)
self.build_all_libs()
if __name__ == "__main__":
parser = ArgumentParser(description="Generate prebuilt engine for Cocos Engine.")
parser.add_argument('-c', dest='need_clean', action="store_true", help='Remove the \"prebuilt\" directory first.')
parser.add_argument('-n', "--no-android", dest='no_android', action="store_true", help='Not build android libs.')
parser.add_argument('-d', "--disable-strip", dest='disable_strip', action="store_true", help='Disable the strip of the generated libs.')
parser.add_argument('-i', "--incredibuild", dest='use_incredibuild', action="store_true", help='Use incredibuild to build win32 projects. Only available on windows.')
(args, unknown) = parser.parse_known_args()
if len(unknown) > 0:
print("unknown arguments: %s" % unknown)
gen_obj = Generator(args)
gen_obj.do_generate()
| mit |
wangjun/odoo | addons/base_report_designer/plugin/openerp_report_designer/test/test_fields.py | 391 | 1308 | #
# Use this module to retrieve the fields you need according to the type
# of the OpenOffice operation:
# * Insert a Field
# * Insert a RepeatIn
#
import xmlrpclib
import time
sock = xmlrpclib.ServerProxy('http://localhost:8069/xmlrpc/object')
def get(object, level=3, ending=None, ending_excl=None, recur=None, root=''):
if ending is None:
ending = []
if ending_excl is None:
ending_excl = []
if recur is None:
recur = []
    res = sock.execute('terp', 3, 'admin', object, 'fields_get')
key = res.keys()
key.sort()
for k in key:
if (not ending or res[k]['type'] in ending) and ((not ending_excl) or not (res[k]['type'] in ending_excl)):
print root+'/'+k
if res[k]['type'] in recur:
print root+'/'+k
if (res[k]['type'] in recur) and (level>0):
get(res[k]['relation'], level-1, ending, ending_excl, recur, root+'/'+k)
print 'Field selection for a field', '='*40
get('account.invoice', level=0, ending_excl=['one2many','many2one','many2many','reference'], recur=['many2one'])
print
print 'Field selection for a repeatIn', '='*40
get('account.invoice', level=0, ending=['one2many','many2many'], recur=['many2one'])
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
aldebjer/pysim | doc/conf.py | 1 | 10355 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# PySim documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 14 13:23:12 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from pysim import __version__
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
# 'sphinx.ext.imgmath',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'PySim'
copyright = '2014-2016, SSPA Sweden AB'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PySimdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'PySim.tex', 'PySim Documentation',
'Linus Aldebjer', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pysim', 'PySim Documentation',
['Linus Aldebjer'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'PySim', 'PySim Documentation',
'Linus Aldebjer', 'PySim', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = 'PySim'
epub_author = 'Linus Aldebjer'
epub_publisher = 'Linus Aldebjer'
epub_copyright = '2014-2016, SSPA Sweden AB'
# The basename for the epub file. It defaults to the project name.
#epub_basename = 'PySim'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
| bsd-3-clause |
yonglehou/pybrain | pybrain/rl/environments/shipsteer/viewer.py | 25 | 12969 | from __future__ import print_function
__author__ = 'Frank Sehnke, [email protected]'
#@PydevCodeAnalysisIgnore
#########################################################################
# OpenGL viewer for the FlexCube Environment
#
# The FlexCube Environment is a Mass-Spring-System composed of 8 mass points.
# These resemble a cube with flexible edges.
#
# This viewer uses an UDP connection found in tools/networking/udpconnection.py
#
# The viewer receives the position matrix of the 8 mass points and the center of gravity.
# With this information it renders a GLUT-based 3d visualization of the FlexCube.
#
# Options:
# - serverIP: The ip of the server to which the viewer should connect
# - ownIP: The IP of the computer running the viewer
# - port: The starting port (2 adjacent ports will be used)
#
# Saving the images is possible by setting self.savePics=True.
# Changing the point and angle of view is possible by using the mouse
# while button 1 or 2 pressed.
#
# Requirements: OpenGL
#
#########################################################################
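#########################################################################
# Usage sketch (illustrative; these are the constructor defaults):
#
#   viewer = FlexCubeRenderer(servIP="127.0.0.1", ownIP="127.0.0.1", port="21580")
#   viewer._render()   # opens the GLUT window and enters the main loop
#
#########################################################################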
from OpenGL.GLUT import *
from OpenGL.GL import *
from OpenGL.GLE import *
from OpenGL.GLU import *
from time import sleep
from scipy import ones, array, cos, sin
from pybrain.tools.networking.udpconnection import UDPClient
class FlexCubeRenderer(object):
    #Options: ServerIP(default:localhost), OwnIP(default:localhost), Port(default:21580)
def __init__(self, servIP="127.0.0.1", ownIP="127.0.0.1", port="21580"):
self.oldScreenValues = None
self.view = 0
self.worldRadius = 400
# Start of mousepointer
self.lastx = 0
self.lasty = 15
self.lastz = 300
self.zDis = 1
# Start of cube
self.cube = [0.0, 0.0, 0.0]
self.bmpCount = 0
self.actCount = 0
self.calcPhysics = 0
self.newPic = 1
self.picCount = 0
self.sensors = [0.0, 0.0, 0.0]
self.centerOfGrav = array([0.0, 5.0, 0.0])
self.savePics = False
self.drawCounter = 0
self.fps = 50
self.dt = 1.0 / float(self.fps)
self.step = 0
self.client = UDPClient(servIP, ownIP, port)
# If self.savePics=True this method saves the produced images
def saveTo(self, filename, format="JPEG"):
import Image # get PIL's functionality...
width, height = 800, 600
glPixelStorei(GL_PACK_ALIGNMENT, 1)
data = glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE)
image = Image.fromstring("RGB", (width, height), data)
image = image.transpose(Image.FLIP_TOP_BOTTOM)
image.save(filename, format)
        print('Saved image to %s' % filename)
return image
# the render method containing the Glut mainloop
def _render(self):
# Call init: Parameter(Window Position -> x, y, height, width)
self.init_GL(self, 300, 300, 800, 600)
self.quad = gluNewQuadric()
glutMainLoop()
# The Glut idle function
def drawIdleScene(self):
#recive data from server and update the points of the cube
try: self.sensors = self.client.listen(self.sensors)
except: pass
if self.sensors == ["r", "r", "r"]: self.centerOfGrav = array([0.0, 5.0, 0.0])
else:
self.step += 1
a = self.sensors[0] / 360.0 * 3.1428
dir = array([cos(a), 0.0, -sin(a)])
self.centerOfGrav += self.sensors[2] * dir * 0.02
self.drawScene()
if self.savePics:
self.saveTo("./screenshots/image_jump" + repr(10000 + self.picCount) + ".jpg")
self.picCount += 1
else: sleep(self.dt)
def drawScene(self):
        '''This method draws the complete scene.'''
# clear the buffer
if self.zDis < 10: self.zDis += 0.25
if self.lastz > 100: self.lastz -= self.zDis
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glLoadIdentity()
# Point of view
glRotatef(self.lastx, 0.0, 1.0, 0.0)
glRotatef(self.lasty, 1.0, 0.0, 0.0)
#glRotatef(15, 0.0, 0.0, 1.0)
# direction of view is aimed to the center of gravity of the cube
glTranslatef(-self.centerOfGrav[0], -self.centerOfGrav[1] - 50.0, -self.centerOfGrav[2] - self.lastz)
#Objects
#Massstab
for lk in range(41):
if float(lk - 20) / 10.0 == (lk - 20) / 10:
glColor3f(0.75, 0.75, 0.75)
glPushMatrix()
glRotatef(90, 1, 0, 0)
glTranslate(self.worldRadius / 40.0 * float(lk) - self.worldRadius / 2.0, -40.0, -30)
quad = gluNewQuadric()
gluCylinder(quad, 2, 2, 60, 4, 1)
glPopMatrix()
else:
if float(lk - 20) / 5.0 == (lk - 20) / 5:
glColor3f(0.75, 0.75, 0.75)
glPushMatrix()
glRotatef(90, 1, 0, 0)
glTranslate(self.worldRadius / 40.0 * float(lk) - self.worldRadius / 2.0, -40.0, -15.0)
quad = gluNewQuadric()
gluCylinder(quad, 1, 1, 30, 4, 1)
glPopMatrix()
else:
glColor3f(0.75, 0.75, 0.75)
glPushMatrix()
glRotatef(90, 1, 0, 0)
glTranslate(self.worldRadius / 40.0 * float(lk) - self.worldRadius / 2.0, -40.0, -7.5)
quad = gluNewQuadric()
gluCylinder(quad, 0.5, 0.5, 15, 4, 1)
glPopMatrix()
# Floor
tile = self.worldRadius / 40.0
glEnable (GL_BLEND)
glBlendFunc (GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glColor3f(0.8, 0.8, 0.5)
glPushMatrix()
glTranslatef(0.0, -3.0, 0.0)
glBegin(GL_QUADS)
glNormal(0.0, 1.0, 0.0)
glVertex3f(-self.worldRadius, 0.0, -self.worldRadius)
glVertex3f(-self.worldRadius, 0.0, self.worldRadius)
glVertex3f(self.worldRadius, 0.0, self.worldRadius)
glVertex3f(self.worldRadius, 0.0, -self.worldRadius)
glEnd()
glPopMatrix()
#Water
for xF in range(40):
for yF in range(40):
if float(xF + yF) / 2.0 == (xF + yF) / 2: glColor4f(0.7, 0.7, 1.0, 0.5)
else: glColor4f(0.9, 0.9, 1.0, 0.5)
glPushMatrix()
glTranslatef(0.0, -0.03, 0.0)
glBegin(GL_QUADS)
glNormal(0.5 + sin(float(xF) + float(self.step) / 4.0) * 0.5, 0.5 + cos(float(xF) + float(self.step) / 4.0) * 0.5, 0.0)
for i in range(2):
for k in range(2):
glVertex3f((i + xF - 20) * tile, sin(float(xF + i) + float(self.step) / 4.0) * 3.0, ((k ^ i) + yF - 20) * tile)
glEnd()
glPopMatrix()
self.ship()
# swap the buffer
glutSwapBuffers()
def ship(self):
glColor3f(0.4, 0.1, 0.2)
glPushMatrix()
glTranslate(self.centerOfGrav[0] + 14, self.centerOfGrav[1], self.centerOfGrav[2])
glRotatef(180 - self.sensors[0], 0.0, 1.0, 0.0)
self.cuboid(0, 0, 0, 20, 5, 5)
#bow of ship
glBegin(GL_TRIANGLES)
glNormal3fv(self.calcNormal(self.points2Vector([-5, 6, 2.5], [0, 5, 5]), self.points2Vector([-5, 6, 2.5], [0, 5, 0])))
glVertex3f(-5, 6, 2.5), glVertex3f(0, 5, 0), glVertex3f(0, 5, 5)
glNormal3fv(self.calcNormal(self.points2Vector([-5, 6, 2.5], [0, 0, 5]), self.points2Vector([-5, 6, 2.5], [0, 5, 5])))
glVertex3f(-5, 6, 2.5), glVertex3f(0, 0, 5), glVertex3f(0, 5, 5)
glNormal3fv(self.calcNormal(self.points2Vector([-5, 6, 2.5], [0, 0, 0]), self.points2Vector([-5, 6, 2.5], [0, 0, 5])))
glVertex3f(-5, 6, 2.5), glVertex3f(0, 0, 5), glVertex3f(0, 0, 0)
glNormal3fv(self.calcNormal(self.points2Vector([-5, 6, 2.5], [0, 5, 0]), self.points2Vector([-5, 6, 2.5], [0, 0, 0])))
glVertex3f(-5, 6, 2.5), glVertex3f(0, 0, 0), glVertex3f(0, 5, 0)
glEnd()
# stern
glPushMatrix()
glRotatef(-90, 1.0, 0.0, 0.0)
glTranslatef(15, -2.5, 0)
gluCylinder(self.quad, 2.5, 2.5, 5, 10, 1)
glTranslatef(0, 0, 5)
gluDisk(self.quad, 0, 2.5, 10, 1)
glPopMatrix()
# deck
if abs(self.sensors[0]) < 5.0: reward = (self.sensors[2] + 10.0) / 50.0
else: reward = 0.2
glColor3f(1.0 - reward, reward, 0)
self.cuboid(5, 5, 1, 10, 8, 4)
glPushMatrix()
glRotatef(-90, 1.0, 0.0, 0.0)
glTranslatef(13, -2.5, 5)
glColor3f(1, 1, 1)
gluCylinder(self.quad, 1, 0.8, 5, 20, 1)
glPopMatrix()
glPopMatrix()
def cuboid(self, x0, y0, z0, x1, y1, z1):
glBegin(GL_QUADS)
glNormal(0, 0, 1)
glVertex3f(x0, y0, z1); glVertex3f(x0, y1, z1); glVertex3f(x1, y1, z1); glVertex3f(x1, y0, z1) #front
glNormal(-1, 0, 0)
glVertex3f(x0, y0, z0); glVertex3f(x0, y0, z1); glVertex3f(x0, y1, z1); glVertex3f(x0, y1, z0) # left
glNormal(0, -1, 0)
glVertex3f(x0, y0, z0); glVertex3f(x0, y0, z1); glVertex3f(x1, y0, z1); glVertex3f(x1, y0, z0) # bottom
glNormal(0, 0, -1)
glVertex3f(x0, y0, z0); glVertex3f(x1, y0, z0); glVertex3f(x1, y1, z0); glVertex3f(x0, y1, z0) # back
glNormal(0, 1, 0)
glVertex3f(x0, y1, z0); glVertex3f(x1, y1, z0); glVertex3f(x1, y1, z1); glVertex3f(x0, y1, z1) # top
glNormal(1, 0, 0)
glVertex3f(x1, y0, z0); glVertex3f(x1, y0, z1); glVertex3f(x1, y1, z1); glVertex3f(x1, y1, z0) # right
glEnd()
def calcNormal(self, xVector, yVector):
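        # Cross product xVector x yVector: the result is normal to the
        # plane spanned by the two input vectors.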
result = [0, 0, 0]
result[0] = xVector[1] * yVector[2] - yVector[1] * xVector[2]
result[1] = -xVector[0] * yVector[2] + yVector[0] * xVector[2]
result[2] = xVector[0] * yVector[1] - yVector[0] * xVector[1]
return [result[0], result[1], result[2]]
def points2Vector(self, startPoint, endPoint):
result = [0, 0, 0]
result[0] = endPoint[0] - startPoint[0]
result[1] = endPoint[1] - startPoint[1]
result[2] = endPoint[2] - startPoint[2]
return [result[0], result[1], result[2]]
def resizeScene(self, width, height):
'''Needed if window size changes.'''
if height == 0: # Prevent A Divide By Zero If The Window Is Too Small
height = 1
glViewport(0, 0, width, height) # Reset The Current Viewport And Perspective Transformation
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(45.0, float(width) / float(height), 0.1, 700.0)
glMatrixMode(GL_MODELVIEW)
def activeMouse(self, x, y):
#Returns mouse coordinates while any mouse button is pressed.
# store the mouse coordinate
if self.mouseButton == GLUT_LEFT_BUTTON:
self.lastx = x - self.xOffset
self.lasty = y - self.yOffset
if self.mouseButton == GLUT_RIGHT_BUTTON:
self.lastz = y - self.zOffset
# redisplay
glutPostRedisplay()
def passiveMouse(self, x, y):
'''Returns mouse coordinates while no mouse button is pressed.'''
pass
def completeMouse(self, button, state, x, y):
#Returns mouse coordinates and which button was pressed resp. released.
self.mouseButton = button
if state == GLUT_DOWN:
self.xOffset = x - self.lastx
self.yOffset = y - self.lasty
self.zOffset = y - self.lastz
# redisplay
glutPostRedisplay()
    # Initialise an OpenGL window with its origin at (x, y) and a size of (height, width).
def init_GL(self, pyWorld, x, y, height, width):
# initialize GLUT
glutInit([])
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA | GLUT_DEPTH)
glutInitWindowSize(height, width)
glutInitWindowPosition(x, y)
glutCreateWindow("The Curious Cube")
glClearDepth(1.0)
glEnable(GL_DEPTH_TEST)
glClearColor(0.0, 0.0, 0.0, 0.0)
glShadeModel(GL_SMOOTH)
glMatrixMode(GL_MODELVIEW)
        # initialize lighting
glLightfv(GL_LIGHT0, GL_DIFFUSE, [1, 1, 1, 1.0])
glLightModelfv(GL_LIGHT_MODEL_AMBIENT, [1.0, 1.0, 1.0, 1.0])
glEnable(GL_LIGHTING)
glEnable(GL_LIGHT0)
#
glColorMaterial(GL_FRONT, GL_DIFFUSE)
glEnable(GL_COLOR_MATERIAL)
        # Automatic vector normalisation
glEnable(GL_NORMALIZE)
### Instantiate the virtual world ###
glutDisplayFunc(pyWorld.drawScene)
glutMotionFunc(pyWorld.activeMouse)
glutMouseFunc(pyWorld.completeMouse)
glutReshapeFunc(pyWorld.resizeScene)
glutIdleFunc(pyWorld.drawIdleScene)
if __name__ == '__main__':
s = sys.argv[1:]
r = FlexCubeRenderer(*s)
r._render()
| bsd-3-clause |
Phonemetra/TurboCoin | test/functional/rpc_deprecated.py | 1 | 1168 | #!/usr/bin/env python3
# Copyright (c) 2017-2019 TurboCoin
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test deprecation of RPC calls."""
from test_framework.test_framework import TurbocoinTestFramework
# from test_framework.util import assert_raises_rpc_error
class DeprecatedRpcTest(TurbocoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [[], []]
def run_test(self):
# This test should be used to verify correct behaviour of deprecated
# RPC methods with and without the -deprecatedrpc flags. For example:
#
# In set_test_params:
# self.extra_args = [[], ["-deprecatedrpc=generate"]]
#
# In run_test:
# self.log.info("Test generate RPC")
# assert_raises_rpc_error(-32, 'The wallet generate rpc method is deprecated', self.nodes[0].rpc.generate, 1)
# self.nodes[1].generate(1)
self.log.info("No tested deprecated RPC methods")
if __name__ == '__main__':
DeprecatedRpcTest().main()
| mit |
storborg/axibot | axibot/colors.py | 1 | 1166 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import math
from operator import itemgetter
from colormath.color_objects import sRGBColor, LabColor
from colormath.color_conversions import convert_color
pen_sets = {
'precise-v5': {
'black': (59, 59, 59),
'blue': (61, 93, 134),
'red': (138, 56, 60),
'green': (52, 126, 101),
'purple': (93, 90, 179),
'lightblue': (69, 153, 189),
'pink': (225, 87, 146),
}
}
def rgb_to_lab(rgb):
rgb_color = sRGBColor(rgb[0], rgb[1], rgb[2])
lab_color = convert_color(rgb_color, LabColor)
return lab_color.lab_l, lab_color.lab_a, lab_color.lab_b
def perceptual_distance(a, b):
a = rgb_to_lab(a)
b = rgb_to_lab(b)
return math.sqrt((b[2] - a[2])**2 +
(b[1] - a[1])**2 +
(b[0] - a[0])**2)
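# This is the Euclidean distance in CIELAB space, i.e. the CIE76 delta-E
# colour-difference metric.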
def find_pen_match(color, pen_set):
scores = {}
for pen, pen_color in pen_sets[pen_set].items():
scores[pen] = perceptual_distance(color, pen_color)
    scores = sorted(scores.items(), key=itemgetter(1))  # dict.items() has no .sort() on py3
return scores[0]
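# Minimal usage sketch (hypothetical RGB triple, on the same 0-255 scale as
# the pen table above):
#   pen, delta = find_pen_match((60, 90, 130), 'precise-v5')  # -> ('blue', ...)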
| gpl-2.0 |
fossoult/odoo | openerp/__init__.py | 235 | 3586 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" OpenERP core library."""
#----------------------------------------------------------
# Running mode flags (gevent, prefork)
#----------------------------------------------------------
# Is the server running with gevent.
import sys
evented = False
if sys.modules.get("gevent") is not None:
evented = True
# Is the server running in prefork mode (e.g. behind Gunicorn).
# If this is True, the processes have to communicate some events,
# e.g. database update or cache invalidation. Each process has also
# its own copy of the data structure and we don't need to care about
# locks between threads.
multi_process = False
#----------------------------------------------------------
# libc UTC hack
#----------------------------------------------------------
# Make sure the OpenERP server runs in UTC. This is especially necessary
# under Windows as under Linux it seems the real import of time is
# sufficiently deferred so that setting the TZ environment variable
# in openerp.cli.server was working.
import os
os.environ['TZ'] = 'UTC' # Set the timezone...
import time # ... *then* import time.
del os
del time
#----------------------------------------------------------
# Shortcuts
#----------------------------------------------------------
# The hard-coded super-user id (a.k.a. administrator, or root user).
SUPERUSER_ID = 1
def registry(database_name=None):
"""
Return the model registry for the given database, or the database mentioned
on the current thread. If the registry does not exist yet, it is created on
the fly.
"""
if database_name is None:
import threading
database_name = threading.currentThread().dbname
return modules.registry.RegistryManager.get(database_name)
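# Minimal usage sketch (hypothetical database name):
#   registry('somedb')  # -> model registry for 'somedb', created on demand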
#----------------------------------------------------------
# Imports
#----------------------------------------------------------
import addons
import conf
import loglevels
import modules
import netsvc
import osv
import pooler
import release
import report
import service
import sql_db
import tools
import workflow
#----------------------------------------------------------
# Model classes, fields, api decorators, and translations
#----------------------------------------------------------
from . import models
from . import fields
from . import api
from openerp.tools.translate import _
#----------------------------------------------------------
# Other imports, which may require stuff from above
#----------------------------------------------------------
import cli
import http
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
karllessard/tensorflow | tensorflow/python/kernel_tests/cumulative_logsumexp_test.py | 15 | 4359 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for cumulative_logsumexp op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class CumulativeLogsumexpTest(test.TestCase):
valid_dtypes = [dtypes.float32, dtypes.float64]
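  # The tests below rely on the identity exp(cumulative_logsumexp(x)) ==
  # cumsum(exp(x)). The fused op works in log-space, so it stays finite
  # where the naive cumsum(exp(x)) would overflow (see test1DLarge).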
def _computeLogSumExp(self, x, **kwargs):
result_naive = math_ops.cumsum(math_ops.exp(x), **kwargs)
result_fused = math_ops.exp(math_ops.cumulative_logsumexp(x, **kwargs))
return result_naive, result_fused
def _testLogSumExp(self, x, dtype=dtypes.float32, use_gpu=False, **kwargs):
with self.cached_session(use_gpu=use_gpu):
x = ops.convert_to_tensor(x, dtype=dtype)
result_naive, result_fused = self.evaluate(
self._computeLogSumExp(x, **kwargs))
self.assertAllClose(result_naive, result_fused)
def _testLogSumExpAllArgs(self, x, axis=0, use_gpu=False):
for dtype in self.valid_dtypes:
for reverse in (True, False):
for exclusive in (True, False):
self._testLogSumExp(
x, dtype=dtype, use_gpu=use_gpu,
reverse=reverse, exclusive=exclusive,
axis=axis)
def testMinusInfinity(self):
x = np.log([0., 0., 1., 1., 1., 1., 0., 0.])
self._testLogSumExpAllArgs(x, use_gpu=False)
self._testLogSumExpAllArgs(x, use_gpu=True)
def test1D(self):
x = np.arange(10) / 10.0 - 0.5
self._testLogSumExpAllArgs(x, use_gpu=False)
self._testLogSumExpAllArgs(x, use_gpu=True)
def test2D(self):
x = np.reshape(np.arange(20) / 20.0 - 0.5, (2, 10))
for axis in (-2, -1, 0, 1):
self._testLogSumExpAllArgs(x, axis=axis, use_gpu=False)
self._testLogSumExpAllArgs(x, axis=axis, use_gpu=True)
def _testGradient(self, x, use_gpu=False, **kwargs):
with self.cached_session(use_gpu=use_gpu):
x = ops.convert_to_tensor(x, dtype=dtypes.float64)
grad_naive_theoretical, _ = gradient_checker_v2.compute_gradient(
lambda y: math_ops.cumsum(math_ops.exp(y), **kwargs), [x])
grad_fused_theoretical, _ = gradient_checker_v2.compute_gradient(
lambda y: math_ops.exp(math_ops.cumulative_logsumexp(y, **kwargs)),
[x])
self.assertAllClose(grad_fused_theoretical, grad_naive_theoretical)
def testGradient(self):
for reverse in (True, False):
for exclusive in (True, False):
x = np.arange(10) / 10.0 - 0.5
self._testGradient(x, use_gpu=False,
reverse=reverse, exclusive=exclusive)
self._testGradient(x, use_gpu=True,
reverse=reverse, exclusive=exclusive)
def _logSumExpMap(self, x):
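    # Reference implementation: reduce_logsumexp over every prefix x[:i + 1],
    # computed independently for each index i.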
return map_fn.map_fn(
lambda i: math_ops.reduce_logsumexp(x[:i + 1]),
math_ops.range(array_ops.shape(x)[0]),
dtype=x.dtype)
def test1DLarge(self):
# This test ensures that the operation is correct even when the naive
# implementation would overflow.
x_np = np.arange(20) * 20.0
for use_gpu in (True, False):
with self.cached_session(use_gpu=use_gpu):
x_tf = ops.convert_to_tensor(x_np, dtype=dtypes.float32)
result_fused = self.evaluate(math_ops.cumulative_logsumexp(x_tf))
result_map = self.evaluate(self._logSumExpMap(x_tf))
self.assertAllClose(result_fused, result_map)
if __name__ == '__main__':
test.main()
| apache-2.0 |
ssvsergeyev/ZenPacks.zenoss.AWS | src/boto/tests/unit/beanstalk/test_exception.py | 114 | 2085 | # Copyright (c) 2014 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.beanstalk.exception import simple
from tests.compat import unittest
class FakeError(object):
def __init__(self, code, status, reason, body):
self.code = code
self.status = status
self.reason = reason
self.body = body
class TestExceptions(unittest.TestCase):
def test_exception_class_names(self):
# Create exception from class name
error = FakeError('TooManyApplications', 400, 'foo', 'bar')
exception = simple(error)
self.assertEqual(exception.__class__.__name__, 'TooManyApplications')
# Create exception from class name + 'Exception' as seen from the
# live service today
error = FakeError('TooManyApplicationsException', 400, 'foo', 'bar')
exception = simple(error)
self.assertEqual(exception.__class__.__name__, 'TooManyApplications')
# Make sure message body is present
self.assertEqual(exception.message, 'bar')
| gpl-2.0 |
jnewland/home-assistant | homeassistant/components/wemo/binary_sensor.py | 7 | 4329 | """Support for WeMo binary sensors."""
import asyncio
import logging
import async_timeout
import requests
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.exceptions import PlatformNotReady
from . import SUBSCRIPTION_REGISTRY
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Register discovered WeMo binary sensors."""
from pywemo import discovery
if discovery_info is not None:
location = discovery_info['ssdp_description']
mac = discovery_info['mac_address']
try:
device = discovery.device_from_description(location, mac)
except (requests.exceptions.ConnectionError,
requests.exceptions.Timeout) as err:
_LOGGER.error('Unable to access %s (%s)', location, err)
raise PlatformNotReady
if device:
add_entities([WemoBinarySensor(hass, device)])
class WemoBinarySensor(BinarySensorDevice):
"""Representation a WeMo binary sensor."""
def __init__(self, hass, device):
"""Initialize the WeMo sensor."""
self.wemo = device
self._state = None
self._available = True
self._update_lock = None
self._model_name = self.wemo.model_name
self._name = self.wemo.name
self._serialnumber = self.wemo.serialnumber
def _subscription_callback(self, _device, _type, _params):
"""Update the state by the Wemo sensor."""
_LOGGER.debug("Subscription update for %s", self.name)
updated = self.wemo.subscription_update(_type, _params)
self.hass.add_job(
self._async_locked_subscription_callback(not updated))
async def _async_locked_subscription_callback(self, force_update):
"""Handle an update from a subscription."""
# If an update is in progress, we don't do anything
if self._update_lock.locked():
return
await self._async_locked_update(force_update)
self.async_schedule_update_ha_state()
async def async_added_to_hass(self):
"""Wemo sensor added to HASS."""
# Define inside async context so we know our event loop
self._update_lock = asyncio.Lock()
registry = SUBSCRIPTION_REGISTRY
await self.hass.async_add_executor_job(registry.register, self.wemo)
registry.on(self.wemo, None, self._subscription_callback)
async def async_update(self):
"""Update WeMo state.
        Wemo has aggressive retry logic that can sometimes take over a
minute to return. If we don't get a state after 5 seconds, assume the
Wemo sensor is unreachable. If update goes through, it will be made
available again.
"""
# If an update is in progress, we don't do anything
if self._update_lock.locked():
return
try:
with async_timeout.timeout(5):
await asyncio.shield(self._async_locked_update(True))
except asyncio.TimeoutError:
_LOGGER.warning('Lost connection to %s', self.name)
self._available = False
async def _async_locked_update(self, force_update):
"""Try updating within an async lock."""
async with self._update_lock:
await self.hass.async_add_executor_job(self._update, force_update)
def _update(self, force_update=True):
"""Update the sensor state."""
try:
self._state = self.wemo.get_state(force_update)
if not self._available:
_LOGGER.info('Reconnected to %s', self.name)
self._available = True
except AttributeError as err:
_LOGGER.warning("Could not update status for %s (%s)",
self.name, err)
self._available = False
@property
def unique_id(self):
"""Return the id of this WeMo sensor."""
return self._serialnumber
@property
def name(self):
"""Return the name of the service if any."""
return self._name
@property
def is_on(self):
"""Return true if sensor is on."""
return self._state
@property
def available(self):
"""Return true if sensor is available."""
return self._available
| apache-2.0 |
payet-s/pyrser | pyrser/directives/ignore.py | 2 | 1577 | from pyrser import meta, parsing
@meta.rule(parsing.Parser, "Base.ignore_cxx")
def ignore_cxx(self) -> bool:
"""Consume comments and whitespace characters."""
self._stream.save_context()
while not self.read_eof():
idxref = self._stream.index
if self._stream.peek_char in " \t\v\f\r\n":
while (not self.read_eof()
and self._stream.peek_char in " \t\v\f\r\n"):
self._stream.incpos()
if self.peek_text("//"):
while not self.read_eof() and not self.peek_char("\n"):
self._stream.incpos()
if not self.read_char("\n") and self.read_eof():
return self._stream.validate_context()
if self.peek_text("/*"):
while not self.read_eof() and not self.peek_text("*/"):
self._stream.incpos()
if not self.read_text("*/") and self.read_eof():
return self._stream.restore_context()
if idxref == self._stream.index:
break
return self._stream.validate_context()
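# The "ignore" directive swaps the active skip convention around the wrapped
# clause: begin() pushes one of the known conventions ("null", "C/C++" or
# "blanks") and end() pops it again once the clause has been parsed.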
@meta.directive("ignore")
class Ignore(parsing.DirectiveWrapper):
def begin(self, parser, convention: str):
if convention == "null":
parser.push_ignore(parsing.Parser.ignore_null)
if convention == "C/C++":
parser.push_ignore(parsing.Parser.ignore_cxx)
if convention == "blanks":
parser.push_ignore(parsing.Parser.ignore_blanks)
return True
def end(self, parser, convention: str):
parser.pop_ignore()
return True
| gpl-3.0 |
TheMOOCAgency/edx-platform | openedx/core/djangoapps/content/block_structure/signals.py | 13 | 1230 | """
Signal handlers for invalidating cached data.
"""
from django.conf import settings
from django.dispatch.dispatcher import receiver
from xmodule.modulestore.django import SignalHandler
from .api import clear_course_from_cache
from .tasks import update_course_in_cache
@receiver(SignalHandler.course_published)
def _listen_for_course_publish(sender, course_key, **kwargs): # pylint: disable=unused-argument
"""
Catches the signal that a course has been published in the module
store and creates/updates the corresponding cache entry.
"""
clear_course_from_cache(course_key)
    # The countdown kwarg delays the task so that it runs only after the
    # signal emitter has finished all of its operations.
update_course_in_cache.apply_async(
[unicode(course_key)],
countdown=settings.BLOCK_STRUCTURES_SETTINGS['BLOCK_STRUCTURES_COURSE_PUBLISH_TASK_DELAY'],
)
@receiver(SignalHandler.course_deleted)
def _listen_for_course_delete(sender, course_key, **kwargs): # pylint: disable=unused-argument
"""
Catches the signal that a course has been deleted from the
module store and invalidates the corresponding cache entry if one
exists.
"""
clear_course_from_cache(course_key)
| agpl-3.0 |
letaureau/b-tk.core | Testing/Python/SeparateKnownVirtualMarkersFilterTest.py | 4 | 18217 | import btk
import unittest
import _TDDConfigure
import numpy
class SeparateKnownVirtualMarkersFilterTest(unittest.TestCase):
def test_Constructor(self):
skvm = btk.btkSeparateKnownVirtualMarkersFilter()
labels = skvm.GetVirtualReferenceFrames()
num = 19
self.assertEqual(labels.size(), num)
it = labels.begin()
if (labels.size() >= num):
# HED
self.assertEqual(it.value().Origin, 'HEDO')
self.assertEqual(it.value().Axis1, 'HEDA')
self.assertEqual(it.value().Axis2, 'HEDL')
self.assertEqual(it.value().Axis3, 'HEDP')
it.incr()
# LCL
self.assertEqual(it.value().Origin, 'LCLO')
self.assertEqual(it.value().Axis1, 'LCLA')
self.assertEqual(it.value().Axis2, 'LCLL')
self.assertEqual(it.value().Axis3, 'LCLP')
it.incr()
# LFE
self.assertEqual(it.value().Origin, 'LFEO')
self.assertEqual(it.value().Axis1, 'LFEA')
self.assertEqual(it.value().Axis2, 'LFEL')
self.assertEqual(it.value().Axis3, 'LFEP')
it.incr()
# LFO
self.assertEqual(it.value().Origin, 'LFOO')
self.assertEqual(it.value().Axis1, 'LFOA')
self.assertEqual(it.value().Axis2, 'LFOL')
self.assertEqual(it.value().Axis3, 'LFOP')
it.incr()
# LHN
self.assertEqual(it.value().Origin, 'LHNO')
self.assertEqual(it.value().Axis1, 'LHNA')
self.assertEqual(it.value().Axis2, 'LHNL')
self.assertEqual(it.value().Axis3, 'LHNP')
it.incr()
# LHU
self.assertEqual(it.value().Origin, 'LHUO')
self.assertEqual(it.value().Axis1, 'LHUA')
self.assertEqual(it.value().Axis2, 'LHUL')
self.assertEqual(it.value().Axis3, 'LHUP')
it.incr()
# LRA
self.assertEqual(it.value().Origin, 'LRAO')
self.assertEqual(it.value().Axis1, 'LRAA')
self.assertEqual(it.value().Axis2, 'LRAL')
self.assertEqual(it.value().Axis3, 'LRAP')
it.incr()
# LTI
self.assertEqual(it.value().Origin, 'LTIO')
self.assertEqual(it.value().Axis1, 'LTIA')
self.assertEqual(it.value().Axis2, 'LTIL')
self.assertEqual(it.value().Axis3, 'LTIP')
it.incr()
# LTO
self.assertEqual(it.value().Origin, 'LTOO')
self.assertEqual(it.value().Axis1, 'LTOA')
self.assertEqual(it.value().Axis2, 'LTOL')
self.assertEqual(it.value().Axis3, 'LTOP')
it.incr()
# PEL
self.assertEqual(it.value().Origin, 'PELO')
self.assertEqual(it.value().Axis1, 'PELA')
self.assertEqual(it.value().Axis2, 'PELL')
self.assertEqual(it.value().Axis3, 'PELP')
it.incr()
# RCL
self.assertEqual(it.value().Origin, 'RCLO')
self.assertEqual(it.value().Axis1, 'RCLA')
self.assertEqual(it.value().Axis2, 'RCLL')
self.assertEqual(it.value().Axis3, 'RCLP')
it.incr()
# RFE
self.assertEqual(it.value().Origin, 'RFEO')
self.assertEqual(it.value().Axis1, 'RFEA')
self.assertEqual(it.value().Axis2, 'RFEL')
self.assertEqual(it.value().Axis3, 'RFEP')
it.incr()
# RFO
self.assertEqual(it.value().Origin, 'RFOO')
self.assertEqual(it.value().Axis1, 'RFOA')
self.assertEqual(it.value().Axis2, 'RFOL')
self.assertEqual(it.value().Axis3, 'RFOP')
it.incr()
# RHN
self.assertEqual(it.value().Origin, 'RHNO')
self.assertEqual(it.value().Axis1, 'RHNA')
self.assertEqual(it.value().Axis2, 'RHNL')
self.assertEqual(it.value().Axis3, 'RHNP')
it.incr()
# RHU
self.assertEqual(it.value().Origin, 'RHUO')
self.assertEqual(it.value().Axis1, 'RHUA')
self.assertEqual(it.value().Axis2, 'RHUL')
self.assertEqual(it.value().Axis3, 'RHUP')
it.incr()
# RRA
self.assertEqual(it.value().Origin, 'RRAO')
self.assertEqual(it.value().Axis1, 'RRAA')
self.assertEqual(it.value().Axis2, 'RRAL')
self.assertEqual(it.value().Axis3, 'RRAP')
it.incr()
# RTI
self.assertEqual(it.value().Origin, 'RTIO')
self.assertEqual(it.value().Axis1, 'RTIA')
self.assertEqual(it.value().Axis2, 'RTIL')
self.assertEqual(it.value().Axis3, 'RTIP')
it.incr()
# RTO
self.assertEqual(it.value().Origin, 'RTOO')
self.assertEqual(it.value().Axis1, 'RTOA')
self.assertEqual(it.value().Axis2, 'RTOL')
self.assertEqual(it.value().Axis3, 'RTOP')
it.incr()
# TRX
self.assertEqual(it.value().Origin, 'TRXO')
self.assertEqual(it.value().Axis1, 'TRXA')
self.assertEqual(it.value().Axis2, 'TRXL')
self.assertEqual(it.value().Axis3, 'TRXP')
it.incr()
labels2 = skvm.GetVirtualMarkers()
num = 2
self.assertEqual(labels2.size(), num)
it2 = labels2.begin()
if (labels2.size() >= num):
self.assertEqual(it2.value(), 'CentreOfMass')
it2.incr()
self.assertEqual(it2.value(), 'CentreOfMassFloor')
def test_DefaultLabels(self):
reader = btk.btkAcquisitionFileReader()
reader.SetFilename(_TDDConfigure.C3DFilePathIN + 'sample09/PlugInC3D.c3d')
reader.Update()
skvm = btk.btkSeparateKnownVirtualMarkersFilter()
skvm.SetInput(reader.GetOutput().GetPoints())
skvm.Update()
# markers
points = skvm.GetOutput(0)
self.assertEqual(points.GetItemNumber(), 18)
inc = 0
self.assertEqual(points.GetItem(inc).GetLabel(), 'RKNE'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RTOE'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RTIB'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RASI'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RTHI'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RHEE'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LKNE'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RANK'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RCLA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LTHI'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LASI'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'C7'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LTOE'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LANK'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'SACR'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LHEE'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LCLA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LTIB');
# virtual used for axes
points = skvm.GetOutput(1)
self.assertEqual(points.GetItemNumber(), 36)
inc = 0
self.assertEqual(points.GetItem(inc).GetLabel(), 'LFEO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LFEA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LFEL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LFEP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LFOO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LFOA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LFOL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LFOP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LTIO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LTIA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LTIL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LTIP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LTOO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LTOA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LTOL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'LTOP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'PELO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'PELA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'PELL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'PELP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RFEO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RFEA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RFEL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RFEP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RFOO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RFOA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RFOL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RFOP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RTIO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RTIA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RTIL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RTIP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RTOO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RTOA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RTOL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'RTOP')
# other virtual markers
points = skvm.GetOutput(2)
self.assertEqual(points.GetItemNumber(), 0)
# other type of points
points = skvm.GetOutput(3)
self.assertEqual(points.GetItemNumber(), 32)
def test_DefaultLabelsAndPrefix(self):
reader = btk.btkAcquisitionFileReader()
reader.SetFilename(_TDDConfigure.C3DFilePathIN + 'sample04/sub_labels.c3d')
reader.Update()
skvm = btk.btkSeparateKnownVirtualMarkersFilter()
skvm.SetInput(reader.GetOutput().GetPoints())
skvm.SetLabelPrefix('Matt:')
skvm.Update()
# markers
points = skvm.GetOutput(0)
self.assertEqual(points.GetItemNumber(), 50)
inc = 0
# virtual used for axes
points = skvm.GetOutput(1)
self.assertEqual(points.GetItemNumber(), 36)
inc = 0
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LFEO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LFEA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LFEL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LFEP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LFOO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LFOA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LFOL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LFOP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LTIO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LTIA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LTIL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LTIP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LTOO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LTOA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LTOL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LTOP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:PELO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:PELA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:PELL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:PELP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RFEO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RFEA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RFEL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RFEP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RFOO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RFOA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RFOL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RFOP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RTIO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RTIA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RTIL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RTIP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RTOO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RTOA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RTOL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RTOP')
# other virtual markers
points = skvm.GetOutput(2)
self.assertEqual(points.GetItemNumber(), 0)
# other type of points
points = skvm.GetOutput(3)
self.assertEqual(points.GetItemNumber(), 28)
def test_FromLabelsLists(self):
# virtual markers for frame axes
labels = ['LFE', 'LFO', 'LTI', 'LTO', 'RFE', 'RFO', 'RTI', 'RTO']
virtualMarkerLabelsAxes = btk.btkStringAxesList()
for i in range(0, len(labels)):
label = labels[i]
virtualMarkerLabelsAxes.push_back(btk.btkStringAxes(label + 'O', label + 'A', label + 'L', label + 'P'))
reader = btk.btkAcquisitionFileReader()
reader.SetFilename(_TDDConfigure.C3DFilePathIN + 'sample04/sub_labels.c3d')
reader.Update()
skvm = btk.btkSeparateKnownVirtualMarkersFilter()
skvm.SetInput(reader.GetOutput().GetPoints())
skvm.SetLabelPrefix('Matt:')
skvm.AppendVirtualMarker('LKNE')
skvm.AppendVirtualMarker('RKNE')
skvm.SetVirtualReferenceFrames(virtualMarkerLabelsAxes)
skvm.Update()
# markers
points = skvm.GetOutput(0)
self.assertEqual(points.GetItemNumber(), 52)
inc = 0
# virtual used for axes
points = skvm.GetOutput(1)
self.assertEqual(points.GetItemNumber(), 32)
inc = 0
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LFEO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LFEA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LFEL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LFEP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LFOO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LFOA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LFOL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LFOP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LTIO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LTIA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LTIL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LTIP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LTOO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LTOA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LTOL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LTOP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RFEO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RFEA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RFEL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RFEP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RFOO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RFOA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RFOL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RFOP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RTIO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RTIA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RTIL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RTIP'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RTOO'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RTOA'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RTOL'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RTOP')
# other virtual markers
points = skvm.GetOutput(2)
self.assertEqual(points.GetItemNumber(), 2)
inc = 0
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:LKNE'); inc +=1
self.assertEqual(points.GetItem(inc).GetLabel(), 'Matt:RKNE')
# other type of points
points = skvm.GetOutput(3)
self.assertEqual(points.GetItemNumber(), 28)
| bsd-3-clause |
badreddinetahir/pwn_plug_sources | src/voiper/sulley/requests/sip_valid.py | 8 | 1257 | from sulley import *
s_initialize("INVITE_VALID")
s_static('\r\n'.join(['INVITE sip:[email protected] SIP/2.0',
'CSeq: 1 INVITE',
'Via: SIP/2.0/UDP 192.168.3.102:5068;branch=z9hG4bKlm4zshdowki1t8c7ep6j0yavq2ug5r3x;rport',
'From: "nnp" <sip:[email protected]>;tag=so08p5k39wuv1dczfnij7bet4l2m6hrq',
'Call-ID: rzxd6tm98v0eal1cifg2py7sj3wk54ub@ubuntu',
'To: <sip:[email protected]>',
'Max-Forwards: 70',
'Content-Type: application/sdp',
'\r\n',
'v=0',
'o=somegimp 1190505265 1190505265 IN IP4 192.168.3.101',
's=Opal SIP Session',
'i=some information string',
'u=http://unprotectedhex.com/someuri.htm',
'[email protected]',
'c=IN IP4 192.168.3.101',
'b=CT:8',
't=0 1',
'm=audio 5028 RTP/AVP 101 96 107 110 0 8',
'a=rtpmap:101 telephone-event/8000',
]))
################################################################################
s_initialize("CANCEL_VALID")
s_static('\r\n'.join(['CANCEL sip:[email protected] SIP/2.0',
'CSeq: 1 CANCEL',
'Via: SIP/2.0/UDP 192.168.3.102:5068;branch=z9hG4bKlm4zshdowki1t8c7ep6j0yavq2ug5r3x;rport',
'From: "nnp" <sip:[email protected]>;tag=so08p5k39wuv1dczfnij7bet4l2m6hrq',
'Call-ID: rzxd6tm98v0eal1cifg2py7sj3wk54ub@ubuntu',
'To: <sip:[email protected]>',
'Max-Forwards: 70',
'\r\n'
]))
| gpl-3.0 |
donkirkby/django | tests/reverse_lookup/tests.py | 326 | 1675 | from __future__ import unicode_literals
from django.core.exceptions import FieldError
from django.test import TestCase
from .models import Choice, Poll, User
class ReverseLookupTests(TestCase):
def setUp(self):
john = User.objects.create(name="John Doe")
jim = User.objects.create(name="Jim Bo")
first_poll = Poll.objects.create(
question="What's the first question?",
creator=john
)
second_poll = Poll.objects.create(
question="What's the second question?",
creator=jim
)
Choice.objects.create(
poll=first_poll,
related_poll=second_poll,
name="This is the answer."
)
def test_reverse_by_field(self):
u1 = User.objects.get(
poll__question__exact="What's the first question?"
)
self.assertEqual(u1.name, "John Doe")
u2 = User.objects.get(
poll__question__exact="What's the second question?"
)
self.assertEqual(u2.name, "Jim Bo")
def test_reverse_by_related_name(self):
p1 = Poll.objects.get(poll_choice__name__exact="This is the answer.")
self.assertEqual(p1.question, "What's the first question?")
p2 = Poll.objects.get(
related_choice__name__exact="This is the answer.")
self.assertEqual(p2.question, "What's the second question?")
def test_reverse_field_name_disallowed(self):
"""
If a related_name is given you can't use the field name instead
"""
self.assertRaises(FieldError, Poll.objects.get,
choice__name__exact="This is the answer")
| bsd-3-clause |
fivejjs/PTVS | Python/Tests/TestData/DjangoAnalysisTestApp/DjangoAnalysisTestApp/settings.py | 18 | 5537 | # Django settings for DjangoAnalysisTestApp project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'n(bd1f1c%e8=_xad02x5qtfn%wgwpi492e$8_erx+d)!tpeoim'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'DjangoAnalysisTestApp.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'DjangoAnalysisTestApp.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| apache-2.0 |
hgrif/ds-utils | dsutils/sklearn.py | 1 | 2913 | import numpy as np
from sklearn import cross_validation
from sklearn import metrics
from sklearn import preprocessing
def multiclass_roc_auc_score(y_true, y_score, label_binarizer=None, **kwargs):
"""Compute ROC AUC score for multiclass.
:param y_true: true multiclass predictions [n_samples]
:param y_score: multiclass scores [n_samples, n_classes]
:param label_binarizer: Binarizer to use (sklearn.preprocessing.LabelBinarizer())
:param kwargs: Additional keyword arguments for sklearn.metrics.roc_auc_score
:return: Multiclass ROC AUC score
"""
if label_binarizer is None:
label_binarizer = preprocessing.LabelBinarizer()
binarized_true = label_binarizer.fit_transform(y_true)
score = metrics.roc_auc_score(binarized_true, y_score, **kwargs)
return score
def split_train_test(y, do_split_stratified=True, **kwargs):
"""Get indexes to split y in train and test sets.
:param y: Labels of samples
:param do_split_stratified: Use StratifiedShuffleSplit (else ShuffleSplit)
:param kwargs: Keyword arguments StratifiedShuffleSplit or ShuffleSplit
:return: (train indexes, test indexes)
"""
if do_split_stratified:
data_splitter = cross_validation.StratifiedShuffleSplit(y, n_iter=1,
**kwargs)
else:
data_splitter = cross_validation.ShuffleSplit(y, n_iter=1, **kwargs)
train_ix, test_ix = data_splitter.__iter__().next()
return train_ix, test_ix
class OrderedLabelEncoder(preprocessing.LabelEncoder):
"""Encode labels with value between 0 and n_classes-1 in specified order.
See also
--------
sklearn.preprocessing.LabelEncoder
"""
def __init__(self, classes):
self.classes_ = np.array(classes, dtype='O')
def fit(self, y):
""" Deprecated method.
"""
raise Exception('Invalid method: method is deprecated')
def fit_transform(self, y):
""" Deprecated method.
"""
raise Exception('Invalid method: method is deprecated')
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
self._check_fitted()
classes = np.array(np.unique(y), dtype='O')
preprocessing.label._check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
transformed_y = np.zeros_like(y, dtype=int)
for i_class, current_class in enumerate(self.classes_):
transformed_y[np.array(y) == current_class] = i_class
return transformed_y | mit |
nuncjo/odoo | addons/auth_signup/__init__.py | 446 | 1039 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import controllers
import res_config
import res_users
| agpl-3.0 |
ctb/cvxpy | doc/sphinxext/docscrape.py | 68 | 15425 | """Extract reference documentation from the NumPy source tree.
"""
import inspect
import textwrap
import re
import pydoc
from StringIO import StringIO
from warnings import warn
class Reader(object):
"""A line-based string reader.
"""
def __init__(self, data):
"""
Parameters
----------
data : str
String with lines separated by '\n'.
"""
if isinstance(data,list):
self._str = data
else:
self._str = data.split('\n') # store string as list of lines
self.reset()
def __getitem__(self, n):
return self._str[n]
def reset(self):
self._l = 0 # current line nr
def read(self):
if not self.eof():
out = self[self._l]
self._l += 1
return out
else:
return ''
def seek_next_non_empty_line(self):
for l in self[self._l:]:
if l.strip():
break
else:
self._l += 1
def eof(self):
return self._l >= len(self._str)
def read_to_condition(self, condition_func):
start = self._l
for line in self[start:]:
if condition_func(line):
return self[start:self._l]
self._l += 1
if self.eof():
return self[start:self._l+1]
return []
def read_to_next_empty_line(self):
self.seek_next_non_empty_line()
def is_empty(line):
return not line.strip()
return self.read_to_condition(is_empty)
def read_to_next_unindented_line(self):
def is_unindented(line):
return (line.strip() and (len(line.lstrip()) == len(line)))
return self.read_to_condition(is_unindented)
def peek(self,n=0):
if self._l + n < len(self._str):
return self[self._l + n]
else:
return ''
def is_empty(self):
return not ''.join(self._str).strip()
class NumpyDocString(object):
def __init__(self, docstring, config={}):
docstring = textwrap.dedent(docstring).split('\n')
self._doc = Reader(docstring)
self._parsed_data = {
'Signature': '',
'Summary': [''],
'Extended Summary': [],
'Parameters': [],
'Returns': [],
'Raises': [],
'Warns': [],
'Other Parameters': [],
'Attributes': [],
'Methods': [],
'See Also': [],
'Notes': [],
'Warnings': [],
'References': '',
'Examples': '',
'index': {}
}
self._parse()
def __getitem__(self,key):
return self._parsed_data[key]
def __setitem__(self,key,val):
if not self._parsed_data.has_key(key):
warn("Unknown section %s" % key)
else:
self._parsed_data[key] = val
def _is_at_section(self):
self._doc.seek_next_non_empty_line()
if self._doc.eof():
return False
l1 = self._doc.peek().strip() # e.g. Parameters
if l1.startswith('.. index::'):
return True
l2 = self._doc.peek(1).strip() # ---------- or ==========
return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))
def _strip(self,doc):
i = 0
j = 0
for i,line in enumerate(doc):
if line.strip(): break
for j,line in enumerate(doc[::-1]):
if line.strip(): break
return doc[i:len(doc)-j]
def _read_to_next_section(self):
section = self._doc.read_to_next_empty_line()
while not self._is_at_section() and not self._doc.eof():
if not self._doc.peek(-1).strip(): # previous line was empty
section += ['']
section += self._doc.read_to_next_empty_line()
return section
def _read_sections(self):
while not self._doc.eof():
data = self._read_to_next_section()
name = data[0].strip()
if name.startswith('..'): # index section
yield name, data[1:]
elif len(data) < 2:
yield StopIteration
else:
yield name, self._strip(data[2:])
def _parse_param_list(self,content):
r = Reader(content)
params = []
while not r.eof():
header = r.read().strip()
if ' : ' in header:
arg_name, arg_type = header.split(' : ')[:2]
else:
arg_name, arg_type = header, ''
desc = r.read_to_next_unindented_line()
desc = dedent_lines(desc)
params.append((arg_name,arg_type,desc))
return params
_name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
def _parse_see_also(self, content):
"""
func_name : Descriptive text
continued text
another_func_name : Descriptive text
func_name1, func_name2, :meth:`func_name`, func_name3
"""
items = []
def parse_item_name(text):
"""Match ':role:`name`' or 'name'"""
m = self._name_rgx.match(text)
if m:
g = m.groups()
if g[1] is None:
return g[3], None
else:
return g[2], g[1]
raise ValueError("%s is not a item name" % text)
def push_item(name, rest):
if not name:
return
name, role = parse_item_name(name)
items.append((name, list(rest), role))
del rest[:]
current_func = None
rest = []
for line in content:
if not line.strip(): continue
m = self._name_rgx.match(line)
if m and line[m.end():].strip().startswith(':'):
push_item(current_func, rest)
current_func, line = line[:m.end()], line[m.end():]
rest = [line.split(':', 1)[1].strip()]
if not rest[0]:
rest = []
elif not line.startswith(' '):
push_item(current_func, rest)
current_func = None
if ',' in line:
for func in line.split(','):
if func.strip():
push_item(func, [])
elif line.strip():
current_func = line
elif current_func is not None:
rest.append(line.strip())
push_item(current_func, rest)
return items
def _parse_index(self, section, content):
"""
.. index: default
:refguide: something, else, and more
"""
def strip_each_in(lst):
return [s.strip() for s in lst]
out = {}
section = section.split('::')
if len(section) > 1:
out['default'] = strip_each_in(section[1].split(','))[0]
for line in content:
line = line.split(':')
if len(line) > 2:
out[line[1]] = strip_each_in(line[2].split(','))
return out
def _parse_summary(self):
"""Grab signature (if given) and summary"""
if self._is_at_section():
return
summary = self._doc.read_to_next_empty_line()
summary_str = " ".join([s.strip() for s in summary]).strip()
if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
self['Signature'] = summary_str
if not self._is_at_section():
self['Summary'] = self._doc.read_to_next_empty_line()
else:
self['Summary'] = summary
if not self._is_at_section():
self['Extended Summary'] = self._read_to_next_section()
def _parse(self):
self._doc.reset()
self._parse_summary()
for (section,content) in self._read_sections():
if not section.startswith('..'):
section = ' '.join([s.capitalize() for s in section.split(' ')])
if section in ('Parameters', 'Returns', 'Raises', 'Warns',
'Other Parameters', 'Attributes', 'Methods'):
self[section] = self._parse_param_list(content)
elif section.startswith('.. index::'):
self['index'] = self._parse_index(section, content)
elif section == 'See Also':
self['See Also'] = self._parse_see_also(content)
else:
self[section] = content
# string conversion routines
def _str_header(self, name, symbol='-'):
return [name, len(name)*symbol]
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
if self['Signature']:
return [self['Signature'].replace('*','\*')] + ['']
else:
return ['']
def _str_summary(self):
if self['Summary']:
return self['Summary'] + ['']
else:
return []
def _str_extended_summary(self):
if self['Extended Summary']:
return self['Extended Summary'] + ['']
else:
return []
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_header(name)
for param,param_type,desc in self[name]:
out += ['%s : %s' % (param, param_type)]
out += self._str_indent(desc)
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += self[name]
out += ['']
return out
def _str_see_also(self, func_role):
if not self['See Also']: return []
out = []
out += self._str_header("See Also")
last_had_desc = True
for func, desc, role in self['See Also']:
if role:
link = ':%s:`%s`' % (role, func)
elif func_role:
link = ':%s:`%s`' % (func_role, func)
else:
link = "`%s`_" % func
if desc or last_had_desc:
out += ['']
out += [link]
else:
out[-1] += ", %s" % link
if desc:
out += self._str_indent([' '.join(desc)])
last_had_desc = True
else:
last_had_desc = False
out += ['']
return out
def _str_index(self):
idx = self['index']
out = []
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.iteritems():
if section == 'default':
continue
out += [' :%s: %s' % (section, ', '.join(references))]
return out
def __str__(self, func_role=''):
out = []
out += self._str_signature()
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Other Parameters',
'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_section('Warnings')
out += self._str_see_also(func_role)
for s in ('Notes','References','Examples'):
out += self._str_section(s)
for param_list in ('Attributes', 'Methods'):
out += self._str_param_list(param_list)
out += self._str_index()
return '\n'.join(out)
def indent(str,indent=4):
indent_str = ' '*indent
if str is None:
return indent_str
lines = str.split('\n')
return '\n'.join(indent_str + l for l in lines)
def dedent_lines(lines):
"""Deindent a list of lines maximally"""
return textwrap.dedent("\n".join(lines)).split("\n")
def header(text, style='-'):
return text + '\n' + style*len(text) + '\n'
class FunctionDoc(NumpyDocString):
def __init__(self, func, role='func', doc=None, config={}):
self._f = func
self._role = role # e.g. "func" or "meth"
if doc is None:
if func is None:
raise ValueError("No function or docstring given")
doc = inspect.getdoc(func) or ''
NumpyDocString.__init__(self, doc)
if not self['Signature'] and func is not None:
func, func_name = self.get_func()
try:
# try to read signature
argspec = inspect.getargspec(func)
argspec = inspect.formatargspec(*argspec)
                argspec = argspec.replace('*', r'\*')
signature = '%s%s' % (func_name, argspec)
            except TypeError:
signature = '%s()' % func_name
self['Signature'] = signature
def get_func(self):
func_name = getattr(self._f, '__name__', self.__class__.__name__)
if inspect.isclass(self._f):
func = getattr(self._f, '__call__', self._f.__init__)
else:
func = self._f
return func, func_name
def __str__(self):
out = ''
func, func_name = self.get_func()
        signature = self['Signature'].replace('*', r'\*')
roles = {'func': 'function',
'meth': 'method'}
if self._role:
            if self._role not in roles:
print "Warning: invalid role %s" % self._role
out += '.. %s:: %s\n \n\n' % (roles.get(self._role,''),
func_name)
out += super(FunctionDoc, self).__str__(func_role=self._role)
return out
class ClassDoc(NumpyDocString):
extra_public_methods = ['__call__']
def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc,
config={}):
if not inspect.isclass(cls) and cls is not None:
raise ValueError("Expected a class or None, but got %r" % cls)
self._cls = cls
if modulename and not modulename.endswith('.'):
modulename += '.'
self._mod = modulename
if doc is None:
if cls is None:
raise ValueError("No class or documentation string given")
doc = pydoc.getdoc(cls)
NumpyDocString.__init__(self, doc)
if config.get('show_class_members', True):
if not self['Methods']:
self['Methods'] = [(name, '', '')
for name in sorted(self.methods)]
if not self['Attributes']:
self['Attributes'] = [(name, '', '')
for name in sorted(self.properties)]
@property
def methods(self):
if self._cls is None:
return []
return [name for name,func in inspect.getmembers(self._cls)
if ((not name.startswith('_')
or name in self.extra_public_methods)
and callable(func))]
@property
def properties(self):
if self._cls is None:
return []
return [name for name,func in inspect.getmembers(self._cls)
if not name.startswith('_') and func is None]
| gpl-3.0 |
kidaa30/spacewalk | backend/server/rhnSQL/sql_row.py | 4 | 4930 | #
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
# a class used to handle a row of data in a particular table
#
import string
from rhn.UserDictCase import UserDictCase
from spacewalk.common.rhnException import rhnException
import sql_base
import sql_lib
class Row(UserDictCase):
""" This class allows one to work with the columns of a particular row in a more
convenient manner (ie, using a disctionary interface). It allows for the row
data to be loaded and saved and is generally easier to use than the Table
class which is really designed for bulk updates and stuff like that.
The easiest way to separate what these things are for is to remember that
the Table class indexes by KEY, while the Row class indexes by column
"""
def __init__(self, db, table, hashname, hashval=None):
UserDictCase.__init__(self)
if not isinstance(db, sql_base.Database):
raise rhnException("Argument db is not a database instance", db)
self.db = db
self.table = table
self.hashname = string.lower(hashname)
# and the data dictionary
self.data = {}
# is this a real entry (ie, use insert or update)
self.real = 0
if hashval is not None: # if we have to load an entry already...
self.load(hashval)
def __repr__(self):
return "<%s instance at 0x%0x on (%s, %s, %s)>" % (
self.__class__.__name__, abs(id(self)),
self.table, self.hashname, self.get(self.hashname))
__str__ = __repr__
def __setitem__(self, name, value):
""" make it work like a dictionary """
x = string.lower(name)
# forbid setting the value of the hash column because of the
# ambiguity of the operation (is it a "save as new id" or
# "load from new id"?). We provide interfaces for load, save
# and create instead.
if x == self.hashname:
raise AttributeError("Can not reset the value of the hash key")
if x not in self.data or self.data[x][0] != value:
self.data[x] = (value, 1)
def __getitem__(self, name):
x = string.lower(name)
if x in self.data:
return self.data[x][0]
raise KeyError("Key %s not found in the Row dictionary" % name)
def get(self, name):
x = string.lower(name)
if x in self.data:
return self.data[x][0]
return None
def reset(self, val=0):
""" reset the changed status for these entries """
for k, v in self.data.items():
            # tuples do not support item assignment
self.data[k] = (v[0], val)
def create(self, hashval):
""" create it as a new entry """
self.data[self.hashname] = (hashval, 0)
self.real = 0
self.save()
def load(self, hashval):
""" load an entry """
return self.load_sql("%s = :hashval" % self.hashname, {'hashval': hashval})
def load_sql(self, sql, pdict={}):
""" load from a sql clause """
h = self.db.prepare("select * from %s where %s" % (self.table, sql))
h.execute(**pdict)
ret = h.fetchone_dict()
self.data = {}
if not ret:
self.real = 0
return 0
for k, v in ret.items():
self.data[k] = (v, 0)
self.real = 1
return 1
def save(self, with_updates=1):
""" now save an entry """
if self.hashname not in self.data:
raise AttributeError("Table does not have a hash `%s' key" % self.hashname)
# get a list of fields to be set
items = map(lambda a: (a[0], a[1][0]),
filter(lambda b: b[1][1] == 1, self.data.items()))
if not items: # if there is nothing for us to do, avoid doing it.
return
# and now build the SQL statements
if self.real: # Update
if not with_updates:
raise sql_base.ModifiedRowError()
sql, pdict = sql_lib.build_sql_update(self.table, self.hashname, items)
else:
sql, pdict = sql_lib.build_sql_insert(self.table, self.hashname, items)
h = self.db.prepare(sql)
pdict["p0"] = self.data[self.hashname][0]
# and now do it
h.execute(**pdict)
self.real = 1
return
| gpl-2.0 |
alexcuellar/odoo | addons/account_budget/account_budget.py | 194 | 9368 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import date, datetime
from openerp.osv import fields, osv
from openerp.tools import ustr, DEFAULT_SERVER_DATE_FORMAT
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
# ---------------------------------------------------------
# Utils
# ---------------------------------------------------------
def strToDate(dt):
return date(int(dt[0:4]), int(dt[5:7]), int(dt[8:10]))
def strToDatetime(strdate):
return datetime.strptime(strdate, DEFAULT_SERVER_DATE_FORMAT)
# ---------------------------------------------------------
# Budgets
# ---------------------------------------------------------
class account_budget_post(osv.osv):
_name = "account.budget.post"
_description = "Budgetary Position"
_columns = {
'code': fields.char('Code', size=64, required=True),
'name': fields.char('Name', required=True),
'account_ids': fields.many2many('account.account', 'account_budget_rel', 'budget_id', 'account_id', 'Accounts'),
'crossovered_budget_line': fields.one2many('crossovered.budget.lines', 'general_budget_id', 'Budget Lines'),
'company_id': fields.many2one('res.company', 'Company', required=True),
}
_defaults = {
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.budget.post', context=c)
}
_order = "name"
class crossovered_budget(osv.osv):
_name = "crossovered.budget"
_description = "Budget"
_columns = {
'name': fields.char('Name', required=True, states={'done':[('readonly',True)]}),
'code': fields.char('Code', size=16, required=True, states={'done':[('readonly',True)]}),
'creating_user_id': fields.many2one('res.users', 'Responsible User'),
'validating_user_id': fields.many2one('res.users', 'Validate User', readonly=True),
'date_from': fields.date('Start Date', required=True, states={'done':[('readonly',True)]}),
'date_to': fields.date('End Date', required=True, states={'done':[('readonly',True)]}),
'state' : fields.selection([('draft','Draft'),('cancel', 'Cancelled'),('confirm','Confirmed'),('validate','Validated'),('done','Done')], 'Status', select=True, required=True, readonly=True, copy=False),
'crossovered_budget_line': fields.one2many('crossovered.budget.lines', 'crossovered_budget_id', 'Budget Lines', states={'done':[('readonly',True)]}, copy=True),
'company_id': fields.many2one('res.company', 'Company', required=True),
}
_defaults = {
'state': 'draft',
'creating_user_id': lambda self, cr, uid, context: uid,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.budget.post', context=c)
}
def budget_confirm(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {
'state': 'confirm'
})
return True
def budget_draft(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {
'state': 'draft'
})
return True
def budget_validate(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {
'state': 'validate',
'validating_user_id': uid,
})
return True
def budget_cancel(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {
'state': 'cancel'
})
return True
def budget_done(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {
'state': 'done'
})
return True
class crossovered_budget_lines(osv.osv):
def _prac_amt(self, cr, uid, ids, context=None):
res = {}
result = 0.0
if context is None:
context = {}
account_obj = self.pool.get('account.account')
for line in self.browse(cr, uid, ids, context=context):
acc_ids = [x.id for x in line.general_budget_id.account_ids]
if not acc_ids:
raise osv.except_osv(_('Error!'),_("The Budget '%s' has no accounts!") % ustr(line.general_budget_id.name))
acc_ids = account_obj._get_children_and_consol(cr, uid, acc_ids, context=context)
date_to = line.date_to
date_from = line.date_from
if line.analytic_account_id.id:
cr.execute("SELECT SUM(amount) FROM account_analytic_line WHERE account_id=%s AND (date "
"between to_date(%s,'yyyy-mm-dd') AND to_date(%s,'yyyy-mm-dd')) AND "
"general_account_id=ANY(%s)", (line.analytic_account_id.id, date_from, date_to,acc_ids,))
result = cr.fetchone()[0]
if result is None:
result = 0.00
res[line.id] = result
return res
def _prac(self, cr, uid, ids, name, args, context=None):
res={}
for line in self.browse(cr, uid, ids, context=context):
res[line.id] = self._prac_amt(cr, uid, [line.id], context=context)[line.id]
return res
def _theo_amt(self, cr, uid, ids, context=None):
if context is None:
context = {}
res = {}
for line in self.browse(cr, uid, ids, context=context):
today = datetime.now()
if line.paid_date:
if strToDate(line.date_to) <= strToDate(line.paid_date):
theo_amt = 0.00
else:
theo_amt = line.planned_amount
else:
line_timedelta = strToDatetime(line.date_to) - strToDatetime(line.date_from)
elapsed_timedelta = today - (strToDatetime(line.date_from))
if elapsed_timedelta.days < 0:
# If the budget line has not started yet, theoretical amount should be zero
theo_amt = 0.00
elif line_timedelta.days > 0 and today < strToDatetime(line.date_to):
# If today is between the budget line date_from and date_to
theo_amt = (elapsed_timedelta.total_seconds() / line_timedelta.total_seconds()) * line.planned_amount
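                    # Hypothetical numbers: if 25% of the time between
                    # date_from and date_to has elapsed, the theoretical
                    # amount is 0.25 * planned_amount.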
else:
theo_amt = line.planned_amount
res[line.id] = theo_amt
return res
def _theo(self, cr, uid, ids, name, args, context=None):
res = {}
for line in self.browse(cr, uid, ids, context=context):
res[line.id] = self._theo_amt(cr, uid, [line.id], context=context)[line.id]
return res
def _perc(self, cr, uid, ids, name, args, context=None):
res = {}
for line in self.browse(cr, uid, ids, context=context):
            if line.theoritical_amount != 0.00:
res[line.id] = float((line.practical_amount or 0.0) / line.theoritical_amount) * 100
else:
res[line.id] = 0.00
return res
_name = "crossovered.budget.lines"
_description = "Budget Line"
_columns = {
'crossovered_budget_id': fields.many2one('crossovered.budget', 'Budget', ondelete='cascade', select=True, required=True),
'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account'),
'general_budget_id': fields.many2one('account.budget.post', 'Budgetary Position',required=True),
'date_from': fields.date('Start Date', required=True),
'date_to': fields.date('End Date', required=True),
'paid_date': fields.date('Paid Date'),
'planned_amount':fields.float('Planned Amount', required=True, digits_compute=dp.get_precision('Account')),
'practical_amount':fields.function(_prac, string='Practical Amount', type='float', digits_compute=dp.get_precision('Account')),
'theoritical_amount':fields.function(_theo, string='Theoretical Amount', type='float', digits_compute=dp.get_precision('Account')),
'percentage':fields.function(_perc, string='Percentage', type='float'),
'company_id': fields.related('crossovered_budget_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True)
}
class account_analytic_account(osv.osv):
_inherit = "account.analytic.account"
_columns = {
'crossovered_budget_line': fields.one2many('crossovered.budget.lines', 'analytic_account_id', 'Budget Lines'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
yogo1212/RIOT | tests/bench_runtime_coreapis/tests/01-run.py | 14 | 1439 | #!/usr/bin/env python3
# Copyright (C) 2018 Freie Universität Berlin
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import sys
from testrunner import run
# The default timeout is not enough for this test on some of the slower boards
TIMEOUT = 30
BENCHMARK_REGEXP = r"\s+{func}:\s+\d+us\s+---\s+\d*\.*\d+us per call\s+---\s+\d+ calls per sec"
def testfunc(child):
child.expect_exact('Runtime of Selected Core API functions')
child.expect(BENCHMARK_REGEXP.format(func="nop loop"))
child.expect(BENCHMARK_REGEXP.format(func=r"mutex_init\(\)"))
child.expect(BENCHMARK_REGEXP.format(func="mutex lock/unlock"), timeout=TIMEOUT)
child.expect(BENCHMARK_REGEXP.format(func=r"thread_flags_set\(\)"))
child.expect(BENCHMARK_REGEXP.format(func=r"thread_flags_clear\(\)"))
child.expect(BENCHMARK_REGEXP.format(func="thread flags set/wait any"), timeout=TIMEOUT)
child.expect(BENCHMARK_REGEXP.format(func="thread flags set/wait all"), timeout=TIMEOUT)
child.expect(BENCHMARK_REGEXP.format(func="thread flags set/wait one"), timeout=TIMEOUT)
child.expect(BENCHMARK_REGEXP.format(func=r"msg_try_receive\(\)"), timeout=TIMEOUT)
child.expect(BENCHMARK_REGEXP.format(func=r"msg_avail\(\)"))
child.expect_exact('[SUCCESS]')
if __name__ == "__main__":
sys.exit(run(testfunc))
| lgpl-2.1 |
topic2k/EventGhost | plugins/FS20PCS/__init__.py | 4 | 21740 | """<rst>
Allows sending commands to FS20 receivers.
|
|fS20Image|_
`Direct shop link <http://www.elv.de/output/controller.aspx?cid=74&detail=10&detail2=27743>`__
.. |fS20Image| image:: picture.jpg
.. _fS20Image: http://www.elv.de/
"""
import time
eg.RegisterPlugin(
name = "ELV FS20 PCS",
author = "Bartman",
version = "0.2.1486",
kind = "external",
canMultiLoad = False,
createMacrosOnAdd = False,
description = __doc__,
url = "http://www.eventghost.net/forum/viewtopic.php?f=9&t=2147",
guid = '{D76A6D18-142A-4f75-8F93-9CDA86DBC310}'
)
import binascii
import math
import sys
import win32event
import wx.lib.mixins.listctrl as listmix
from wx.lib.masked import TextCtrl
import wx.lib.masked as masked
from eg.WinApi.HID import HIDThread
from eg.WinApi.HID import GetDevicePath
from eg.WinApi.HID import IsDeviceName
VENDOR_ID = 6383
PRODUCT_ID = 57365
TIME_OUT = 250
class Text:
errorFind = "Error finding ELV FS20 PCS"
timedActionName = "Timed actions"
    timedActionDescription = "Allows controlling FS20 devices with timed parameters."
address = "Address:"
timerValue = "Timer value:"
repeat = "Repeat:"
level = "Level:"
repeatSuffix = "{0} ({1} times)"
class FS20PCS(eg.PluginClass):
text = Text
def AddNewAction(self, root, internalName, baseClass, classFuncCode, externalName, classDescription, classLabelFormat):
class MyText:
labelFormat = classLabelFormat
class tmpAction(baseClass):
text = MyText
name = externalName
description = classDescription
funcCode = classFuncCode
tmpAction.__name__ = internalName
root.AddAction(tmpAction)
def __init__(self):
self.version = None
self.thread = None
self.AddNewAction(self, "Off", SimpleAction, 0x00, "Off", "Turns device off (dim to 0%)", "Turn off {0}")
self.AddNewAction(self, "On", SimpleAction, 0x10, "On", "Turns device on (dim to 100%)", "Turn on {0}")
self.AddNewAction(self, "PreviousValue", SimpleAction, 0x11, "On with previous value", "Turns device on with previous value", "Turn on {0} with previous value")
self.AddNewAction(self, "Toggle", SimpleAction, 0x12, "Toggle", "Toggles between off and previous value", "Toggle {0} between off and previous value")
self.AddNewAction(self, "DimDown", RepeatAction, 0x14, "Dim down", "Dims down", "Dim down {0}")
self.AddNewAction(self, "DimUp", RepeatAction, 0x13, "Dim up", "Dims up", "Dim up {0}")
self.AddAction(Dim)
self.AddNewAction(self, "DimAlternating", RepeatAction, 0x15, "Alternating dim", "Dims up one level until maximum, then dim down", "Alternating dim {0}")
group = self.AddGroup(self.text.timedActionName, self.text.timedActionDescription)
self.AddNewAction(group, "OffTimer", TimerValueAction, 0x20, "Off in timer value", "Turns device off (dim to 0%) in timer value", "Turn off {0} in {1}")
self.AddNewAction(group, "OnTimer", TimerValueAction, 0x30, "On in timer value", "Turns device on (dim to 100%) in timer value", "Turn on {0} in {1}")
self.AddNewAction(group, "PreviousValueTimer", TimerValueAction, 0x31, "On with previous value in timer value", "Turns device on with previous value in timer value", "Turn on {0} with previous value in {1}")
self.AddNewAction(group, "ToggleTimer", TimerValueAction, 0x32, "Toggle in timer value", "Toggles between off and previous value in timer value", "Toggle {0} between off and previous value in {1}")
group.AddAction(DimTimer)
self.AddNewAction(group, "OffPreviousValueInternal", SimpleAction, 0x18, "Off for internal timer value, previous value afterwards", "Turns off (dim to 0%) device for internal timer value and return to previous value afterwards", "Turn off {0} for internal timer value and return to previous value afterwards")
self.AddNewAction(group, "OffPreviousValueTimer", TimerValueAction, 0x38, "Off for timer value, previous value afterwards", "Turns off (dim to 0%) device for timer value and return to previous value afterwards", "Turn off {0} for {1} and return to previous value afterwards")
self.AddNewAction(group, "OnOffInternal", SimpleAction, 0x19, "On (dim to 100%) for internal timer value, off afterwards", "Turns on (device dim to 100%) for internal timer value and turns it off afterwards", "Turn on {0} for internal timer value and turn off afterwards")
self.AddNewAction(group, "OnOffTimer", TimerValueAction, 0x39, "On (dim to 100%) for timer value, off afterwards", "Turns on (device dim to 100%) for timer value and turns it off afterwards", "Turn on {0} for {1} and turn off afterwards")
self.AddNewAction(group, "PreviousValueOffInternal", SimpleAction, 0x1a, "Previous value for internal timer value, off afterwards", "Turns on device with previous value for internal timer value and turns it off afterwards", "Turn on {0} with previous value for internal timer value and turn off afterwards")
self.AddNewAction(group, "PreviousValueOffTimer", TimerValueAction, 0x3a, "Previous value for timer value, off afterwards", "Turns on device with previous value for timer value and turns it off afterwards", "Turn on {0} with previous value for {1} and turn off afterwards")
self.AddNewAction(group, "OnPreviousStateInternal", SimpleAction, 0x1e, "On for internal timer value, previous state afterwards", "Turns on (dim to 100%) device for internal timer value and return to previous state afterwards", "Turn on {0} for internal timer value and return to previous state afterwards")
self.AddNewAction(group, "OnPreviousStateTimer", TimerValueAction, 0x3e, "On for timer value, previous state afterwards", "Turns on (dim to 100%) device for timer value and return to previous state afterwards", "Turn on {0} for {1} and return to previous state afterwards")
self.AddNewAction(group, "PreviousValuePreviousStateInternal", SimpleAction, 0x1f, "Previous value for internal timer value, previous state afterwards", "Turns on device with previous value for internal timer value and return to previous state afterwards", "Turn on {0} with previous value for internal timer value and return to previous state afterwards")
self.AddNewAction(group, "PreviousValuePreviousStateTimer", TimerValueAction, 0x3f, "Previous value for timer value, previous state afterwards", "Turns on device with previous value for timer value and return to previous state afterwards", "Turn on {0} with previous value for {1} and return to previous state afterwards")
self.AddNewAction(group, "DimUpOffTimer", RepeatTimerValueAction, 0x33, "Dim up and turn off after timer value", "Dims up and turns off after timer value", "Dim up {0} and turn off after {1}")
self.AddNewAction(group, "DimDownOffTimer", RepeatTimerValueAction, 0x34, "Dim down and turn off after timer value", "Dims down and turns off after timer value", "Dim down {0} and turn off after {1}")
self.AddNewAction(group, "DimAlternatingOffTimer", RepeatTimerValueAction, 0x35, "Alternating dim and turn off after timer value", "Dims up one level until maximum, then dim down and turns off after timer value", "Alternating dim {0} and turn off after {1}")
group = self.AddGroup("Programming", "Allows programming of FS20 devices. You should prefer timed actions and only use these for initial setup.")
self.AddNewAction(group, "ProgramTimer", SimpleAction, 0x16, "Start/stop programming of internal timer", "Starts respectively stop programming of the internal timer", "Start/stop programming of internal timer for {0}")
self.AddNewAction(group, "ProgramCode", SimpleAction, 0x17, "Program address", "Learn address. This is a dummy action which does nothing, but can be used for address learning procedure on some devices.", "Learn address {0}")
self.AddNewAction(group, "ProgramFactoryDefaults", SimpleAction, 0x1b, "Reset device to factory defaults", "Reset device to factory defaults", "Reset {0} to factory defaults")
self.AddNewAction(group, "ProgramInternalTimer", TimerValueAction, 0x36, "Program internal timer value", "Program internal timer value", "Program internal timer value for {0} to {1}")
self.AddNewAction(group, "ProgramDimUpRampTimer", TimerValueAction, 0x3c, "Program dim up ramp timer value", "Program dim up ramp timer value", "Program dim up ramp timer value for {0} to {1}")
self.AddNewAction(group, "ProgramDimDownRampTimer", TimerValueAction, 0x3d, "Program dim down ramp timer value", "Program dim down ramp timer value", "Program dim down ramp timer value for {0} to {1}")
def RawCallback(self, data):
if eg.debugLevel:
print "FS20PCS RawCallBack", binascii.hexlify(data)
        if len(data) != 5 or data[0:3] != "\x02\x03\xA0":
            self.PrintError("data must have a length of 5 and start with 02 03 A0")
            return
errorId = ord(data[3:4])
if errorId == 0:
pass
#everything is fine
elif errorId == 1:
#Firmware version was requested
self.version = ord(data[4:5])
elif errorId == 2:
#Firmware version was requested
self.version = ord(data[4:5])
elif errorId == 3:
self.PrintError("Unknown command id")
elif errorId == 4:
self.PrintError("invalid command length")
elif errorId == 5:
self.PrintError("nothing to abort")
else:
self.PrintError("Unknown Error")
def PrintVersion(self):
        # Use the following Python command to show the version number:
        #   eg.plugins.FS20PCS.plugin.PrintVersion()
versionMajor = self.version / 16
versionMinor = self.version % 16
print "Firmware version %d.%d" % (versionMajor, versionMinor)
def StopCallback(self):
self.TriggerEvent("Stopped")
self.thread = None
def GetMyDevicePath(self):
path = GetDevicePath(
None,
VENDOR_ID,
PRODUCT_ID,
None,
0,
True,
0)
        return path
def SendRawCommand(self, data, timeout = 0):
if not self.thread:
self.PrintError("Plug in is not running.")
return
dataLength = len(data)
if eg.debugLevel:
print "FS20PCS SendRawCommand", binascii.hexlify(data)
newData = data + ((11 - dataLength) * '\x00')
self.thread.Write(newData, timeout + 1000)#extra second to wait for response
def Abort(self):
self.SendRawCommand("\x01\x01\xf3")
def RequestVersion(self):
data = '\x01\x01\xf0'
self.SendRawCommand(data)
def SetupHidThread(self, newDevicePath):
#create thread
thread = HIDThread(self.name, newDevicePath, self.name)
thread.SetStopCallback(self.StopCallback)
thread.SetRawCallback(self.RawCallback)
thread.start()
thread.WaitForInit()
self.thread = thread
self.RequestVersion()
def ReconnectDevice(self, event):
"""method to reconnect a disconnected device"""
        if self.thread is None:
if not IsDeviceName(event.payload, VENDOR_ID, PRODUCT_ID):
return
#check if the right device was connected
#getting devicePath
newDevicePath = self.GetMyDevicePath()
if not newDevicePath:
#wrong device
return
self.SetupHidThread(newDevicePath)
def __start__(self):
        #Bind plugin to RegisterDeviceNotification message
eg.Bind("System.DeviceAttached", self.ReconnectDevice)
newDevicePath = self.GetMyDevicePath()
if not newDevicePath:
#device not found
self.PrintError(Text.errorFind)
else:
self.SetupHidThread(newDevicePath)
def __stop__(self):
if self.thread:
self.thread.AbortThread()
#unbind from RegisterDeviceNotification message
eg.Unbind("System.DeviceAttached", self.ReconnectDevice)
def GetAddressBytes(address):
x, a0 = divmod(address, 256)
a2, a1 = divmod(x, 256)
return chr(a2) + chr(a1) + chr(a0)
def GetStringFromAddress(address, formatted = False):
valueStr = ""
for i in range(11, -1, -1):
x = (address >> i*2) & 0x03
valueStr += str(x + 1)
if formatted:
if i == 4:
valueStr += " - "
if i == 8:
valueStr += " "
return valueStr
def GetAddressFromString(addressString):
address = 0
for i in range(12):
address <<= 2
address += int(addressString[i]) - 1
return address
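# Hedged round-trip example: an FS20 address is 12 base-4 digits written as
# "1".."4", so GetAddressFromString("111111111111") == 0 and
# GetStringFromAddress(0, formatted=True) == "1111 1111 - 1111".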
def GetTimeCodeByIndex(index):
if index < 16:
return index
return index + ((index / 8) - 1) * 8
def GetTimeCodeIndex(timeCode):
if timeCode < 16:
return timeCode
return timeCode - (timeCode / 16) * 8
def GetTimeValue(timeCode):
return (2**(timeCode / 16)) * 0.25 * (timeCode % 16)
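# Worked example derived from the code above: time code 24 encodes
# 2**(24 / 16) * 0.25 * (24 % 16) = 2 * 0.25 * 8 = 4.0 seconds, and
# GetTimeCodeIndex(24) == 16 maps it back to the slider index.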
def FormatTimeValue(timeValue):
if timeValue >= 3600:
hours = math.floor(timeValue / 3600)
minutes = math.floor((timeValue - (hours * 3600)) / 60)
seconds = timeValue - (hours * 3600) - minutes * 60
return "%0d h %00d m %00d s" % (hours, minutes, seconds)
elif timeValue >= 60:
minutes = math.floor(timeValue / 60)
seconds = timeValue - minutes * 60
return "%00d m %00d s" % (minutes, seconds)
else:
return "%0.02f sec" % timeValue
class ActionBase(eg.ActionBase):
defaultAddress = 0 #GetAddressFromString("123412342222")
funcCode = None
name = None
description = None
def AddAddressControl(self, panel, address):
if address is None:
address = self.defaultAddress
maskedCtrl = masked.TextCtrl(
parent=panel,
mask="#### #### - ####",
defaultValue="1111 1111 - 1111",
excludeChars="056789",
formatcodes="F",
validRequired=False,
)
maskedCtrl.SetValue(GetStringFromAddress(address))
panel.AddLine(self.plugin.text.address, maskedCtrl)
return maskedCtrl
def AddTimerControl(self, panel, timeCode):
def TimerCallback(value):
timeCodeForValue = GetTimeCodeByIndex(value)
return FormatTimeValue(GetTimeValue(timeCodeForValue))
timerCtrl = eg.Slider(
panel,
value=GetTimeCodeIndex(timeCode),
min=0,
max=111,
minLabel=FormatTimeValue(0),
maxLabel=FormatTimeValue(15360),
style = wx.SL_TOP,
size=(300,-1),
levelCallback=TimerCallback
)
timerCtrl.SetMinSize((300, -1))
panel.AddLine(self.plugin.text.timerValue, timerCtrl)
return timerCtrl
def AddRepeatControl(self, panel, repeatCount):
repeatCtrl = eg.Slider(
panel,
value=repeatCount,
min=1,
max=255,
minLabel="1",
maxLabel="255",
style = wx.SL_TOP,
size=(300,-1),
)
repeatCtrl.SetMinSize((300, -1))
panel.AddLine(self.plugin.text.repeat, repeatCtrl)
return repeatCtrl
def AddLevelControl(self, panel, level):
def LevelCallback(value):
return "%.02f%%" % (value * 100.00 / 16)
levelCtrl = eg.Slider(
panel,
value=level,
min=0,
max=16,
minLabel="0.00%",
maxLabel="100.00%",
style = wx.SL_AUTOTICKS|wx.SL_TOP,
size=(300,-1),
levelCallback=LevelCallback
)
levelCtrl.SetMinSize((300, -1))
panel.AddLine(self.plugin.text.level, levelCtrl)
return levelCtrl
class SimpleAction(ActionBase):
"""Base class for all action that only take an address as input
"""
def __call__(self, address):
self.plugin.SendRawCommand("\x01\x06\xf1" + GetAddressBytes(address) + chr(self.funcCode))
def GetLabel(self, address):
return self.text.labelFormat.format(GetStringFromAddress(address, True))
def Configure(self, address = None):
panel = eg.ConfigPanel()
maskedCtrl = self.AddAddressControl(panel, address)
while panel.Affirmed():
address = GetAddressFromString(maskedCtrl.GetPlainValue())
ActionBase.defaultAddress = address
panel.SetResult(address)
class RepeatAction(ActionBase):
"""Base class for all action that take an address and repeat Count
"""
def __call__(self, address, repeatCount):
self.plugin.SendRawCommand("\x01\x07\xf2" + GetAddressBytes(address) + chr(self.funcCode) + "\x00" + chr(repeatCount), repeatCount * TIME_OUT)
def GetLabel(self, address, repeatCount):
label = self.text.labelFormat.format(GetStringFromAddress(address, True))
if repeatCount > 1:
label = self.plugin.text.repeatSuffix.format(label, repeatCount)
return label
def Configure(self, address = None, repeatCount = 1):
panel = eg.ConfigPanel()
maskedCtrl = self.AddAddressControl(panel, address)
repeatCtrl = self.AddRepeatControl(panel, repeatCount)
while panel.Affirmed():
address = GetAddressFromString(maskedCtrl.GetPlainValue())
ActionBase.defaultAddress = address
panel.SetResult(address, repeatCtrl.GetValue())
class RepeatTimerValueAction(ActionBase):
"""Base class for all action that take an address, timer value and repeat Count
"""
def __call__(self, address, timeCode, repeatCount):
self.plugin.SendRawCommand("\x01\x07\xf2" + GetAddressBytes(address) + chr(self.funcCode) + chr(timeCode) + chr(repeatCount), repeatCount * TIME_OUT)
def GetLabel(self, address, timeCode, repeatCount):
label = self.text.labelFormat.format(GetStringFromAddress(address, True), FormatTimeValue(GetTimeValue(timeCode)))
if repeatCount > 1:
label = self.plugin.text.repeatSuffix.format(label, repeatCount)
return label
def Configure(self, address = None, timeCode = 0, repeatCount = 1):
panel = eg.ConfigPanel()
maskedCtrl = self.AddAddressControl(panel, address)
timerCtrl = self.AddTimerControl(panel, timeCode)
repeatCtrl = self.AddRepeatControl(panel, repeatCount)
while panel.Affirmed():
address = GetAddressFromString(maskedCtrl.GetPlainValue())
ActionBase.defaultAddress = address
panel.SetResult(address, GetTimeCodeByIndex(timerCtrl.GetValue()), repeatCtrl.GetValue())
class TimerValueAction(ActionBase):
"""Base class for all action that take an address and timer value
"""
def __call__(self, address, timeCode):
self.plugin.SendRawCommand("\x01\x06\xf1" + GetAddressBytes(address) + chr(self.funcCode) + chr(timeCode))
def GetLabel(self, address, timeCode):
return self.text.labelFormat.format(GetStringFromAddress(address, True), FormatTimeValue(GetTimeValue(timeCode)))
def Configure(self, address = None, timeCode = 0):
panel = eg.ConfigPanel()
maskedCtrl = self.AddAddressControl(panel, address)
timerCtrl = self.AddTimerControl(panel, timeCode)
while panel.Affirmed():
address = GetAddressFromString(maskedCtrl.GetPlainValue())
ActionBase.defaultAddress = address
panel.SetResult(address, GetTimeCodeByIndex(timerCtrl.GetValue()))
class Dim(ActionBase):
class Text:
labelFormat = "Set dim-level to {1:.02f}% for {0}"
name = "Dim"
description = "Sets dim level immediately"
text = Text
def __call__(self, address, level):
self.plugin.SendRawCommand("\x01\x06\xf1" + GetAddressBytes(address) + chr(level))
def GetLabel(self, address, level):
return self.text.labelFormat.format(GetStringFromAddress(address, True), (level * 100.00 / 16))
def Configure(self, address = None, level = 8):
panel = eg.ConfigPanel()
maskedCtrl = self.AddAddressControl(panel, address)
levelCtrl = self.AddLevelControl(panel, level)
while panel.Affirmed():
address = GetAddressFromString(maskedCtrl.GetPlainValue())
ActionBase.defaultAddress = address
panel.SetResult(address, levelCtrl.GetValue())
class DimTimer(ActionBase):
class Text:
labelFormat = "Set dim-level to {1:.02f}% for {0} in {2}"
name = "Dim in timer value"
description = "Sets the dim level in timer value"
text = Text
def __call__(self, address, level, timeCode):
self.plugin.SendRawCommand("\x01\x06\xf1" + GetAddressBytes(address) + chr(level + 32) + chr(timeCode))
def GetLabel(self, address, level, timeCode):
return self.text.labelFormat.format(GetStringFromAddress(address, True), (level * 100.00 / 16), FormatTimeValue(GetTimeValue(timeCode)))
def Configure(self, address = None, level = 8, timeCode = 0):
panel = eg.ConfigPanel()
maskedCtrl = self.AddAddressControl(panel, address)
levelCtrl = self.AddLevelControl(panel, level)
timerCtrl = self.AddTimerControl(panel, timeCode)
while panel.Affirmed():
address = GetAddressFromString(maskedCtrl.GetPlainValue())
ActionBase.defaultAddress = address
panel.SetResult(
address,
levelCtrl.GetValue(),
GetTimeCodeByIndex(timerCtrl.GetValue()))
| gpl-2.0 |
neilLasrado/erpnext | erpnext/crm/doctype/investor/investor.py | 1 | 2929 | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
import frappe
from frappe.model.document import Document
from frappe.contacts.address_and_contact import load_address_and_contact
from erpnext.accounts.party import validate_party_accounts, get_dashboard_info, get_timeline_data # keep this
class Investor(Document):
def onload(self):
"""Load address and contacts in `__onload`"""
load_address_and_contact(self)
def after_insert(self):
self.update_lead_status()
def on_update(self):
if self.flags.old_lead != self.party_name:
self.update_lead_status()
if self.investor_from == "Lead" and self.party_name:
self.create_lead_address()
self.create_lead_contact()
if self.investor_from == "Opportunity" and self.party_name:
self.create_opportunity_address()
self.create_opportunity_contact()
def update_lead_status(self):
'''If Investor created from Lead, update lead status to "Investor"'''
if self.investor_from == "Lead" and self.party_name:
frappe.db.set_value('Lead', self.party_name, 'status', 'Investor', update_modified=False)
def create_lead_address(self):
# assign lead address to investor (if already not set)
address_names = frappe.get_all('Dynamic Link', filters={
"parenttype": "Address",
"link_doctype": "Lead",
"link_name": self.party_name
}, fields=["parent as name"])
for address_name in address_names:
address = frappe.get_doc('Address', address_name.get('name'))
if not address.has_link('Investor', self.name):
address.append('links', dict(link_doctype='Investor', link_name=self.name))
address.save()
def create_lead_contact(self):
# assign lead contact to investor (if already not set)
contact_names = frappe.get_all('Dynamic Link', filters={
"parenttype": "Contact",
"link_doctype": "Lead",
"link_name": self.party_name
}, fields=["parent as name"])
for contact_name in contact_names:
contact = frappe.get_doc('Contact', contact_name.get('name'))
if not contact.has_link('Investor', self.name):
contact.append('links', dict(link_doctype='Investor', link_name=self.name))
contact.save()
def create_opportunity_address(self):
customer_address = frappe.db.get_value("Opportunity", self.party_name, "customer_address")
if customer_address:
address = frappe.get_doc('Address', customer_address)
if not address.has_link('Investor', self.name):
address.append('links', dict(link_doctype='Investor', link_name=self.name))
address.save()
def create_opportunity_contact(self):
contact_person = frappe.db.get_value("Opportunity", self.party_name, "contact_person")
if contact_person:
contact = frappe.get_doc('Contact', contact_person)
if not contact.has_link('Investor', self.name):
contact.append('links', dict(link_doctype='Investor', link_name=self.name))
contact.save()
| gpl-3.0 |
ThinkingBridge/platform_external_chromium_org | media/tools/constrained_network_server/cns.py | 168 | 17314 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Constrained Network Server. Serves files with supplied network constraints.
The CNS exposes a web based API allowing network constraints to be imposed on
file serving.
TODO(dalecurtis): Add some more docs here.
"""
import logging
from logging import handlers
import mimetypes
import optparse
import os
import signal
import sys
import threading
import time
import urllib
import urllib2
import traffic_control
try:
import cherrypy
except ImportError:
print ('CNS requires CherryPy v3 or higher to be installed. Please install\n'
'and try again. On Linux: sudo apt-get install python-cherrypy3\n')
sys.exit(1)
# Add webm file types to mimetypes map since cherrypy's default type is text.
mimetypes.types_map['.webm'] = 'video/webm'
# Default logging is ERROR. Use --verbose to enable DEBUG logging.
_DEFAULT_LOG_LEVEL = logging.ERROR
# Default port to serve the CNS on.
_DEFAULT_SERVING_PORT = 9000
# Default port range for constrained use.
_DEFAULT_CNS_PORT_RANGE = (50000, 51000)
# Default number of seconds before a port can be torn down.
_DEFAULT_PORT_EXPIRY_TIME_SECS = 5 * 60
class PortAllocator(object):
"""Dynamically allocates/deallocates ports with a given set of constraints."""
def __init__(self, port_range, expiry_time_secs=5 * 60):
"""Sets up initial state for the Port Allocator.
Args:
port_range: Range of ports available for allocation.
expiry_time_secs: Amount of time in seconds before constrained ports are
cleaned up.
"""
self._port_range = port_range
self._expiry_time_secs = expiry_time_secs
# Keeps track of ports we've used, the creation key, and the last request
# time for the port so they can be cached and cleaned up later.
self._ports = {}
# Locks port creation and cleanup. TODO(dalecurtis): If performance becomes
# an issue a per-port based lock system can be used instead.
self._port_lock = threading.RLock()
def Get(self, key, new_port=False, **kwargs):
"""Sets up a constrained port using the requested parameters.
Requests for the same key and constraints will result in a cached port being
returned if possible, subject to new_port.
Args:
key: Used to cache ports with the given constraints.
new_port: Whether to create a new port or use an existing one if possible.
**kwargs: Constraints to pass into traffic control.
Returns:
None if no port can be setup or the port number of the constrained port.
"""
with self._port_lock:
# Check port key cache to see if this port is already setup. Update the
# cache time and return the port if so. Performance isn't a concern here,
# so just iterate over ports dict for simplicity.
full_key = (key,) + tuple(kwargs.values())
if not new_port:
for port, status in self._ports.iteritems():
if full_key == status['key']:
self._ports[port]['last_update'] = time.time()
return port
# Cleanup ports on new port requests. Do it after the cache check though
# so we don't erase and then setup the same port.
if self._expiry_time_secs > 0:
self.Cleanup(all_ports=False)
# Performance isn't really an issue here, so just iterate over the port
# range to find an unused port. If no port is found, None is returned.
for port in xrange(self._port_range[0], self._port_range[1]):
if port in self._ports:
continue
if self._SetupPort(port, **kwargs):
kwargs['port'] = port
self._ports[port] = {'last_update': time.time(), 'key': full_key,
'config': kwargs}
return port
def _SetupPort(self, port, **kwargs):
"""Setup network constraints on port using the requested parameters.
Args:
port: The port number to setup network constraints on.
**kwargs: Network constraints to set up on the port.
Returns:
True if setting the network constraints on the port was successful, false
otherwise.
"""
kwargs['port'] = port
try:
cherrypy.log('Setting up port %d' % port)
traffic_control.CreateConstrainedPort(kwargs)
return True
except traffic_control.TrafficControlError as e:
cherrypy.log('Error: %s\nOutput: %s' % (e.msg, e.error))
return False
def Cleanup(self, all_ports, request_ip=None):
"""Cleans up expired ports, or if all_ports=True, all allocated ports.
By default, ports which haven't been used for self._expiry_time_secs are
torn down. If all_ports=True then they are torn down regardless.
Args:
all_ports: Should all ports be torn down regardless of expiration?
      request_ip: Tear down ports matching the IP address regardless of expiration.
"""
with self._port_lock:
now = time.time()
# Use .items() instead of .iteritems() so we can delete keys w/o error.
for port, status in self._ports.items():
expired = now - status['last_update'] > self._expiry_time_secs
matching_ip = request_ip and status['key'][0].startswith(request_ip)
if all_ports or expired or matching_ip:
cherrypy.log('Cleaning up port %d' % port)
self._DeletePort(port)
del self._ports[port]
def _DeletePort(self, port):
"""Deletes network constraints on port.
Args:
port: The port number associated with the network constraints.
"""
try:
traffic_control.DeleteConstrainedPort(self._ports[port]['config'])
except traffic_control.TrafficControlError as e:
cherrypy.log('Error: %s\nOutput: %s' % (e.msg, e.error))
class ConstrainedNetworkServer(object):
"""A CherryPy-based HTTP server for serving files with network constraints."""
def __init__(self, options, port_allocator):
"""Sets up initial state for the CNS.
Args:
options: optparse based class returned by ParseArgs()
port_allocator: A port allocator instance.
"""
self._options = options
self._port_allocator = port_allocator
@cherrypy.expose
def Cleanup(self):
"""Cleans up all the ports allocated using the request IP address.
When requesting a constrained port, the cherrypy.request.remote.ip is used
as a key for that port (in addition to other request parameters). Such
ports created for the same IP address are removed.
"""
cherrypy.log('Cleaning up ports allocated by %s.' %
cherrypy.request.remote.ip)
self._port_allocator.Cleanup(all_ports=False,
request_ip=cherrypy.request.remote.ip)
@cherrypy.expose
def ServeConstrained(self, f=None, bandwidth=None, latency=None, loss=None,
new_port=False, no_cache=False, **kwargs):
"""Serves the requested file with the requested constraints.
Subsequent requests for the same constraints from the same IP will share the
previously created port unless new_port equals True. If no constraints
are provided the file is served as is.
Args:
f: path relative to http root of file to serve.
bandwidth: maximum allowed bandwidth for the provided port (integer
in kbit/s).
latency: time to add to each packet (integer in ms).
loss: percentage of packets to drop (integer, 0-100).
new_port: whether to use a new port for this request or not.
      no_cache: Set response's cache-control to no-cache.
"""
if no_cache:
response = cherrypy.response
response.headers['Pragma'] = 'no-cache'
response.headers['Cache-Control'] = 'no-cache'
# CherryPy is a bit wonky at detecting parameters, so just make them all
# optional and validate them ourselves.
if not f:
raise cherrypy.HTTPError(400, 'Invalid request. File must be specified.')
# Check existence early to prevent wasted constraint setup.
self._CheckRequestedFileExist(f)
# If there are no constraints, just serve the file.
if bandwidth is None and latency is None and loss is None:
return self._ServeFile(f)
constrained_port = self._GetConstrainedPort(
f, bandwidth=bandwidth, latency=latency, loss=loss, new_port=new_port,
**kwargs)
# Build constrained URL using the constrained port and original URL
# parameters except the network constraints (bandwidth, latency, and loss).
constrained_url = self._GetServerURL(f, constrained_port,
no_cache=no_cache, **kwargs)
# Redirect request to the constrained port.
cherrypy.log('Redirect to %s' % constrained_url)
cherrypy.lib.cptools.redirect(constrained_url, internal=False)
def _CheckRequestedFileExist(self, f):
"""Checks if the requested file exists, raises HTTPError otherwise."""
if self._options.local_server_port:
self._CheckFileExistOnLocalServer(f)
else:
self._CheckFileExistOnServer(f)
def _CheckFileExistOnServer(self, f):
"""Checks if requested file f exists to be served by this server."""
# Sanitize and check the path to prevent www-root escapes.
sanitized_path = os.path.abspath(os.path.join(self._options.www_root, f))
if not sanitized_path.startswith(self._options.www_root):
raise cherrypy.HTTPError(403, 'Invalid file requested.')
if not os.path.exists(sanitized_path):
raise cherrypy.HTTPError(404, 'File not found.')
def _CheckFileExistOnLocalServer(self, f):
"""Checks if requested file exists on local server hosting files."""
test_url = self._GetServerURL(f, self._options.local_server_port)
try:
cherrypy.log('Check file exist using URL: %s' % test_url)
return urllib2.urlopen(test_url) is not None
except Exception:
raise cherrypy.HTTPError(404, 'File not found on local server.')
def _ServeFile(self, f):
"""Serves the file as an http response."""
if self._options.local_server_port:
redirect_url = self._GetServerURL(f, self._options.local_server_port)
cherrypy.log('Redirect to %s' % redirect_url)
cherrypy.lib.cptools.redirect(redirect_url, internal=False)
else:
sanitized_path = os.path.abspath(os.path.join(self._options.www_root, f))
return cherrypy.lib.static.serve_file(sanitized_path)
def _GetServerURL(self, f, port, **kwargs):
"""Returns a URL for local server to serve the file on given port.
Args:
f: file name to serve on local server. Relative to www_root.
port: Local server port (it can be a configured constrained port).
      kwargs: extra parameters passed in the URL.
"""
url = '%s?f=%s&' % (cherrypy.url(), f)
if self._options.local_server_port:
url = '%s/%s?' % (
cherrypy.url().replace('ServeConstrained', self._options.www_root), f)
url = url.replace(':%d' % self._options.port, ':%d' % port)
extra_args = urllib.urlencode(kwargs)
if extra_args:
url += extra_args
return url
def _GetConstrainedPort(self, f=None, bandwidth=None, latency=None, loss=None,
new_port=False, **kwargs):
"""Creates or gets a port with specified network constraints.
See ServeConstrained() for more details.
"""
    # Validate inputs; each constraint must parse as an integer within its range.
bandwidth = self._ParseIntParameter(
bandwidth, 'Invalid bandwidth constraint.', lambda x: x > 0)
latency = self._ParseIntParameter(
latency, 'Invalid latency constraint.', lambda x: x >= 0)
loss = self._ParseIntParameter(
loss, 'Invalid loss constraint.', lambda x: x <= 100 and x >= 0)
redirect_port = self._options.port
if self._options.local_server_port:
redirect_port = self._options.local_server_port
start_time = time.time()
# Allocate a port using the given constraints. If a port with the requested
# key and kwargs already exist then reuse that port.
constrained_port = self._port_allocator.Get(
cherrypy.request.remote.ip, server_port=redirect_port,
interface=self._options.interface, bandwidth=bandwidth, latency=latency,
loss=loss, new_port=new_port, file=f, **kwargs)
cherrypy.log('Time to set up port %d = %.3fsec.' %
(constrained_port, time.time() - start_time))
if not constrained_port:
raise cherrypy.HTTPError(503, 'Service unavailable. Out of ports.')
return constrained_port
def _ParseIntParameter(self, param, msg, check):
"""Returns integer value of param and verifies it satisfies the check.
Args:
param: Parameter name to check.
msg: Message in error if raised.
check: Check to verify the parameter value.
Returns:
None if param is None, integer value of param otherwise.
Raises:
cherrypy.HTTPError if param can not be converted to integer or if it does
not satisfy the check.
"""
if param:
try:
int_value = int(param)
if check(int_value):
return int_value
      except (ValueError, TypeError):
pass
raise cherrypy.HTTPError(400, msg)
def ParseArgs():
"""Define and parse the command-line arguments."""
parser = optparse.OptionParser()
parser.add_option('--expiry-time', type='int',
default=_DEFAULT_PORT_EXPIRY_TIME_SECS,
help=('Number of seconds before constrained ports expire '
'and are cleaned up. 0=Disabled. Default: %default'))
parser.add_option('--port', type='int', default=_DEFAULT_SERVING_PORT,
help='Port to serve the API on. Default: %default')
parser.add_option('--port-range', default=_DEFAULT_CNS_PORT_RANGE,
help=('Range of ports for constrained serving. Specify as '
'a comma separated value pair. Default: %default'))
parser.add_option('--interface', default='eth0',
help=('Interface to setup constraints on. Use lo for a '
'local client. Default: %default'))
parser.add_option('--socket-timeout', type='int',
default=cherrypy.server.socket_timeout,
help=('Number of seconds before a socket connection times '
'out. Default: %default'))
parser.add_option('--threads', type='int',
default=cherrypy._cpserver.Server.thread_pool,
help=('Number of threads in the thread pool. Default: '
'%default'))
parser.add_option('--www-root', default='',
help=('Directory root to serve files from. If --local-'
'server-port is used, the path is appended to the '
'redirected URL of local server. Defaults to the '
'current directory (if --local-server-port is not '
'used): %s' % os.getcwd()))
parser.add_option('--local-server-port', type='int',
help=('Optional local server port to host files.'))
parser.add_option('-v', '--verbose', action='store_true', default=False,
help='Turn on verbose output.')
options = parser.parse_args()[0]
# Convert port range into the desired tuple format.
try:
if isinstance(options.port_range, str):
options.port_range = [int(port) for port in options.port_range.split(',')]
except ValueError:
parser.error('Invalid port range specified.')
if options.expiry_time < 0:
parser.error('Invalid expiry time specified.')
# Convert the path to an absolute to remove any . or ..
if not options.local_server_port:
if not options.www_root:
options.www_root = os.getcwd()
options.www_root = os.path.abspath(options.www_root)
_SetLogger(options.verbose)
return options
def _SetLogger(verbose):
file_handler = handlers.RotatingFileHandler('cns.log', 'a', 10000000, 10)
file_handler.setFormatter(logging.Formatter('[%(threadName)s] %(message)s'))
log_level = _DEFAULT_LOG_LEVEL
if verbose:
log_level = logging.DEBUG
file_handler.setLevel(log_level)
cherrypy.log.error_log.addHandler(file_handler)
cherrypy.log.access_log.addHandler(file_handler)
def Main():
"""Configure and start the ConstrainedNetworkServer."""
options = ParseArgs()
try:
traffic_control.CheckRequirements()
except traffic_control.TrafficControlError as e:
cherrypy.log(e.msg)
return
cherrypy.config.update({'server.socket_host': '::',
'server.socket_port': options.port})
if options.threads:
cherrypy.config.update({'server.thread_pool': options.threads})
if options.socket_timeout:
cherrypy.config.update({'server.socket_timeout': options.socket_timeout})
# Setup port allocator here so we can call cleanup on failures/exit.
pa = PortAllocator(options.port_range, expiry_time_secs=options.expiry_time)
try:
cherrypy.quickstart(ConstrainedNetworkServer(options, pa))
finally:
# Disable Ctrl-C handler to prevent interruption of cleanup.
signal.signal(signal.SIGINT, lambda signal, frame: None)
pa.Cleanup(all_ports=True)
if __name__ == '__main__':
Main()
| bsd-3-clause |
doismellburning/django | tests/signals/tests.py | 311 | 10273 | from __future__ import unicode_literals
from django.db import models
from django.db.models import signals
from django.dispatch import receiver
from django.test import TestCase
from django.utils import six
from .models import Author, Book, Car, Person
class BaseSignalTest(TestCase):
def setUp(self):
# Save up the number of connected signals so that we can check at the
# end that all the signals we register get properly unregistered (#9989)
self.pre_signals = (
len(signals.pre_save.receivers),
len(signals.post_save.receivers),
len(signals.pre_delete.receivers),
len(signals.post_delete.receivers),
)
def tearDown(self):
# Check that all our signals got disconnected properly.
post_signals = (
len(signals.pre_save.receivers),
len(signals.post_save.receivers),
len(signals.pre_delete.receivers),
len(signals.post_delete.receivers),
)
self.assertEqual(self.pre_signals, post_signals)
class SignalTests(BaseSignalTest):
def test_model_pre_init_and_post_init(self):
data = []
def pre_init_callback(sender, args, **kwargs):
data.append(kwargs['kwargs'])
signals.pre_init.connect(pre_init_callback)
def post_init_callback(sender, instance, **kwargs):
data.append(instance)
signals.post_init.connect(post_init_callback)
p1 = Person(first_name="John", last_name="Doe")
self.assertEqual(data, [{}, p1])
def test_save_signals(self):
data = []
def pre_save_handler(signal, sender, instance, **kwargs):
data.append(
(instance, kwargs.get("raw", False))
)
def post_save_handler(signal, sender, instance, **kwargs):
data.append(
(instance, kwargs.get("created"), kwargs.get("raw", False))
)
signals.pre_save.connect(pre_save_handler, weak=False)
signals.post_save.connect(post_save_handler, weak=False)
try:
p1 = Person.objects.create(first_name="John", last_name="Smith")
self.assertEqual(data, [
(p1, False),
(p1, True, False),
])
data[:] = []
p1.first_name = "Tom"
p1.save()
self.assertEqual(data, [
(p1, False),
(p1, False, False),
])
data[:] = []
# Calling an internal method purely so that we can trigger a "raw" save.
p1.save_base(raw=True)
self.assertEqual(data, [
(p1, True),
(p1, False, True),
])
data[:] = []
p2 = Person(first_name="James", last_name="Jones")
p2.id = 99999
p2.save()
self.assertEqual(data, [
(p2, False),
(p2, True, False),
])
data[:] = []
p2.id = 99998
p2.save()
self.assertEqual(data, [
(p2, False),
(p2, True, False),
])
finally:
signals.pre_save.disconnect(pre_save_handler)
signals.post_save.disconnect(post_save_handler)
def test_delete_signals(self):
data = []
def pre_delete_handler(signal, sender, instance, **kwargs):
data.append(
(instance, instance.id is None)
)
# #8285: signals can be any callable
class PostDeleteHandler(object):
def __init__(self, data):
self.data = data
def __call__(self, signal, sender, instance, **kwargs):
self.data.append(
(instance, instance.id is None)
)
post_delete_handler = PostDeleteHandler(data)
signals.pre_delete.connect(pre_delete_handler, weak=False)
signals.post_delete.connect(post_delete_handler, weak=False)
try:
p1 = Person.objects.create(first_name="John", last_name="Smith")
p1.delete()
self.assertEqual(data, [
(p1, False),
(p1, False),
])
data[:] = []
p2 = Person(first_name="James", last_name="Jones")
p2.id = 99999
p2.save()
p2.id = 99998
p2.save()
p2.delete()
self.assertEqual(data, [
(p2, False),
(p2, False)
])
data[:] = []
self.assertQuerysetEqual(
Person.objects.all(), [
"James Jones",
],
six.text_type
)
finally:
signals.pre_delete.disconnect(pre_delete_handler)
signals.post_delete.disconnect(post_delete_handler)
def test_decorators(self):
data = []
@receiver(signals.pre_save, weak=False)
def decorated_handler(signal, sender, instance, **kwargs):
data.append(instance)
@receiver(signals.pre_save, sender=Car, weak=False)
def decorated_handler_with_sender_arg(signal, sender, instance, **kwargs):
data.append(instance)
try:
c1 = Car.objects.create(make="Volkswagon", model="Passat")
self.assertEqual(data, [c1, c1])
finally:
signals.pre_save.disconnect(decorated_handler)
signals.pre_save.disconnect(decorated_handler_with_sender_arg, sender=Car)
def test_save_and_delete_signals_with_m2m(self):
data = []
def pre_save_handler(signal, sender, instance, **kwargs):
data.append('pre_save signal, %s' % instance)
if kwargs.get('raw'):
data.append('Is raw')
def post_save_handler(signal, sender, instance, **kwargs):
data.append('post_save signal, %s' % instance)
if 'created' in kwargs:
if kwargs['created']:
data.append('Is created')
else:
data.append('Is updated')
if kwargs.get('raw'):
data.append('Is raw')
def pre_delete_handler(signal, sender, instance, **kwargs):
data.append('pre_delete signal, %s' % instance)
data.append('instance.id is not None: %s' % (instance.id is not None))
def post_delete_handler(signal, sender, instance, **kwargs):
data.append('post_delete signal, %s' % instance)
data.append('instance.id is not None: %s' % (instance.id is not None))
signals.pre_save.connect(pre_save_handler, weak=False)
signals.post_save.connect(post_save_handler, weak=False)
signals.pre_delete.connect(pre_delete_handler, weak=False)
signals.post_delete.connect(post_delete_handler, weak=False)
try:
a1 = Author.objects.create(name='Neal Stephenson')
self.assertEqual(data, [
"pre_save signal, Neal Stephenson",
"post_save signal, Neal Stephenson",
"Is created"
])
data[:] = []
b1 = Book.objects.create(name='Snow Crash')
self.assertEqual(data, [
"pre_save signal, Snow Crash",
"post_save signal, Snow Crash",
"Is created"
])
data[:] = []
# Assigning and removing to/from m2m shouldn't generate an m2m signal.
b1.authors = [a1]
self.assertEqual(data, [])
b1.authors = []
self.assertEqual(data, [])
finally:
signals.pre_save.disconnect(pre_save_handler)
signals.post_save.disconnect(post_save_handler)
signals.pre_delete.disconnect(pre_delete_handler)
signals.post_delete.disconnect(post_delete_handler)
def test_disconnect_in_dispatch(self):
"""
Test that signals that disconnect when being called don't mess future
dispatching.
"""
class Handler(object):
def __init__(self, param):
self.param = param
self._run = False
def __call__(self, signal, sender, **kwargs):
self._run = True
signal.disconnect(receiver=self, sender=sender)
a, b = Handler(1), Handler(2)
signals.post_save.connect(a, sender=Person, weak=False)
signals.post_save.connect(b, sender=Person, weak=False)
Person.objects.create(first_name='John', last_name='Smith')
self.assertTrue(a._run)
self.assertTrue(b._run)
self.assertEqual(signals.post_save.receivers, [])
class LazyModelRefTest(BaseSignalTest):
def setUp(self):
super(LazyModelRefTest, self).setUp()
self.received = []
def receiver(self, **kwargs):
self.received.append(kwargs)
def test_invalid_sender_model_name(self):
with self.assertRaisesMessage(ValueError,
"Specified sender must either be a model or a "
"model name of the 'app_label.ModelName' form."):
signals.post_init.connect(self.receiver, sender='invalid')
def test_already_loaded_model(self):
signals.post_init.connect(
self.receiver, sender='signals.Book', weak=False
)
try:
instance = Book()
self.assertEqual(self.received, [{
'signal': signals.post_init,
'sender': Book,
'instance': instance
}])
finally:
signals.post_init.disconnect(self.receiver, sender=Book)
def test_not_loaded_model(self):
signals.post_init.connect(
self.receiver, sender='signals.Created', weak=False
)
try:
class Created(models.Model):
pass
instance = Created()
self.assertEqual(self.received, [{
'signal': signals.post_init, 'sender': Created, 'instance': instance
}])
finally:
signals.post_init.disconnect(self.receiver, sender=Created)
| bsd-3-clause |
a-doumoulakis/tensorflow | tensorflow/contrib/opt/python/training/moving_average_optimizer.py | 84 | 5839 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Moving average optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import moving_averages
from tensorflow.python.training import optimizer
from tensorflow.python.training import saver
class MovingAverageOptimizer(optimizer.Optimizer):
"""Optimizer that computes a moving average of the variables.
Empirically it has been found that using the moving average of the trained
parameters of a deep network is better than using its trained parameters
directly. This optimizer allows you to compute this moving average and swap
the variables at save time so that any code outside of the training loop will
use by default the averaged values instead of the original ones.
Example of usage:
```python
  # Encapsulate your favorite optimizer (here the momentum one)
  # inside the MovingAverageOptimizer.
  opt = tf.train.MomentumOptimizer(learning_rate, FLAGS.momentum)
  opt = tf.contrib.opt.MovingAverageOptimizer(opt)
  # Then create your model and all its variables.
  model = build_model()
  # Add the training op that optimizes using opt.
  # This needs to be called before swapping_saver().
  opt.minimize(cost, var_list)
  # Then create your saver like this:
  saver = opt.swapping_saver()
  # Pass it to your training loop.
slim.learning.train(
model,
...
saver=saver)
```
Note that for evaluation, the normal saver should be used instead of
swapping_saver().
"""
def __init__(self, opt, average_decay=0.9999, num_updates=None,
sequential_update=True):
"""Construct a new MovingAverageOptimizer.
Args:
opt: A tf.Optimizer that will be used to compute and apply gradients.
average_decay: Float. Decay to use to maintain the moving averages
of trained variables.
See tf.train.ExponentialMovingAverage for details.
num_updates: Optional count of number of updates applied to variables.
See tf.train.ExponentialMovingAverage for details.
sequential_update: Bool. If False, will compute the moving average at the
same time as the model is updated, potentially doing
benign data races.
If True, will update the moving average after gradient
updates.
"""
self._optimizer = opt
self._ema = moving_averages.ExponentialMovingAverage(
average_decay, num_updates=num_updates)
self._variable_map = None
self._sequential_update = sequential_update
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
train_op = self._optimizer.apply_gradients(
grads_and_vars, global_step=global_step, name=name)
var_list = [x[1] for x in grads_and_vars if x[0] is not None]
self._variable_map = {}
if self._sequential_update:
with ops.control_dependencies([train_op]):
ma_op = self._ema.apply(var_list)
else:
ma_op = self._ema.apply(var_list)
for v in var_list:
v_avg = self._ema.average(v)
self._variable_map[v.op.name] = v_avg
self._variable_map[v_avg.op.name] = v
return control_flow_ops.group(train_op, ma_op, name="train_with_avg")
def swapping_saver(self, var_list=None, name='swapping_saver', **kwargs):
"""Create a saver swapping moving averages and variables.
You should use this saver during training. It will save the moving averages
of the trained parameters under the original parameter names. For
evaluations or inference you should use a regular saver and it will
    automatically use the moving averages for the trained variables.
You must call this function after all variables have been created and after
you have called Optimizer.minimize().
Args:
var_list: List of variables to save, as per `Saver()`.
If set to None, will save all the variables that have been
created before this call.
name: The name of the saver.
**kwargs: Keyword arguments of `Saver()`.
Returns:
A `tf.train.Saver` object.
Raises:
RuntimeError: If apply_gradients or minimize has not been called before.
"""
if self._variable_map is None:
raise RuntimeError('Must call apply_gradients or minimize before '
'creating the swapping_saver')
if var_list is None:
var_list = variables.global_variables()
if not isinstance(var_list, dict):
var_list = saver.BaseSaverBuilder.OpListToDict(var_list)
# Now swap variables and moving averages
swapped_var_list = {}
for k, v in six.iteritems(var_list):
v_swap = self._variable_map.get(v.op.name, None)
if v_swap:
swapped_var_list[k] = v_swap
else:
swapped_var_list[k] = v
# Build the swapping saver.
return saver.Saver(swapped_var_list, name=name, **kwargs)
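# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): a minimal end-to-end run
# showing the required minimize() -> swapping_saver() ordering. It assumes a
# TF 1.x environment where this class is exposed as
# tf.contrib.opt.MovingAverageOptimizer; the variable and checkpoint names
# are placeholders.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
  import tensorflow as tf

  x = tf.get_variable('x', initializer=5.0)
  loss = tf.square(x - 1.0)
  opt = tf.contrib.opt.MovingAverageOptimizer(
      tf.train.GradientDescentOptimizer(0.1), average_decay=0.9)
  train_op = opt.minimize(loss)   # must run before swapping_saver()
  saver = opt.swapping_saver()    # writes the averaged value under 'x'
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(20):
      sess.run(train_op)
    saver.save(sess, '/tmp/averaged_model')  # checkpoint holds the averages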
| apache-2.0 |
Serag8/Bachelor | google_appengine/lib/django-1.3/django/db/backends/mysql/creation.py | 311 | 3019 | from django.db.backends.creation import BaseDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
# This dictionary maps Field objects to their associated MySQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
data_types = {
'AutoField': 'integer AUTO_INCREMENT',
'BooleanField': 'bool',
'CharField': 'varchar(%(max_length)s)',
'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'datetime',
'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'double precision',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'char(15)',
'NullBooleanField': 'bool',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer UNSIGNED',
'PositiveSmallIntegerField': 'smallint UNSIGNED',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'longtext',
'TimeField': 'time',
}
def sql_table_creation_suffix(self):
suffix = []
if self.connection.settings_dict['TEST_CHARSET']:
suffix.append('CHARACTER SET %s' % self.connection.settings_dict['TEST_CHARSET'])
if self.connection.settings_dict['TEST_COLLATION']:
suffix.append('COLLATE %s' % self.connection.settings_dict['TEST_COLLATION'])
return ' '.join(suffix)
def sql_for_inline_foreign_key_references(self, field, known_models, style):
"All inline references are pending under MySQL"
return [], True
def sql_for_inline_many_to_many_references(self, model, field, style):
from django.db import models
opts = model._meta
qn = self.connection.ops.quote_name
table_output = [
' %s %s %s,' %
(style.SQL_FIELD(qn(field.m2m_column_name())),
style.SQL_COLTYPE(models.ForeignKey(model).db_type(connection=self.connection)),
style.SQL_KEYWORD('NOT NULL')),
' %s %s %s,' %
(style.SQL_FIELD(qn(field.m2m_reverse_name())),
style.SQL_COLTYPE(models.ForeignKey(field.rel.to).db_type(connection=self.connection)),
style.SQL_KEYWORD('NOT NULL'))
]
deferred = [
(field.m2m_db_table(), field.m2m_column_name(), opts.db_table,
opts.pk.column),
(field.m2m_db_table(), field.m2m_reverse_name(),
field.rel.to._meta.db_table, field.rel.to._meta.pk.column)
]
return table_output, deferred
| mit |
nlu90/heron | heron/instance/tests/python/utils/topology_context_impl_unittest.py | 5 | 2275 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
import unittest
from heron.instance.src.python.utils.topology import TopologyContextImpl
import heron.instance.tests.python.utils.mock_generator as mock_generator
import heron.instance.tests.python.mock_protobuf as mock_protobuf
class TopologyContextImplTest(unittest.TestCase):
def setUp(self):
self.context = TopologyContextImpl(
config={},
topology=mock_protobuf.get_mock_topology(),
task_to_component={},
my_task_id="task_id",
metrics_collector=None,
topo_pex_path="path.to.pex")
def test_task_hook(self):
task_hook = mock_generator.MockTaskHook()
self.assertFalse(len(self.context.task_hooks) > 0)
self.context.add_task_hook(task_hook)
self.assertTrue(len(self.context.task_hooks) > 0)
self.context.invoke_hook_prepare()
self.context.invoke_hook_emit(None, None, None)
self.assertTrue(task_hook.emit_called)
self.context.invoke_hook_spout_ack(None, 0.1)
self.assertTrue(task_hook.spout_ack_called)
self.context.invoke_hook_spout_fail(None, 0.1)
self.assertTrue(task_hook.spout_fail_called)
self.context.invoke_hook_bolt_execute(None, 0.1)
self.assertTrue(task_hook.bolt_exec_called)
self.context.invoke_hook_bolt_ack(None, 0.1)
self.assertTrue(task_hook.bolt_ack_called)
self.context.invoke_hook_bolt_fail(None, 0.1)
self.assertTrue(task_hook.bolt_fail_called)
| apache-2.0 |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/reportlab-3.2.0/tests/test_platypus_wrapping.py | 14 | 3840 | #Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
"""Tests for context-dependent indentation
"""
__version__='''$Id: test_platypus_indents.py 3660 2010-02-08 18:17:33Z damian $'''
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
import sys, os, random
from operator import truth
import unittest
from reportlab.pdfbase.pdfmetrics import stringWidth
from reportlab.platypus.paraparser import ParaParser
from reportlab.platypus.flowables import Flowable
from reportlab.lib.colors import Color
from reportlab.lib.units import cm
from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY
from reportlab.lib.utils import _className
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.platypus.paragraph import Paragraph
from reportlab.platypus.frames import Frame
from reportlab.platypus.doctemplate \
import PageTemplate, BaseDocTemplate, Indenter, FrameBreak, NextPageTemplate
from reportlab.platypus import tableofcontents
from reportlab.platypus.tableofcontents import TableOfContents
from reportlab.platypus.tables import TableStyle, Table
from reportlab.platypus.paragraph import *
from reportlab.platypus.paragraph import _getFragWords
from reportlab.platypus.flowables import Spacer
def myMainPageFrame(canvas, doc):
"The page frame used for all PDF documents."
canvas.saveState()
canvas.rect(2.5*cm, 2.5*cm, 15*cm, 25*cm)
canvas.setFont('Times-Roman', 12)
pageNumber = canvas.getPageNumber()
canvas.drawString(10*cm, cm, str(pageNumber))
canvas.restoreState()
class MyDocTemplate(BaseDocTemplate):
_invalidInitArgs = ('pageTemplates',)
def __init__(self, filename, **kw):
frame1 = Frame(2.5*cm, 2.5*cm, 15*cm, 25*cm, id='F1')
self.allowSplitting = 0
BaseDocTemplate.__init__(self, filename, **kw)
template1 = PageTemplate('normal', [frame1], myMainPageFrame)
frame2 = Frame(2.5*cm, 16*cm, 15*cm, 10*cm, id='F2', showBoundary=1)
frame3 = Frame(2.5*cm, 2.5*cm, 15*cm, 10*cm, id='F3', showBoundary=1)
template2 = PageTemplate('updown', [frame2, frame3])
self.addPageTemplates([template1, template2])
class WrappingTestCase(unittest.TestCase):
"Test wrapping of long urls"
def test0(self):
"This makes one long multi-page paragraph."
# Build story.
story = []
styleSheet = getSampleStyleSheet()
h1 = styleSheet['Heading1']
h1.spaceBefore = 18
bt = styleSheet['BodyText']
bt.spaceBefore = 6
story.append(Paragraph('Test of paragraph wrapping',h1))
story.append(Spacer(18,18))
txt = "Normally we wrap paragraphs by looking for spaces between the words. However, with long technical command references and URLs, sometimes this gives ugly results. We attempt to split really long words on certain tokens: slashes, dots etc."
story.append(Paragraph(txt,bt))
story.append(Paragraph('This is an attempt to break long URLs sanely. Here is a file name: <font face="Courier">C:\\Windows\\System32\\Drivers\\etc\\hosts</font>. ', bt))
story.append(Paragraph('This paragraph has a URL (basically, a word) too long to fit on one line, so it just overflows. http://some-really-long-site.somewhere-verbose.com/webthingies/framework/xc4987236hgsdlkafh/foo?format=dingbats&content=rubbish. Ideally, we would wrap it in the middle.', bt))
doc = MyDocTemplate(outputfile('test_platypus_wrapping.pdf'))
doc.multiBuild(story)
#noruntests
def makeSuite():
return makeSuiteForClasses(WrappingTestCase)
#noruntests
if __name__ == "__main__":
unittest.TextTestRunner().run(makeSuite())
printLocation()
| mit |
ivanlmj/PyCaptive | app/modules/checksys.py | 1 | 2085 |
import socket
import subprocess as sp
import sys
from app import app
class Components():
def __init__(self):
self._binaries = (app.config['CHECKSYS_DICT']["IPTABLES"], app.config['CHECKSYS_DICT']["CONNTRACK"])
self._services = None
if app.config['TEST_MODE']:
self._services = {
"mongodb":(
app.config['CHECKSYS_DICT']["MONGODB_IP"], app.config['CHECKSYS_DICT']["MONGODB_PORT"]
)
}
else:
self._services = {
"nginx_redir_gunicorn":(
app.config['CHECKSYS_DICT']["NGINX_IP"], app.config['CHECKSYS_DICT']["NGINX_REDIR"]
),
"nginx_gunicorn":(
app.config['CHECKSYS_DICT']["NGINX_IP"], app.config['CHECKSYS_DICT']["NGINX_GUNICORN"]
),
"mongodb":(
app.config['CHECKSYS_DICT']["MONDODB_IP"], app.config['CHECKSYS_DICT']["MONGODB_PORT"]
)
}
def binaries(self):
""" Check existence of binaries. """
results = dict()
for b in self._binaries:
result = sp.call(["which", b], stderr=sp.DEVNULL, stdout=sp.DEVNULL)
result_bin = b.split('/')[-1]
if result == 0:
results[result_bin] = 0
else:
results[result_bin] = 1
return results
def services(self):
""" Check status of network services. """
# TODO: must ensure that this is working....
results = dict()
for k,v in self._services.items():
s = socket.socket()
try:
s.connect((v[0], int(v[1])))
results[k] = 0
except Exception:
results[k] = 1
finally:
s.close()
return results
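if __name__ == '__main__':
    # Editor's sketch (assumes the PyCaptive Flask app imported above is
    # available and its CHECKSYS_DICT config is populated). In both result
    # dicts, 0 means OK and 1 means missing binary / unreachable service.
    components = Components()
    print(components.binaries())   # e.g. {'iptables': 0, 'conntrack': 0}
    print(components.services())   # e.g. {'mongodb': 0}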
| gpl-3.0 |
emersonsoftware/ansiblefork | lib/ansible/utils/module_docs_fragments/openstack.py | 26 | 4023 | # Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard openstack documentation fragment
DOCUMENTATION = '''
options:
cloud:
description:
- Named cloud to operate against. Provides default values for I(auth) and
I(auth_type). This parameter is not needed if I(auth) is provided or if
OpenStack OS_* environment variables are present.
required: false
auth:
description:
- Dictionary containing auth information as needed by the cloud's auth
plugin strategy. For the default I(password) plugin, this would contain
I(auth_url), I(username), I(password), I(project_name) and any
information about domains if the cloud supports them. For other plugins,
this param will need to contain whatever parameters that auth plugin
requires. This parameter is not needed if a named cloud is provided or
OpenStack OS_* environment variables are present.
required: false
auth_type:
description:
- Name of the auth plugin to use. If the cloud uses something other than
password authentication, the name of the plugin should be indicated here
and the contents of the I(auth) parameter should be updated accordingly.
required: false
default: password
region_name:
description:
- Name of the region.
required: false
availability_zone:
description:
- Name of the availability zone.
required: false
wait:
description:
- Should ansible wait until the requested resource is complete.
required: false
default: "yes"
choices: ["yes", "no"]
timeout:
description:
- How long should ansible wait for the requested resource.
required: false
default: 180
api_timeout:
description:
- How long should the socket layer wait before timing out for API calls.
If this is omitted, nothing will be passed to the requests library.
required: false
default: None
validate_certs:
description:
- Whether or not SSL API requests should be verified.
required: false
default: True
aliases: ['verify']
cacert:
description:
- A path to a CA Cert bundle that can be used as part of verifying
SSL API requests.
required: false
default: None
cert:
description:
- A path to a client certificate to use as part of the SSL transaction.
required: false
default: None
key:
description:
- A path to a client key to use as part of the SSL transaction.
required: false
default: None
endpoint_type:
description:
- Endpoint URL type to fetch from the service catalog.
choices: [public, internal, admin]
required: false
default: public
requirements:
- python >= 2.7
- shade
notes:
- The standard OpenStack environment variables, such as C(OS_USERNAME)
may be used instead of providing explicit values.
- Auth information is driven by os-client-config, which means that values
can come from a yaml config file in /etc/ansible/openstack.yaml,
/etc/openstack/clouds.yaml or ~/.config/openstack/clouds.yaml, then from
standard environment variables, then finally by explicit parameters in
plays. More information can be found at
U(http://docs.openstack.org/developer/os-client-config)
'''
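# Editor's sketch: a hypothetical task consuming this shared fragment
# (module name `os_server` and all values chosen for illustration):
#
#   - os_server:
#       cloud: mycloud
#       auth:
#         auth_url: https://keystone.example.com:5000/v3
#         username: demo
#         password: secret
#         project_name: demo
#       validate_certs: true
#       timeout: 300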
| gpl-3.0 |
grupozeety/CDerpnext | erpnext/accounts/report/profit_and_loss_statement/profit_and_loss_statement.py | 7 | 1698 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt
from erpnext.accounts.report.financial_statements import (get_period_list, get_columns, get_data)
def execute(filters=None):
period_list = get_period_list(filters.fiscal_year, filters.periodicity)
income = get_data(filters.company, "Income", "Credit", period_list,
accumulated_values=filters.accumulated_values, ignore_closing_entries=True)
expense = get_data(filters.company, "Expense", "Debit", period_list,
accumulated_values=filters.accumulated_values, ignore_closing_entries=True)
net_profit_loss = get_net_profit_loss(income, expense, period_list, filters.company)
data = []
data.extend(income or [])
data.extend(expense or [])
if net_profit_loss:
data.append(net_profit_loss)
columns = get_columns(filters.periodicity, period_list, filters.accumulated_values, filters.company)
return columns, data
def get_net_profit_loss(income, expense, period_list, company):
if income and expense:
total = 0
net_profit_loss = {
"account_name": "'" + _("Net Profit / Loss") + "'",
"account": None,
"warn_if_negative": True,
"currency": frappe.db.get_value("Company", company, "default_currency")
}
has_value = False
for period in period_list:
net_profit_loss[period.key] = flt(income[-2][period.key] - expense[-2][period.key], 3)
if net_profit_loss[period.key]:
has_value=True
total += flt(net_profit_loss[period.key])
net_profit_loss["total"] = total
if has_value:
return net_profit_loss
| agpl-3.0 |
Kussie/HTPC-Manager | libs/requests/packages/urllib3/util/request.py | 1008 | 2089 | from base64 import b64encode
from ..packages.six import b
ACCEPT_ENCODING = 'gzip,deflate'
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
basic_auth=None, proxy_basic_auth=None, disable_cache=None):
"""
Shortcuts for generating request headers.
:param keep_alive:
If ``True``, adds 'connection: keep-alive' header.
:param accept_encoding:
Can be a boolean, list, or string.
``True`` translates to 'gzip,deflate'.
List will get joined by comma.
String will be used as provided.
:param user_agent:
String representing the user-agent you want, such as
"python-urllib3/0.6"
:param basic_auth:
Colon-separated username:password string for 'authorization: basic ...'
auth header.
:param proxy_basic_auth:
Colon-separated username:password string for 'proxy-authorization: basic ...'
auth header.
:param disable_cache:
If ``True``, adds 'cache-control: no-cache' header.
Example::
>>> make_headers(keep_alive=True, user_agent="Batman/1.0")
{'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
>>> make_headers(accept_encoding=True)
{'accept-encoding': 'gzip,deflate'}
"""
headers = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ','.join(accept_encoding)
else:
accept_encoding = ACCEPT_ENCODING
headers['accept-encoding'] = accept_encoding
if user_agent:
headers['user-agent'] = user_agent
if keep_alive:
headers['connection'] = 'keep-alive'
if basic_auth:
headers['authorization'] = 'Basic ' + \
b64encode(b(basic_auth)).decode('utf-8')
if proxy_basic_auth:
headers['proxy-authorization'] = 'Basic ' + \
b64encode(b(proxy_basic_auth)).decode('utf-8')
if disable_cache:
headers['cache-control'] = 'no-cache'
return headers
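# Editor's sketch: the auth-related parameters, which the docstring examples
# above do not cover. Shown as comments because the relative six import makes
# this module runnable only inside the urllib3 package:
#
#     from urllib3.util.request import make_headers
#     hdrs = make_headers(basic_auth='user:pass', disable_cache=True)
#     assert hdrs['authorization'].startswith('Basic ')
#     assert hdrs['cache-control'] == 'no-cache'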
| mit |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/LegacyWindowsExploits/Exploits/EAFU 2.2.0/EAFU_SSL.py | 1 | 1272 | import re, socket, string, sys
if __name__ == "__main__":
if len(sys.argv) < 3:
sys.exit(2)
target_address = (sys.argv[1])
target_port = int(sys.argv[2])
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((target_address, target_port))
ssl_sock = socket.ssl(s)
# print the cert info
#print repr(ssl_sock.server())
#print repr(ssl_sock.issuer())
# Set a simple HTTP request -- use httplib in actual code.
ssl_sock.write("""GET / HTTP/1.1\r\nHost:\r\n\r\n""")
# Read a chunk of data. Will not necessarily
# read all the data returned by the server.
data = ssl_sock.read()
# what did we get back?
# print data
# parse the reply for the version number
# Server: WDaemon/9.5.1
if re.search('Server: WDaemon/\d\d?\.\d\.\d', data):
m = re.search('Server: WDaemon\/(\d\d?\.\d\.\d)', data)
print "\n\n\nWorldClient version is: " + m.group(1)
elif re.search('Server: Microsoft-IIS\/(\d\d?\.\d).*MDaemon\/WorldClient v(\d\d?\.\d\.\d)', data):
n = re.search('Server: Microsoft-IIS\/(\d\d?\.\d).*MDaemon\/WorldClient v(\d\d?\.\d\.\d)', data)
print "\n\n\nWorldClient version and IIS version is: " + n.group(2) + n.group(1)
# Note that you need to close the underlying socket, not the SSL object.
del ssl_sock
s.close()
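# Editor's note: the two regexes above key off version banners of the form
#   "Server: WDaemon/9.5.1"
#   "Server: Microsoft-IIS/6.0 ... MDaemon/WorldClient v9.5.1"
# (version numbers illustrative); any other reply falls through silently.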
| unlicense |
rd37/horizon | horizon/conf/__init__.py | 77 | 2063 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from django.utils.functional import empty # noqa
from django.utils.functional import LazyObject # noqa
class LazySettings(LazyObject):
def _setup(self, name=None):
from django.conf import settings
from horizon.conf.default import HORIZON_CONFIG as DEFAULT_CONFIG # noqa
HORIZON_CONFIG = copy.copy(DEFAULT_CONFIG)
HORIZON_CONFIG.update(settings.HORIZON_CONFIG)
# Ensure we always have our exception configuration...
for exc_category in ['unauthorized', 'not_found', 'recoverable']:
if exc_category not in HORIZON_CONFIG['exceptions']:
default_exc_config = DEFAULT_CONFIG['exceptions'][exc_category]
HORIZON_CONFIG['exceptions'][exc_category] = default_exc_config
# Ensure our password validator always exists...
if 'regex' not in HORIZON_CONFIG['password_validator']:
default_pw_regex = DEFAULT_CONFIG['password_validator']['regex']
HORIZON_CONFIG['password_validator']['regex'] = default_pw_regex
if 'help_text' not in HORIZON_CONFIG['password_validator']:
default_pw_help = DEFAULT_CONFIG['password_validator']['help_text']
HORIZON_CONFIG['password_validator']['help_text'] = default_pw_help
self._wrapped = HORIZON_CONFIG
def __getitem__(self, name, fallback=None):
if self._wrapped is empty:
self._setup(name)
return self._wrapped.get(name, fallback)
HORIZON_CONFIG = LazySettings()
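# Editor's sketch: LazySettings defers the Django settings import until the
# first lookup, so importing this module stays cheap. A hypothetical use:
#
#     from horizon.conf import HORIZON_CONFIG
#     regex = HORIZON_CONFIG['password_validator']['regex']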
| apache-2.0 |
hassanabidpk/django | tests/gis_tests/test_geoip.py | 73 | 5275 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import unittest
import warnings
from unittest import skipUnless
from django.conf import settings
from django.contrib.gis.geoip import HAS_GEOIP
from django.contrib.gis.geos import HAS_GEOS, GEOSGeometry
from django.test import ignore_warnings
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
if HAS_GEOIP:
from django.contrib.gis.geoip import GeoIP, GeoIPException
# Note: Requires use of both the GeoIP country and city datasets.
# The GEOIP_PATH setting should be the only setting set (the directory
# should contain links or the actual database files 'GeoIP.dat' and
# 'GeoLiteCity.dat').
@skipUnless(HAS_GEOIP and getattr(settings, "GEOIP_PATH", None),
"GeoIP is required along with the GEOIP_PATH setting.")
@ignore_warnings(category=RemovedInDjango20Warning)
class GeoIPTest(unittest.TestCase):
addr = '128.249.1.1'
fqdn = 'tmc.edu'
def test01_init(self):
"Testing GeoIP initialization."
g1 = GeoIP() # Everything inferred from GeoIP path
path = settings.GEOIP_PATH
g2 = GeoIP(path, 0) # Passing in data path explicitly.
g3 = GeoIP.open(path, 0) # MaxMind Python API syntax.
for g in (g1, g2, g3):
self.assertTrue(g._country)
self.assertTrue(g._city)
# Only passing in the location of one database.
city = os.path.join(path, 'GeoLiteCity.dat')
cntry = os.path.join(path, 'GeoIP.dat')
g4 = GeoIP(city, country='')
self.assertIsNone(g4._country)
g5 = GeoIP(cntry, city='')
self.assertIsNone(g5._city)
# Improper parameters.
bad_params = (23, 'foo', 15.23)
for bad in bad_params:
self.assertRaises(GeoIPException, GeoIP, cache=bad)
if isinstance(bad, six.string_types):
e = GeoIPException
else:
e = TypeError
self.assertRaises(e, GeoIP, bad, 0)
def test02_bad_query(self):
"Testing GeoIP query parameter checking."
cntry_g = GeoIP(city='<foo>')
# No city database available, these calls should fail.
self.assertRaises(GeoIPException, cntry_g.city, 'google.com')
self.assertRaises(GeoIPException, cntry_g.coords, 'yahoo.com')
# Non-string query should raise TypeError
self.assertRaises(TypeError, cntry_g.country_code, 17)
self.assertRaises(TypeError, cntry_g.country_name, GeoIP)
def test03_country(self):
"Testing GeoIP country querying methods."
g = GeoIP(city='<foo>')
for query in (self.fqdn, self.addr):
for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name):
self.assertEqual('US', func(query), 'Failed for func %s and query %s' % (func, query))
for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name):
self.assertEqual('United States', func(query), 'Failed for func %s and query %s' % (func, query))
self.assertEqual({'country_code': 'US', 'country_name': 'United States'},
g.country(query))
@skipUnless(HAS_GEOS, "Geos is required")
def test04_city(self):
"Testing GeoIP city querying methods."
g = GeoIP(country='<foo>')
for query in (self.fqdn, self.addr):
# Country queries should still work.
for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name):
self.assertEqual('US', func(query))
for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name):
self.assertEqual('United States', func(query))
self.assertEqual({'country_code': 'US', 'country_name': 'United States'},
g.country(query))
# City information dictionary.
d = g.city(query)
self.assertEqual('USA', d['country_code3'])
self.assertEqual('Houston', d['city'])
self.assertEqual('TX', d['region'])
self.assertEqual(713, d['area_code'])
geom = g.geos(query)
self.assertIsInstance(geom, GEOSGeometry)
lon, lat = (-95.4010, 29.7079)
lat_lon = g.lat_lon(query)
lat_lon = (lat_lon[1], lat_lon[0])
for tup in (geom.tuple, g.coords(query), g.lon_lat(query), lat_lon):
self.assertAlmostEqual(lon, tup[0], 4)
self.assertAlmostEqual(lat, tup[1], 4)
def test05_unicode_response(self):
"Testing that GeoIP strings are properly encoded, see #16553."
g = GeoIP()
d = g.city("duesseldorf.de")
self.assertEqual('Düsseldorf', d['city'])
d = g.country('200.26.205.1')
# Some databases have only unaccented countries
self.assertIn(d['country_name'], ('Curaçao', 'Curacao'))
def test_deprecation_warning(self):
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
GeoIP()
self.assertEqual(len(warns), 1)
msg = str(warns[0].message)
self.assertIn('django.contrib.gis.geoip is deprecated', msg)
| bsd-3-clause |
amnet04/ALECMAPREADER1 | funcionesCV_recurrentes.py | 1 | 4438 | import numpy as np
import pandas
import cv2
def cargar_imagen(archivo):
'''
    Load two matrices of the image, one grayscale and one in color, and
    return a dictionary with both versions.
'''
imagen = {}
imagen['gris'] = cv2.imread(archivo,0)
imagen['color'] = cv2.imread(archivo)
return(imagen)
def dilatar_imagen(img, umbral_blanco, umbral_negro, dim_kernel, iteraciones):
ret,thresh = cv2.threshold(img, umbral_blanco,umbral_negro,cv2.THRESH_BINARY_INV)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, dim_kernel)
dilatada= cv2.dilate(thresh,kernel,iterations = iteraciones)
return(dilatada)
def erosionar_imagen(img, umbral_blanco, umbral_negro, dim_kernel, iteraciones):
ret,thresh = cv2.threshold(img, umbral_blanco,umbral_negro,cv2.THRESH_BINARY_INV)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, dim_kernel)
erosionada = cv2.erode(thresh,kernel,iterations = iteraciones)
return(erosionada)
def dibujar_rectangulos(img,x1,y1,x2,y2,color,ancho_bordes,archivo=''):
cv2.rectangle(img,(x1,y1),(x2,y2),(color),ancho_bordes)
# if archivo !='':
# cv2.imwrite(archivo,img)
def cortar_imagen(img,x1,x2,y1,y2):
corte = img[y1:y2,x1:x2]
img_cortada = {}
img_cortada['im'] = corte
img_cortada['x1'] = x1
img_cortada['y1'] = y1
img_cortada['x2'] = x2
img_cortada['y2'] = y2
return(img_cortada)
def bw_otsu(img, umbral_blanco,limite,blur=0,blur_ori =0):
'''
    blur is the blur kernel shape as a tuple, e.g. (5,5);
    blur_ori is an integer (the Gaussian sigma). If neither is given
    (the 0 defaults), no blur is applied.
'''
    if blur in (0, (0, 0)):
blureada = img
else:
blureada = cv2.GaussianBlur(img,blur,blur_ori)
ret,th = cv2.threshold(blureada,umbral_blanco,limite,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
return (th)
def bw_adapta(img,limite,tam,sh):
th = cv2.adaptiveThreshold(img,limite,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,tam,sh)
return (th)
def ver_imagen(img, title='just looking'):
cv2.imshow(title, img)
cv2.waitKey()
cv2.destroyAllWindows()
def detectar(template, imagen, max_var_thresh):
'''
    Detect whether the template matches somewhere in the map image. Returns
    the top-left and bottom-right corners of the best match plus the matched
    region, or (None, max_val) when the score is below max_var_thresh.
'''
imagen_gris = cv2.cvtColor(imagen, cv2.COLOR_RGB2GRAY)
imagen_bw = bw_adapta(imagen_gris, 255, 71, 30)
h, w = template.shape
coincidencia = cv2.matchTemplate(template, imagen_bw, cv2.TM_CCOEFF)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(coincidencia)
x1 = max_loc[0]
x2 = max_loc[0] + w
y1 = max_loc[1]
y2 = max_loc[1] + h
if max_val < max_var_thresh:
#cv2.imwrite('Pruebas/tast.jpg',imagen[y1:y2,x1:x2])
return(None, max_val)
else:
#print (max_val)
sup_izq = (x1,y1)
inf_der = (x2,y2)
roi = imagen[y1:y2,x1:x2]
return(sup_izq, inf_der, roi)
def detectar_recursivo(template, imagen, thresh):
imagen_gris = cv2.cvtColor(imagen, cv2.COLOR_RGB2GRAY)
imagen_bw = bw_adapta(imagen_gris, 255, 71, 30)
h, w = template.shape
res = cv2.matchTemplate(imagen_bw,template,cv2.TM_CCOEFF_NORMED)
loc = np.where(res>=thresh)
puntos = []
for punto in zip(*loc[::-1]):
puntos.append(punto)
return (puntos, h, w)
def detectar_area_contornos(imagen,
umbral_blanco,
umbral_negro,
dim_kernel,
iteraciones,
w, h):
if dim_kernel != (0,0):
imagen_dilatada = dilatar_imagen(imagen,
umbral_blanco,
umbral_negro,
dim_kernel,
iteraciones)
else:
imagen_dilatada = imagen
imagen, contours, hierarchy = cv2.findContours(imagen_dilatada,
cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)
areas = []
for contour in contours:
[x,y,wc,hc] = cv2.boundingRect(contour)
x1 = x
y1 = y
x2 = x+wc
y2 = y+hc
if (wc > w) and (hc > h):
areas.append((x1, y1 , x2, y2))
return (areas)
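if __name__ == '__main__':
    # Editor's sketch: template matching against a map image. The file names
    # are placeholders; detectar() expects a grayscale template, a BGR map
    # image and a TM_CCOEFF score threshold, and returns either
    # (top_left, bottom_right, roi) or (None, max_val) on a weak match.
    mapa = cargar_imagen('mapa.jpg')
    template = cv2.imread('template.png', 0)
    resultado = detectar(template, mapa['color'], max_var_thresh=5e6)
    if resultado[0] is not None:
        sup_izq, inf_der, roi = resultado
        print('match between', sup_izq, 'and', inf_der)
    else:
        print('no match; best score was', resultado[1])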
| mit |
takluyver/readthedocs.org | readthedocs/restapi/views/footer_views.py | 6 | 4195 | from django.shortcuts import get_object_or_404
from django.template import Context, loader as template_loader
from django.conf import settings
from django.core.context_processors import csrf
from rest_framework import decorators, permissions
from rest_framework.renderers import JSONPRenderer, JSONRenderer, BrowsableAPIRenderer
from rest_framework.response import Response
from bookmarks.models import Bookmark
from builds.models import Version
from projects.models import Project
@decorators.api_view(['GET'])
@decorators.permission_classes((permissions.AllowAny,))
@decorators.renderer_classes((JSONRenderer, JSONPRenderer, BrowsableAPIRenderer))
def footer_html(request):
project_slug = request.GET.get('project', None)
version_slug = request.GET.get('version', None)
page_slug = request.GET.get('page', None)
theme = request.GET.get('theme', False)
docroot = request.GET.get('docroot', '')
subproject = request.GET.get('subproject', False)
source_suffix = request.GET.get('source_suffix', '.rst')
new_theme = (theme == "sphinx_rtd_theme")
using_theme = (theme == "default")
project = get_object_or_404(Project, slug=project_slug)
version = get_object_or_404(Version.objects.public(request.user, project=project, only_active=False), slug=version_slug)
main_project = project.main_language_project or project
if page_slug and page_slug != "index":
if main_project.documentation_type == "sphinx_htmldir" or main_project.documentation_type == "mkdocs":
path = page_slug + "/"
elif main_project.documentation_type == "sphinx_singlehtml":
path = "index.html#document-" + page_slug
else:
path = page_slug + ".html"
else:
path = ""
host = request.get_host()
if settings.PRODUCTION_DOMAIN in host and request.user.is_authenticated():
show_bookmarks = True
try:
bookmark = Bookmark.objects.get(
user=request.user,
project=project,
version=version,
page=page_slug,
)
except (Bookmark.DoesNotExist, Bookmark.MultipleObjectsReturned, Exception):
bookmark = None
else:
show_bookmarks = False
bookmark = None
if version.type == 'tag' and version.project.has_pdf(version.slug):
print_url = 'https://keminglabs.com/print-the-docs/quote?project={project}&version={version}'.format(
project=project.slug,
version=version.slug,
)
else:
print_url = None
show_promo = True
# User is a gold user, no promos for them!
if request.user.is_authenticated():
if request.user.gold.count() or request.user.goldonce.count():
show_promo = False
# Explicit promo disabling
if project.slug in getattr(settings, 'DISABLE_PROMO_PROJECTS', []):
show_promo = False
# A GoldUser has mapped this project
if project.gold_owners.count():
show_promo = False
context = Context({
'show_bookmarks': show_bookmarks,
'bookmark': bookmark,
'project': project,
'path': path,
'downloads': version.get_downloads(pretty=True),
'current_version': version.slug,
'versions': project.ordered_active_versions(),
'main_project': main_project,
'translations': main_project.translations.all(),
'current_language': project.language,
'using_theme': using_theme,
'new_theme': new_theme,
'settings': settings,
'subproject': subproject,
'print_url': print_url,
'github_edit_url': version.get_github_url(docroot, page_slug, source_suffix, 'edit'),
'github_view_url': version.get_github_url(docroot, page_slug, source_suffix, 'view'),
'bitbucket_url': version.get_bitbucket_url(docroot, page_slug, source_suffix),
})
context.update(csrf(request))
html = template_loader.get_template('restapi/footer.html').render(context)
return Response({
'html': html,
'version_active': version.active,
'version_supported': version.supported,
'promo': show_promo,
})
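# Editor's note: a successful call returns JSON shaped like
#   {"html": "<div>...</div>", "version_active": true,
#    "version_supported": true, "promo": false}
# with the html fragment rendered from restapi/footer.html and the context
# assembled above (field names per the Response call).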
| mit |
RO-ny9/python-for-android | python-build/python-libs/xmpppy/doc/examples/commandsbot.py | 87 | 7937 | #!/usr/bin/python
""" The example of using xmpppy's Ad-Hoc Commands (JEP-0050) implementation.
"""
import xmpp
from xmpp.protocol import *
options = {
    'JID': 'bot@example.com',
'Password': '********',
}
class TestCommand(xmpp.commands.Command_Handler_Prototype):
""" Example class. You should read source if you wish to understate how it works. This one
actually does some calculations."""
name = 'testcommand'
description = 'Circle calculations'
def __init__(self, jid=''):
""" Initialize some internals. Set the first request handler to self.calcTypeForm.
"""
xmpp.commands.Command_Handler_Prototype.__init__(self,jid)
self.initial = {
'execute': self.initialForm
}
def initialForm(self, conn, request):
""" Assign a session id and send the first form. """
sessionid = self.getSessionID()
self.sessions[sessionid] = {
'jid':request.getFrom(),
'data':{'type':None}
}
# simulate that the client sent sessionid, so calcTypeForm will be able
# to continue
request.getTag(name="command").setAttr('sessionid', sessionid)
return self.calcTypeForm(conn, request)
def calcTypeForm(self, conn, request):
""" Send first form to the requesting user. """
# get the session data
sessionid = request.getTagAttr('command','sessionid')
session = self.sessions[sessionid]
# What to do when a user sends us a response? Note, that we should always
# include 'execute', as it is a default action when requester does not send
# exact action to do (should be set to the same as 'next' or 'complete' fields)
session['actions'] = {
'cancel': self.cancel,
'next': self.calcTypeFormAccept,
'execute': self.calcTypeFormAccept,
}
# The form to send
calctypefield = xmpp.DataField(
name='calctype',
desc='Calculation Type',
value=session['data']['type'],
options=[
['Calculate the diameter of a circle','circlediameter'],
['Calculate the area of a circle','circlearea']
],
typ='list-single',
required=1)
# We set label attribute... seems that the xmpppy.DataField cannot do that
calctypefield.setAttr('label', 'Calculation Type')
form = xmpp.DataForm(
title='Select type of operation',
data=[
'Use the combobox to select the type of calculation you would like'\
'to do, then click Next.',
calctypefield])
# Build a reply with the form
reply = request.buildReply('result')
replypayload = [
xmpp.Node('actions',
attrs={'execute':'next'},
payload=[xmpp.Node('next')]),
form]
reply.addChild(
name='command',
namespace=NS_COMMANDS,
attrs={
'node':request.getTagAttr('command','node'),
'sessionid':sessionid,
'status':'executing'},
payload=replypayload)
self._owner.send(reply) # Question: self._owner or conn?
raise xmpp.NodeProcessed
def calcTypeFormAccept(self, conn, request):
""" Load the calcType form filled in by requester, then reply with
the second form. """
# get the session data
sessionid = request.getTagAttr('command','sessionid')
session = self.sessions[sessionid]
# load the form
node = request.getTag(name='command').getTag(name='x',namespace=NS_DATA)
form = xmpp.DataForm(node=node)
# retrieve the data
session['data']['type'] = form.getField('calctype').getValue()
# send second form
return self.calcDataForm(conn, request)
def calcDataForm(self, conn, request, notavalue=None):
""" Send a form asking for diameter. """
# get the session data
sessionid = request.getTagAttr('command','sessionid')
session = self.sessions[sessionid]
# set the actions taken on requester's response
session['actions'] = {
'cancel': self.cancel,
'prev': self.calcTypeForm,
'next': self.calcDataFormAccept,
'execute': self.calcDataFormAccept
}
# create a form
radiusfield = xmpp.DataField(desc='Radius',name='radius',typ='text-single')
radiusfield.setAttr('label', 'Radius')
form = xmpp.DataForm(
title = 'Enter the radius',
data=[
'Enter the radius of the circle (numbers only)',
radiusfield])
# build a reply stanza
reply = request.buildReply('result')
replypayload = [
xmpp.Node('actions',
attrs={'execute':'complete'},
payload=[xmpp.Node('complete'),xmpp.Node('prev')]),
form]
if notavalue:
replypayload.append(xmpp.Node('note',
attrs={'type': 'warn'},
payload=['You have to enter valid number.']))
reply.addChild(
name='command',
namespace=NS_COMMANDS,
attrs={
'node':request.getTagAttr('command','node'),
'sessionid':request.getTagAttr('command','sessionid'),
'status':'executing'},
payload=replypayload)
self._owner.send(reply)
raise xmpp.NodeProcessed
def calcDataFormAccept(self, conn, request):
""" Load the calcType form filled in by requester, then reply with the result. """
# get the session data
sessionid = request.getTagAttr('command','sessionid')
session = self.sessions[sessionid]
# load the form
node = request.getTag(name='command').getTag(name='x',namespace=NS_DATA)
form = xmpp.DataForm(node=node)
# retrieve the data; if the entered value is not a number, return to second stage
try:
value = float(form.getField('radius').getValue())
        except (TypeError, ValueError):
self.calcDataForm(conn, request, notavalue=True)
# calculate the answer
from math import pi
if session['data']['type'] == 'circlearea':
result = (value**2) * pi
else:
result = 2 * value * pi
# build the result form
form = xmpp.DataForm(
typ='result',
data=[xmpp.DataField(desc='result', name='result', value=result)])
# build the reply stanza
reply = request.buildReply('result')
reply.addChild(
name='command',
namespace=NS_COMMANDS,
attrs={
'node':request.getTagAttr('command','node'),
'sessionid':sessionid,
'status':'completed'},
payload=[form])
self._owner.send(reply)
# erase the data about session
del self.sessions[sessionid]
raise xmpp.NodeProcessed
def cancel(self, conn, request):
""" Requester canceled the session, send a short reply. """
# get the session id
sessionid = request.getTagAttr('command','sessionid')
# send the reply
reply = request.buildReply('result')
reply.addChild(
name='command',
namespace=NS_COMMANDS,
attrs={
'node':request.getTagAttr('command','node'),
'sessionid':sessionid,
'status':'cancelled'})
self._owner.send(reply)
# erase the data about session
del self.sessions[sessionid]
raise xmpp.NodeProcessed
class ConnectionError: pass
class AuthorizationError: pass
class NotImplemented: pass
class Bot:
""" The main bot class. """
def __init__(self, JID, Password):
""" Create a new bot. Connect to the server and log in. """
# connect...
jid = xmpp.JID(JID)
self.connection = xmpp.Client(jid.getDomain(), debug=['always', 'browser', 'testcommand'])
result = self.connection.connect()
if result is None:
raise ConnectionError
# authorize
result = self.connection.auth(jid.getNode(), Password)
if result is None:
raise AuthorizationError
# plugins
# disco - needed by commands
# warning: case of "plugin" method names are important!
# to attach a command to Commands class, use .plugin()
# to attach anything to Client class, use .PlugIn()
self.disco = xmpp.browser.Browser()
self.disco.PlugIn(self.connection)
self.disco.setDiscoHandler({
'info': {
'ids': [{
'category': 'client',
'type': 'pc',
'name': 'Bot'
}],
'features': [NS_DISCO_INFO],
}
})
self.commands = xmpp.commands.Commands(self.disco)
self.commands.PlugIn(self.connection)
self.command_test = TestCommand()
self.command_test.plugin(self.commands)
# presence
self.connection.sendInitPresence(requestRoster=0)
def loop(self):
""" Do nothing except handling new xmpp stanzas. """
try:
while self.connection.Process(1):
pass
except KeyboardInterrupt:
pass
bot = Bot(**options)
bot.loop()
| apache-2.0 |
pizzapanther/HoverMom | hovermom/django/db/transaction.py | 77 | 20601 | """
This module implements a transaction manager that can be used to define
transaction handling in a request or view function. It is used by transaction
control middleware and decorators.
The transaction manager can be in managed or in auto state. Auto state means the
system is using a commit-on-save strategy (actually it's more like
commit-on-change). As soon as the .save() or .delete() (or related) methods are
called, a commit is made.
Managed transactions don't do those commits, but will need some kind of manual
or implicit commits or rollbacks.
"""
import warnings
from functools import wraps
from django.db import (
connections, DEFAULT_DB_ALIAS,
DatabaseError, Error, ProgrammingError)
from django.utils.decorators import available_attrs
class TransactionManagementError(ProgrammingError):
"""
This exception is thrown when transaction management is used improperly.
"""
pass
################
# Private APIs #
################
def get_connection(using=None):
"""
Get a database connection by name, or the default database connection
if no name is provided.
"""
if using is None:
using = DEFAULT_DB_ALIAS
return connections[using]
###########################
# Deprecated private APIs #
###########################
def abort(using=None):
"""
Roll back any ongoing transactions and clean the transaction management
state of the connection.
This method is to be used only in cases where using balanced
leave_transaction_management() calls isn't possible. For example after a
request has finished, the transaction state isn't known, yet the connection
must be cleaned up for the next request.
"""
get_connection(using).abort()
def enter_transaction_management(managed=True, using=None, forced=False):
"""
Enters transaction management for a running thread. It must be balanced with
the appropriate leave_transaction_management call, since the actual state is
managed as a stack.
The state and dirty flag are carried over from the surrounding block or
from the settings, if there is no surrounding block (dirty is always false
when no current block is running).
"""
get_connection(using).enter_transaction_management(managed, forced)
def leave_transaction_management(using=None):
"""
Leaves transaction management for a running thread. A dirty flag is carried
over to the surrounding block, as a commit will commit all changes, even
those from outside. (Commits are on connection level.)
"""
get_connection(using).leave_transaction_management()
def is_dirty(using=None):
"""
Returns True if the current transaction requires a commit for changes to
happen.
"""
return get_connection(using).is_dirty()
def set_dirty(using=None):
"""
    Sets a dirty flag for the current thread and code streak. This can be used
    in a managed block of code to decide whether there are open changes
    waiting for commit.
"""
get_connection(using).set_dirty()
def set_clean(using=None):
"""
    Resets a dirty flag for the current thread and code streak. This can be used
    in a managed block of code to decide whether a commit or rollback
    should happen.
"""
get_connection(using).set_clean()
def is_managed(using=None):
warnings.warn("'is_managed' is deprecated.",
PendingDeprecationWarning, stacklevel=2)
def managed(flag=True, using=None):
warnings.warn("'managed' no longer serves a purpose.",
PendingDeprecationWarning, stacklevel=2)
def commit_unless_managed(using=None):
warnings.warn("'commit_unless_managed' is now a no-op.",
PendingDeprecationWarning, stacklevel=2)
def rollback_unless_managed(using=None):
warnings.warn("'rollback_unless_managed' is now a no-op.",
PendingDeprecationWarning, stacklevel=2)
###############
# Public APIs #
###############
def get_autocommit(using=None):
"""
Get the autocommit status of the connection.
"""
return get_connection(using).get_autocommit()
def set_autocommit(autocommit, using=None):
"""
Set the autocommit status of the connection.
"""
return get_connection(using).set_autocommit(autocommit)
def commit(using=None):
"""
Commits a transaction and resets the dirty flag.
"""
get_connection(using).commit()
def rollback(using=None):
"""
Rolls back a transaction and resets the dirty flag.
"""
get_connection(using).rollback()
def savepoint(using=None):
"""
Creates a savepoint (if supported and required by the backend) inside the
current transaction. Returns an identifier for the savepoint that will be
used for the subsequent rollback or commit.
"""
return get_connection(using).savepoint()
def savepoint_rollback(sid, using=None):
"""
Rolls back the most recent savepoint (if one exists). Does nothing if
savepoints are not supported.
"""
get_connection(using).savepoint_rollback(sid)
def savepoint_commit(sid, using=None):
"""
Commits the most recent savepoint (if one exists). Does nothing if
savepoints are not supported.
"""
get_connection(using).savepoint_commit(sid)
def clean_savepoints(using=None):
"""
Resets the counter used to generate unique savepoint ids in this thread.
"""
get_connection(using).clean_savepoints()
def get_rollback(using=None):
"""
Gets the "needs rollback" flag -- for *advanced use* only.
"""
return get_connection(using).get_rollback()
def set_rollback(rollback, using=None):
"""
Sets or unsets the "needs rollback" flag -- for *advanced use* only.
When `rollback` is `True`, it triggers a rollback when exiting the
innermost enclosing atomic block that has `savepoint=True` (that's the
default). Use this to force a rollback without raising an exception.
When `rollback` is `False`, it prevents such a rollback. Use this only
after rolling back to a known-good state! Otherwise, you break the atomic
block and data corruption may occur.
"""
return get_connection(using).set_rollback(rollback)
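# Editor's sketch: the documented way to force a rollback without raising,
# using the public helpers above (assumes Django's usual import path):
#
#     from django.db import transaction
#     with transaction.atomic():
#         do_writes()
#         if something_went_wrong():
#             transaction.set_rollback(True)   # the block rolls back on exit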
#################################
# Decorators / context managers #
#################################
class Atomic(object):
"""
This class guarantees the atomic execution of a given block.
An instance can be used either as a decorator or as a context manager.
When it's used as a decorator, __call__ wraps the execution of the
decorated function in the instance itself, used as a context manager.
When it's used as a context manager, __enter__ creates a transaction or a
savepoint, depending on whether a transaction is already in progress, and
__exit__ commits the transaction or releases the savepoint on normal exit,
and rolls back the transaction or to the savepoint on exceptions.
It's possible to disable the creation of savepoints if the goal is to
ensure that some code runs within a transaction without creating overhead.
A stack of savepoints identifiers is maintained as an attribute of the
connection. None denotes the absence of a savepoint.
This allows reentrancy even if the same AtomicWrapper is reused. For
example, it's possible to define `oa = @atomic('other')` and use `@oa` or
`with oa:` multiple times.
Since database connections are thread-local, this is thread-safe.
"""
def __init__(self, using, savepoint):
self.using = using
self.savepoint = savepoint
def __enter__(self):
connection = get_connection(self.using)
if not connection.in_atomic_block:
# Reset state when entering an outermost atomic block.
connection.commit_on_exit = True
connection.needs_rollback = False
if not connection.get_autocommit():
# Some database adapters (namely sqlite3) don't handle
# transactions and savepoints properly when autocommit is off.
# Turning autocommit back on isn't an option; it would trigger
# a premature commit. Give up if that happens.
if connection.features.autocommits_when_autocommit_is_off:
raise TransactionManagementError(
"Your database backend doesn't behave properly when "
"autocommit is off. Turn it on before using 'atomic'.")
# When entering an atomic block with autocommit turned off,
# Django should only use savepoints and shouldn't commit.
# This requires at least a savepoint for the outermost block.
if not self.savepoint:
raise TransactionManagementError(
"The outermost 'atomic' block cannot use "
"savepoint = False when autocommit is off.")
# Pretend we're already in an atomic block to bypass the code
# that disables autocommit to enter a transaction, and make a
# note to deal with this case in __exit__.
connection.in_atomic_block = True
connection.commit_on_exit = False
if connection.in_atomic_block:
# We're already in a transaction; create a savepoint, unless we
# were told not to or we're already waiting for a rollback. The
# second condition avoids creating useless savepoints and prevents
# overwriting needs_rollback until the rollback is performed.
if self.savepoint and not connection.needs_rollback:
sid = connection.savepoint()
connection.savepoint_ids.append(sid)
else:
connection.savepoint_ids.append(None)
else:
# We aren't in a transaction yet; create one.
# The usual way to start a transaction is to turn autocommit off.
# However, some database adapters (namely sqlite3) don't handle
# transactions and savepoints properly when autocommit is off.
# In such cases, start an explicit transaction instead, which has
# the side-effect of disabling autocommit.
if connection.features.autocommits_when_autocommit_is_off:
connection._start_transaction_under_autocommit()
connection.autocommit = False
else:
connection.set_autocommit(False)
connection.in_atomic_block = True
def __exit__(self, exc_type, exc_value, traceback):
connection = get_connection(self.using)
if connection.savepoint_ids:
sid = connection.savepoint_ids.pop()
else:
# Prematurely unset this flag to allow using commit or rollback.
connection.in_atomic_block = False
try:
if connection.closed_in_transaction:
# The database will perform a rollback by itself.
# Wait until we exit the outermost block.
pass
elif exc_type is None and not connection.needs_rollback:
if connection.in_atomic_block:
# Release savepoint if there is one
if sid is not None:
try:
connection.savepoint_commit(sid)
except DatabaseError:
try:
connection.savepoint_rollback(sid)
except Error:
# If rolling back to a savepoint fails, mark for
# rollback at a higher level and avoid shadowing
# the original exception.
connection.needs_rollback = True
raise
else:
# Commit transaction
try:
connection.commit()
except DatabaseError:
try:
connection.rollback()
except Error:
# An error during rollback means that something
# went wrong with the connection. Drop it.
connection.close()
raise
else:
# This flag will be set to True again if there isn't a savepoint
# allowing to perform the rollback at this level.
connection.needs_rollback = False
if connection.in_atomic_block:
# Roll back to savepoint if there is one, mark for rollback
# otherwise.
if sid is None:
connection.needs_rollback = True
else:
try:
connection.savepoint_rollback(sid)
except Error:
# If rolling back to a savepoint fails, mark for
# rollback at a higher level and avoid shadowing
# the original exception.
connection.needs_rollback = True
else:
# Roll back transaction
try:
connection.rollback()
except Error:
# An error during rollback means that something
# went wrong with the connection. Drop it.
connection.close()
finally:
# Outermost block exit when autocommit was enabled.
if not connection.in_atomic_block:
if connection.closed_in_transaction:
connection.connection = None
elif connection.features.autocommits_when_autocommit_is_off:
connection.autocommit = True
else:
connection.set_autocommit(True)
# Outermost block exit when autocommit was disabled.
elif not connection.savepoint_ids and not connection.commit_on_exit:
if connection.closed_in_transaction:
connection.connection = None
else:
connection.in_atomic_block = False
def __call__(self, func):
@wraps(func, assigned=available_attrs(func))
def inner(*args, **kwargs):
with self:
return func(*args, **kwargs)
return inner
def atomic(using=None, savepoint=True):
# Bare decorator: @atomic -- although the first argument is called
# `using`, it's actually the function being decorated.
if callable(using):
return Atomic(DEFAULT_DB_ALIAS, savepoint)(using)
# Decorator: @atomic(...) or context manager: with atomic(...): ...
else:
return Atomic(using, savepoint)
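# Usage sketch: atomic() as a decorator and as a context manager. The
# outermost block opens a transaction; nested blocks create savepoints.
# (The model and method names are illustrative.)
#
#     @atomic
#     def create_author(name):
#         Author.objects.create(name=name)
#
#     def transfer(source, target, amount):
#         with atomic():            # transaction
#             source.withdraw(amount)
#             with atomic():        # savepoint
#                 target.deposit(amount)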
def _non_atomic_requests(view, using):
try:
view._non_atomic_requests.add(using)
except AttributeError:
view._non_atomic_requests = set([using])
return view
def non_atomic_requests(using=None):
if callable(using):
return _non_atomic_requests(using, DEFAULT_DB_ALIAS)
else:
if using is None:
using = DEFAULT_DB_ALIAS
return lambda view: _non_atomic_requests(view, using)
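# Sketch: exempting a view from transaction-per-request handling. Both the
# bare and the parametrized forms work ('replica' is a hypothetical alias):
#
#     @non_atomic_requests
#     def health_check(request): ...
#
#     @non_atomic_requests('replica')
#     def streaming_view(request): ...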
############################################
# Deprecated decorators / context managers #
############################################
class Transaction(object):
"""
    Acts as either a decorator or a context manager. If it's a decorator, it
    takes a function and returns a wrapped function. If it's a context manager,
    it's used with the ``with`` statement. In either case, entering/exiting are
    called before and after the function/block is executed, respectively.
autocommit, commit_on_success, and commit_manually contain the
implementations of entering and exiting.
"""
def __init__(self, entering, exiting, using):
self.entering = entering
self.exiting = exiting
self.using = using
def __enter__(self):
self.entering(self.using)
def __exit__(self, exc_type, exc_value, traceback):
self.exiting(exc_type, self.using)
def __call__(self, func):
@wraps(func)
def inner(*args, **kwargs):
with self:
return func(*args, **kwargs)
return inner
def _transaction_func(entering, exiting, using):
"""
    Takes three things: an entering function (what to do to start this block
    of transaction management), an exiting function (what to do to end it, on
    both success and failure), and `using`, which can be: None, indicating
    that using is DEFAULT_DB_ALIAS; a callable, indicating that using is
    DEFAULT_DB_ALIAS and that the function should be returned already wrapped;
    or a database alias.
    Returns either a Transaction object, which is both a decorator and a
    context manager, or a wrapped function, if using is a callable.
"""
# Note that although the first argument is *called* `using`, it
# may actually be a function; @autocommit and @autocommit('foo')
# are both allowed forms.
if using is None:
using = DEFAULT_DB_ALIAS
if callable(using):
return Transaction(entering, exiting, DEFAULT_DB_ALIAS)(using)
return Transaction(entering, exiting, using)
def autocommit(using=None):
"""
Decorator that activates commit on save. This is Django's default behavior;
this decorator is useful if you globally activated transaction management in
your settings file and want the default behavior in some view functions.
"""
warnings.warn("autocommit is deprecated in favor of set_autocommit.",
PendingDeprecationWarning, stacklevel=2)
def entering(using):
enter_transaction_management(managed=False, using=using)
def exiting(exc_type, using):
leave_transaction_management(using=using)
return _transaction_func(entering, exiting, using)
def commit_on_success(using=None):
"""
This decorator activates commit on response. This way, if the view function
    runs successfully, a commit is made; if the view function raises an exception,
a rollback is made. This is one of the most common ways to do transaction
control in Web apps.
"""
warnings.warn("commit_on_success is deprecated in favor of atomic.",
PendingDeprecationWarning, stacklevel=2)
def entering(using):
enter_transaction_management(using=using)
def exiting(exc_type, using):
try:
if exc_type is not None:
if is_dirty(using=using):
rollback(using=using)
else:
if is_dirty(using=using):
try:
commit(using=using)
except:
rollback(using=using)
raise
finally:
leave_transaction_management(using=using)
return _transaction_func(entering, exiting, using)
def commit_manually(using=None):
"""
Decorator that activates manual transaction control. It just disables
automatic transaction control and doesn't do any commit/rollback of its
own -- it's up to the user to call the commit and rollback functions
themselves.
"""
warnings.warn("commit_manually is deprecated in favor of set_autocommit.",
PendingDeprecationWarning, stacklevel=2)
def entering(using):
enter_transaction_management(using=using)
def exiting(exc_type, using):
leave_transaction_management(using=using)
return _transaction_func(entering, exiting, using)
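# Legacy usage sketch for commit_manually (deprecated; `save_row` is
# illustrative): the caller must end the transaction explicitly.
#
#     @commit_manually
#     def import_rows(rows):
#         try:
#             for row in rows:
#                 save_row(row)
#             commit()
#         except Exception:
#             rollback()
#             raise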
def commit_on_success_unless_managed(using=None, savepoint=False):
"""
Transitory API to preserve backwards-compatibility while refactoring.
Once the legacy transaction management is fully deprecated, this should
simply be replaced by atomic. Until then, it's necessary to guarantee that
a commit occurs on exit, which atomic doesn't do when it's nested.
Unlike atomic, savepoint defaults to False because that's closer to the
legacy behavior.
"""
connection = get_connection(using)
if connection.get_autocommit() or connection.in_atomic_block:
return atomic(using, savepoint)
else:
def entering(using):
pass
def exiting(exc_type, using):
set_dirty(using=using)
return _transaction_func(entering, exiting, using)
| mit |
oberlin/django | tests/migrations/test_base.py | 292 | 4620 | import os
import shutil
import tempfile
from contextlib import contextmanager
from importlib import import_module
from django.apps import apps
from django.db import connection
from django.db.migrations.recorder import MigrationRecorder
from django.test import TransactionTestCase
from django.test.utils import extend_sys_path
from django.utils.module_loading import module_dir
class MigrationTestBase(TransactionTestCase):
"""
Contains an extended set of asserts for testing migrations and schema operations.
"""
available_apps = ["migrations"]
def tearDown(self):
# Reset applied-migrations state.
recorder = MigrationRecorder(connection)
recorder.migration_qs.filter(app='migrations').delete()
def get_table_description(self, table):
with connection.cursor() as cursor:
return connection.introspection.get_table_description(cursor, table)
def assertTableExists(self, table):
with connection.cursor() as cursor:
self.assertIn(table, connection.introspection.table_names(cursor))
def assertTableNotExists(self, table):
with connection.cursor() as cursor:
self.assertNotIn(table, connection.introspection.table_names(cursor))
def assertColumnExists(self, table, column):
self.assertIn(column, [c.name for c in self.get_table_description(table)])
def assertColumnNotExists(self, table, column):
self.assertNotIn(column, [c.name for c in self.get_table_description(table)])
def assertColumnNull(self, table, column):
self.assertEqual([c.null_ok for c in self.get_table_description(table) if c.name == column][0], True)
def assertColumnNotNull(self, table, column):
self.assertEqual([c.null_ok for c in self.get_table_description(table) if c.name == column][0], False)
def assertIndexExists(self, table, columns, value=True):
with connection.cursor() as cursor:
self.assertEqual(
value,
any(
c["index"]
for c in connection.introspection.get_constraints(cursor, table).values()
if c['columns'] == list(columns)
),
)
def assertIndexNotExists(self, table, columns):
return self.assertIndexExists(table, columns, False)
def assertFKExists(self, table, columns, to, value=True):
with connection.cursor() as cursor:
self.assertEqual(
value,
any(
c["foreign_key"] == to
for c in connection.introspection.get_constraints(cursor, table).values()
if c['columns'] == list(columns)
),
)
def assertFKNotExists(self, table, columns, to, value=True):
return self.assertFKExists(table, columns, to, False)
@contextmanager
def temporary_migration_module(self, app_label='migrations', module=None):
"""
Allows testing management commands in a temporary migrations module.
Wrap all invocations to makemigrations and squashmigrations with this
context manager in order to avoid creating migration files in your
source tree inadvertently.
Takes the application label that will be passed to makemigrations or
squashmigrations and the Python path to a migrations module.
The migrations module is used as a template for creating the temporary
migrations module. If it isn't provided, the application's migrations
module is used, if it exists.
Returns the filesystem path to the temporary migrations module.
"""
temp_dir = tempfile.mkdtemp()
try:
target_dir = tempfile.mkdtemp(dir=temp_dir)
with open(os.path.join(target_dir, '__init__.py'), 'w'):
pass
target_migrations_dir = os.path.join(target_dir, 'migrations')
if module is None:
module = apps.get_app_config(app_label).name + '.migrations'
try:
source_migrations_dir = module_dir(import_module(module))
except (ImportError, ValueError):
pass
else:
shutil.copytree(source_migrations_dir, target_migrations_dir)
with extend_sys_path(temp_dir):
new_module = os.path.basename(target_dir) + '.migrations'
with self.settings(MIGRATION_MODULES={app_label: new_module}):
yield target_migrations_dir
finally:
shutil.rmtree(temp_dir)
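    # Usage sketch inside a MigrationTestBase subclass (the command invocation
    # is illustrative):
    #
    #     def test_makemigrations(self):
    #         with self.temporary_migration_module() as migration_dir:
    #             call_command('makemigrations', 'migrations')
    #             self.assertTrue(os.path.exists(
    #                 os.path.join(migration_dir, '0001_initial.py')))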
| bsd-3-clause |
zerc/django | django/contrib/gis/gdal/raster/const.py | 238 | 1537 | """
GDAL - Constant definitions
"""
from ctypes import (
c_byte, c_double, c_float, c_int16, c_int32, c_uint16, c_uint32,
)
# See http://www.gdal.org/gdal_8h.html#a22e22ce0a55036a96f652765793fb7a4
GDAL_PIXEL_TYPES = {
0: 'GDT_Unknown', # Unknown or unspecified type
1: 'GDT_Byte', # Eight bit unsigned integer
2: 'GDT_UInt16', # Sixteen bit unsigned integer
3: 'GDT_Int16', # Sixteen bit signed integer
4: 'GDT_UInt32', # Thirty-two bit unsigned integer
5: 'GDT_Int32', # Thirty-two bit signed integer
6: 'GDT_Float32', # Thirty-two bit floating point
7: 'GDT_Float64', # Sixty-four bit floating point
8: 'GDT_CInt16', # Complex Int16
9: 'GDT_CInt32', # Complex Int32
10: 'GDT_CFloat32', # Complex Float32
11: 'GDT_CFloat64', # Complex Float64
}
# A list of gdal datatypes that are integers.
GDAL_INTEGER_TYPES = [1, 2, 3, 4, 5]
# Lookup values to convert GDAL pixel type indices into ctypes objects.
# The GDAL band-io works with ctypes arrays to hold data to be written
# or to hold the space for data to be read into. The lookup below helps
# selecting the right ctypes object for a given gdal pixel type.
GDAL_TO_CTYPES = [
None, c_byte, c_uint16, c_int16, c_uint32, c_int32,
c_float, c_double, None, None, None, None
]
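# Sketch: picking the ctypes array type for a band's pixel type (the helper
# name is illustrative).
#
#     def band_buffer(pixel_type, size):
#         ctype = GDAL_TO_CTYPES[pixel_type]
#         if ctype is None:
#             raise ValueError('No ctypes mapping for %s'
#                              % GDAL_PIXEL_TYPES[pixel_type])
#         return (ctype * size)()   # e.g. a c_float array for GDT_Float32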
# List of resampling algorithms that can be used to warp a GDALRaster.
GDAL_RESAMPLE_ALGORITHMS = {
'NearestNeighbour': 0,
'Bilinear': 1,
'Cubic': 2,
'CubicSpline': 3,
'Lanczos': 4,
'Average': 5,
'Mode': 6,
}
| bsd-3-clause |
tashigaofei/BlogSpider | scrapy/tests/test_downloadermiddleware_redirect.py | 15 | 9245 | import unittest
from scrapy.contrib.downloadermiddleware.redirect import RedirectMiddleware, MetaRefreshMiddleware
from scrapy.spider import Spider
from scrapy.exceptions import IgnoreRequest
from scrapy.http import Request, Response, HtmlResponse
from scrapy.utils.test import get_crawler
class RedirectMiddlewareTest(unittest.TestCase):
def setUp(self):
crawler = get_crawler()
self.spider = Spider('foo')
self.mw = RedirectMiddleware.from_crawler(crawler)
def test_priority_adjust(self):
req = Request('http://a.com')
rsp = Response('http://a.com', headers={'Location': 'http://a.com/redirected'}, status=301)
req2 = self.mw.process_response(req, rsp, self.spider)
assert req2.priority > req.priority
def test_redirect_301(self):
def _test(method):
url = 'http://www.example.com/301'
url2 = 'http://www.example.com/redirected'
req = Request(url, method=method)
rsp = Response(url, headers={'Location': url2}, status=301)
req2 = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req2, Request)
self.assertEqual(req2.url, url2)
self.assertEqual(req2.method, method)
            # a response with a 3XX status code but without a Location header should be ignored
del rsp.headers['Location']
assert self.mw.process_response(req, rsp, self.spider) is rsp
_test('GET')
_test('POST')
_test('HEAD')
def test_dont_redirect(self):
url = 'http://www.example.com/301'
url2 = 'http://www.example.com/redirected'
req = Request(url, meta={'dont_redirect': True})
rsp = Response(url, headers={'Location': url2}, status=301)
r = self.mw.process_response(req, rsp, self.spider)
assert isinstance(r, Response)
assert r is rsp
def test_redirect_302(self):
url = 'http://www.example.com/302'
url2 = 'http://www.example.com/redirected2'
req = Request(url, method='POST', body='test',
headers={'Content-Type': 'text/plain', 'Content-length': '4'})
rsp = Response(url, headers={'Location': url2}, status=302)
req2 = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req2, Request)
self.assertEqual(req2.url, url2)
self.assertEqual(req2.method, 'GET')
assert 'Content-Type' not in req2.headers, \
"Content-Type header must not be present in redirected request"
assert 'Content-Length' not in req2.headers, \
"Content-Length header must not be present in redirected request"
assert not req2.body, \
"Redirected body must be empty, not '%s'" % req2.body
        # a response with a 3XX status code but without a Location header should be ignored
del rsp.headers['Location']
assert self.mw.process_response(req, rsp, self.spider) is rsp
def test_redirect_302_head(self):
url = 'http://www.example.com/302'
url2 = 'http://www.example.com/redirected2'
req = Request(url, method='HEAD')
rsp = Response(url, headers={'Location': url2}, status=302)
req2 = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req2, Request)
self.assertEqual(req2.url, url2)
self.assertEqual(req2.method, 'HEAD')
        # a response with a 3XX status code but without a Location header should be ignored
del rsp.headers['Location']
assert self.mw.process_response(req, rsp, self.spider) is rsp
def test_max_redirect_times(self):
self.mw.max_redirect_times = 1
req = Request('http://scrapytest.org/302')
rsp = Response('http://scrapytest.org/302', headers={'Location': '/redirected'}, status=302)
req = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req, Request)
assert 'redirect_times' in req.meta
self.assertEqual(req.meta['redirect_times'], 1)
self.assertRaises(IgnoreRequest, self.mw.process_response, req, rsp, self.spider)
def test_ttl(self):
self.mw.max_redirect_times = 100
req = Request('http://scrapytest.org/302', meta={'redirect_ttl': 1})
rsp = Response('http://www.scrapytest.org/302', headers={'Location': '/redirected'}, status=302)
req = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req, Request)
self.assertRaises(IgnoreRequest, self.mw.process_response, req, rsp, self.spider)
def test_redirect_urls(self):
req1 = Request('http://scrapytest.org/first')
rsp1 = Response('http://scrapytest.org/first', headers={'Location': '/redirected'}, status=302)
req2 = self.mw.process_response(req1, rsp1, self.spider)
rsp2 = Response('http://scrapytest.org/redirected', headers={'Location': '/redirected2'}, status=302)
req3 = self.mw.process_response(req2, rsp2, self.spider)
self.assertEqual(req2.url, 'http://scrapytest.org/redirected')
self.assertEqual(req2.meta['redirect_urls'], ['http://scrapytest.org/first'])
self.assertEqual(req3.url, 'http://scrapytest.org/redirected2')
self.assertEqual(req3.meta['redirect_urls'], ['http://scrapytest.org/first', 'http://scrapytest.org/redirected'])
class MetaRefreshMiddlewareTest(unittest.TestCase):
def setUp(self):
crawler = get_crawler()
self.spider = Spider('foo')
self.mw = MetaRefreshMiddleware.from_crawler(crawler)
def _body(self, interval=5, url='http://example.org/newpage'):
return """<html><head><meta http-equiv="refresh" content="{0};url={1}"/></head></html>"""\
.format(interval, url)
def test_priority_adjust(self):
req = Request('http://a.com')
rsp = HtmlResponse(req.url, body=self._body())
req2 = self.mw.process_response(req, rsp, self.spider)
assert req2.priority > req.priority
def test_meta_refresh(self):
req = Request(url='http://example.org')
rsp = HtmlResponse(req.url, body=self._body())
req2 = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req2, Request)
self.assertEqual(req2.url, 'http://example.org/newpage')
def test_meta_refresh_with_high_interval(self):
        # a meta-refresh with a high interval doesn't trigger a redirect
req = Request(url='http://example.org')
rsp = HtmlResponse(url='http://example.org', body=self._body(interval=1000))
rsp2 = self.mw.process_response(req, rsp, self.spider)
assert rsp is rsp2
    def test_meta_refresh_through_posted_request(self):
req = Request(url='http://example.org', method='POST', body='test',
headers={'Content-Type': 'text/plain', 'Content-length': '4'})
rsp = HtmlResponse(req.url, body=self._body())
req2 = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req2, Request)
self.assertEqual(req2.url, 'http://example.org/newpage')
self.assertEqual(req2.method, 'GET')
assert 'Content-Type' not in req2.headers, \
"Content-Type header must not be present in redirected request"
assert 'Content-Length' not in req2.headers, \
"Content-Length header must not be present in redirected request"
assert not req2.body, \
"Redirected body must be empty, not '%s'" % req2.body
def test_max_redirect_times(self):
self.mw.max_redirect_times = 1
req = Request('http://scrapytest.org/max')
rsp = HtmlResponse(req.url, body=self._body())
req = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req, Request)
assert 'redirect_times' in req.meta
self.assertEqual(req.meta['redirect_times'], 1)
self.assertRaises(IgnoreRequest, self.mw.process_response, req, rsp, self.spider)
def test_ttl(self):
self.mw.max_redirect_times = 100
req = Request('http://scrapytest.org/302', meta={'redirect_ttl': 1})
rsp = HtmlResponse(req.url, body=self._body())
req = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req, Request)
self.assertRaises(IgnoreRequest, self.mw.process_response, req, rsp, self.spider)
def test_redirect_urls(self):
req1 = Request('http://scrapytest.org/first')
rsp1 = HtmlResponse(req1.url, body=self._body(url='/redirected'))
req2 = self.mw.process_response(req1, rsp1, self.spider)
assert isinstance(req2, Request), req2
rsp2 = HtmlResponse(req2.url, body=self._body(url='/redirected2'))
req3 = self.mw.process_response(req2, rsp2, self.spider)
assert isinstance(req3, Request), req3
self.assertEqual(req2.url, 'http://scrapytest.org/redirected')
self.assertEqual(req2.meta['redirect_urls'], ['http://scrapytest.org/first'])
self.assertEqual(req3.url, 'http://scrapytest.org/redirected2')
self.assertEqual(req3.meta['redirect_urls'], ['http://scrapytest.org/first', 'http://scrapytest.org/redirected'])
if __name__ == "__main__":
unittest.main()
| mit |
writefaruq/lionface-app | django/contrib/gis/tests/test_spatialrefsys.py | 12 | 6799 | from django.db import connection
from django.contrib.gis.tests.utils import mysql, no_mysql, oracle, postgis, spatialite
from django.utils import unittest
test_srs = ({'srid' : 4326,
'auth_name' : ('EPSG', True),
'auth_srid' : 4326,
'srtext' : 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]',
'srtext14' : 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]',
'proj4' : '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs ',
'spheroid' : 'WGS 84', 'name' : 'WGS 84',
'geographic' : True, 'projected' : False, 'spatialite' : True,
'ellipsoid' : (6378137.0, 6356752.3, 298.257223563), # From proj's "cs2cs -le" and Wikipedia (semi-minor only)
'eprec' : (1, 1, 9),
},
{'srid' : 32140,
'auth_name' : ('EPSG', False),
'auth_srid' : 32140,
'srtext' : 'PROJCS["NAD83 / Texas South Central",GEOGCS["NAD83",DATUM["North_American_Datum_1983",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4269"]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["standard_parallel_1",30.28333333333333],PARAMETER["standard_parallel_2",28.38333333333333],PARAMETER["latitude_of_origin",27.83333333333333],PARAMETER["central_meridian",-99],PARAMETER["false_easting",600000],PARAMETER["false_northing",4000000],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AUTHORITY["EPSG","32140"]]',
'srtext14': 'PROJCS["NAD83 / Texas South Central",GEOGCS["NAD83",DATUM["North_American_Datum_1983",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4269"]],UNIT["metre",1,AUTHORITY["EPSG","9001"]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["standard_parallel_1",30.28333333333333],PARAMETER["standard_parallel_2",28.38333333333333],PARAMETER["latitude_of_origin",27.83333333333333],PARAMETER["central_meridian",-99],PARAMETER["false_easting",600000],PARAMETER["false_northing",4000000],AUTHORITY["EPSG","32140"],AXIS["X",EAST],AXIS["Y",NORTH]]',
'proj4' : '+proj=lcc +lat_1=30.28333333333333 +lat_2=28.38333333333333 +lat_0=27.83333333333333 +lon_0=-99 +x_0=600000 +y_0=4000000 +ellps=GRS80 +datum=NAD83 +units=m +no_defs ',
'spheroid' : 'GRS 1980', 'name' : 'NAD83 / Texas South Central',
'geographic' : False, 'projected' : True, 'spatialite' : False,
'ellipsoid' : (6378137.0, 6356752.31414, 298.257222101), # From proj's "cs2cs -le" and Wikipedia (semi-minor only)
'eprec' : (1, 5, 10),
},
)
if oracle:
from django.contrib.gis.db.backends.oracle.models import SpatialRefSys
elif postgis:
from django.contrib.gis.db.backends.postgis.models import SpatialRefSys
elif spatialite:
from django.contrib.gis.db.backends.spatialite.models import SpatialRefSys
class SpatialRefSysTest(unittest.TestCase):
@no_mysql
def test01_retrieve(self):
"Testing retrieval of SpatialRefSys model objects."
for sd in test_srs:
srs = SpatialRefSys.objects.get(srid=sd['srid'])
self.assertEqual(sd['srid'], srs.srid)
# Some of the authority names are borked on Oracle, e.g., SRID=32140.
            # Also, Oracle Spatial seems to add extraneous info to fields,
            # hence the testing with the 'startswith' flag.
auth_name, oracle_flag = sd['auth_name']
if postgis or (oracle and oracle_flag):
self.assertEqual(True, srs.auth_name.startswith(auth_name))
self.assertEqual(sd['auth_srid'], srs.auth_srid)
# No proj.4 and different srtext on oracle backends :(
if postgis:
if connection.ops.spatial_version >= (1, 4, 0):
srtext = sd['srtext14']
else:
srtext = sd['srtext']
self.assertEqual(srtext, srs.wkt)
self.assertEqual(sd['proj4'], srs.proj4text)
@no_mysql
def test02_osr(self):
"Testing getting OSR objects from SpatialRefSys model objects."
for sd in test_srs:
sr = SpatialRefSys.objects.get(srid=sd['srid'])
self.assertEqual(True, sr.spheroid.startswith(sd['spheroid']))
self.assertEqual(sd['geographic'], sr.geographic)
self.assertEqual(sd['projected'], sr.projected)
if not (spatialite and not sd['spatialite']):
# Can't get 'NAD83 / Texas South Central' from PROJ.4 string
# on SpatiaLite
self.assertEqual(True, sr.name.startswith(sd['name']))
# Testing the SpatialReference object directly.
if postgis or spatialite:
srs = sr.srs
self.assertEqual(sd['proj4'], srs.proj4)
# No `srtext` field in the `spatial_ref_sys` table in SpatiaLite
if not spatialite:
if connection.ops.spatial_version >= (1, 4, 0):
srtext = sd['srtext14']
else:
srtext = sd['srtext']
self.assertEqual(srtext, srs.wkt)
@no_mysql
def test03_ellipsoid(self):
"Testing the ellipsoid property."
for sd in test_srs:
# Getting the ellipsoid and precision parameters.
ellps1 = sd['ellipsoid']
prec = sd['eprec']
# Getting our spatial reference and its ellipsoid
srs = SpatialRefSys.objects.get(srid=sd['srid'])
ellps2 = srs.ellipsoid
for i in range(3):
param1 = ellps1[i]
param2 = ellps2[i]
                self.assertAlmostEqual(param1, param2, prec[i])
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(SpatialRefSysTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
| bsd-3-clause |
valorekhov/AvrBee | src/AvrBee/HexFileFormat.py | 1 | 2846 | from abc import ABCMeta, abstractmethod
import struct
import binascii, os, sys
class HexFileFormat(object):
"""Parses out Hex File format into a byte stream"""
def __init__(self, path=None, file=None):
self.path = path
self.file = file
pass
def get_bytes(self):
with open(self.path) as f:
self.lines = f.readlines()
ret = list()
for line in self.lines:
if not line.startswith(':'):
raise Exception("Unsupported format")
line = line[1:].replace('\n','')
bytes = bytearray.fromhex(line)
pageSize = bytes[0]
memAddrHigh = bytes[1] #Big endian address as per http://en.wikipedia.org/wiki/Intel_HEX
memAddrLow = bytes[2]
memAddr = (memAddrHigh << 8) + memAddrLow
eolFlag = bytes[3]
if eolFlag == 1:
break
checksum = bytes[-1]
payload = bytes[4:4+pageSize]
payloadsum = (pageSize + memAddrLow + memAddrHigh + eolFlag + sum(x for x in payload) + checksum ) & 0xFF
if payloadsum != 0:
raise Exception("Checksum mismatch")
ret.append((memAddr,payload))
return ret
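    # How the fields above map onto the standard example record from the
    # Intel HEX article linked above (illustrative):
    # ':10010000214601360121470136007EFE09D2190140'
    #   10    -> pageSize, 16 payload bytes          (bytes[0])
    #   0100  -> load address 0x0100, big endian     (bytes[1:3])
    #   00    -> eolFlag / record type; 1 ends it    (bytes[3])
    #   ...   -> payload                             (bytes[4:4+pageSize])
    #   40    -> checksum; byte sum must be 0 mod 256 (bytes[-1])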
def save_bytes(self, data, startAddress = 0):
        f = open(self.path, 'w') if self.file is None else self.file
pageSize = 16
length = len(data)
address = startAddress
        # Ceiling division: a trailing partial page still needs its own
        # record. (The old check compared the already-truncated int(length/16)
        # against itself, so it could never round up.)
        pages = (length + pageSize - 1) // pageSize
for i in range(0, pages):
slice = data[i*pageSize: i*pageSize + pageSize]
length = len(slice)
eol = 0 if i+1 < pages else 1
addrLow = address & 0xFF
addrHigh = (address >> 8) & 0xFF
            checksum = (length + addrLow + addrHigh + eol + sum(x for x in slice)) & 0xff
            checksum = (0x100 - checksum) & 0xff  # two's complement, as get_bytes expects
bytes = bytearray()
bytes.append(length)
bytes.extend(struct.pack(">H", address))
bytes.append(eol)
bytes.extend(slice)
bytes.append(checksum)
#struct.pack( "BiB" + str(length)+"cB" , length, address & 0xFFFF, eol, str(slice), checksum)
f.write(':')
            # b2a_hex returns bytes; decode before upper-casing so we don't
            # write the literal "b'...'" repr of a bytes object.
            f.write(binascii.b2a_hex(bytes).decode('ascii').upper())
f.write(os.linesep)
address += length
if f != sys.stdout:
f.close()
class FileFormat(metaclass=ABCMeta):
"""Abstract class representing parsers"""
@abstractmethod
def get_bytes(self):
pass
@abstractmethod
def save_bytes(self, data):
pass
FileFormat.register(HexFileFormat)
| unlicense |
NaturalSolutions/NsPortal | Back/ns_portal/resources/root/security/oauth2/v1/login/login_resource.py | 1 | 2467 | from ns_portal.core.resources import (
MetaEndPointResource
)
from marshmallow import (
Schema,
fields,
EXCLUDE,
ValidationError
)
from ns_portal.database.main_db import (
TUsers
)
from sqlalchemy import (
and_
)
from sqlalchemy.orm.exc import (
MultipleResultsFound
)
from pyramid.security import (
Allow,
Everyone,
remember
)
from ns_portal.utils import (
getCookieToken
)
from pyramid.response import (
Response
)
class loginSchema(Schema):
username = fields.String(required=True)
password = fields.String(required=True)
class Meta:
unknown = EXCLUDE
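# Behavior sketch for the schema above (values illustrative): unknown fields
# are dropped because of `unknown = EXCLUDE`.
#
#     loginSchema().load({'username': 'alice', 'password': 's3cret',
#                         'extra': 'ignored'})
#     # -> {'username': 'alice', 'password': 's3cret'}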
class LoginResource(MetaEndPointResource):
__acl__ = [
(Allow, Everyone, 'create')
]
def validateUserCredentials(self, data):
query = self.request.dbsession.query(TUsers)
query = query.filter(
and_(
TUsers.TUse_Login == data.get('username'),
TUsers.TUse_Password == data.get('password')
)
)
try:
res = query.one_or_none()
except MultipleResultsFound:
raise ValidationError({
"error": (
'your username and password are'
                    ' not unique in the db;'
                    ' please report it to an admin'
)
})
if res:
# this key is added after validation
return res
else:
raise ValidationError({
"error": (
'your username and/or password'
                    ' are wrong'
)
})
def POST(self):
reqParams = self.__parser__(
args=loginSchema(),
location='form'
)
userFound = self.validateUserCredentials(data=reqParams)
if userFound:
token = getCookieToken(
idUser=getattr(userFound, 'TUse_PK_ID'),
request=self.request
)
resp = Response(
status=200
)
remember(
resp,
token
)
self.request.response = resp
return self.request.response
else:
raise ValidationError({
"error": (
'your username and/or password'
                    ' are wrong'
)
})
| mit |
AustereCuriosity/astropy | astropy/wcs/tests/extension/test_extension.py | 1 | 2869 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import subprocess
import sys
import pytest
def test_wcsapi_extension(tmpdir):
# Test that we can build a simple C extension with the astropy.wcs C API
setup_path = os.path.dirname(__file__)
astropy_path = os.path.abspath(
os.path.join(setup_path, '..', '..', '..', '..'))
env = os.environ.copy()
paths = [str(tmpdir), astropy_path]
if env.get('PYTHONPATH'):
paths.append(env.get('PYTHONPATH'))
env[str('PYTHONPATH')] = str(os.pathsep.join(paths))
# Build the extension
# This used to use subprocess.check_call, but on Python 3.4 there was
# a mysterious Heisenbug causing this to fail with a non-zero exit code
# *unless* the output is redirected. This bug also did not occur in an
# interactive session, so it likely had something to do with pytest's
# output capture
p = subprocess.Popen([sys.executable, 'setup.py', 'install',
'--install-lib={0}'.format(tmpdir),
astropy_path], cwd=setup_path, env=env,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Whether the process fails or not this isn't likely to produce a great
# deal of output so communicate should be fine in almost all cases
stdout, stderr = p.communicate()
try:
stdout, stderr = stdout.decode('utf8'), stderr.decode('utf8')
except UnicodeDecodeError:
# Don't try to guess about encoding; just display the text
stdout, stderr = stdout.decode('latin1'), stderr.decode('latin1')
# If compilation fails, we can skip this test, since the
# dependencies necessary to compile an extension may be missing.
# If it passes, however, we want to continue and ensure that the
# extension created is actually usable. However, if we're on
# Travis-CI, or another generic continuous integration setup, we
# don't want to ever skip, because having it fail in that
# environment probably indicates something more serious that we
# want to know about.
if (not (str('CI') in os.environ or
str('TRAVIS') in os.environ or
str('CONTINUOUS_INTEGRATION') in os.environ) and
p.returncode):
pytest.skip("system unable to compile extensions")
return
assert p.returncode == 0, (
"setup.py exited with non-zero return code {0}\n"
"stdout:\n\n{1}\n\nstderr:\n\n{2}\n".format(
p.returncode, stdout, stderr))
code = """
import sys
import wcsapi_test
sys.exit(wcsapi_test.test())
"""
code = code.strip().replace('\n', '; ')
# Import and run the extension
subprocess.check_call([sys.executable, '-c', code], env=env)
| bsd-3-clause |
mlcommons/training | object_detection/pytorch/maskrcnn_benchmark/config/paths_catalog.py | 1 | 8463 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Centralized catalog of paths."""
import os
class DatasetCatalog(object):
DATA_DIR = "datasets"
DATASETS = {
"coco_2017_train": {
"img_dir": "coco/train2017",
"ann_file": "coco/annotations/instances_train2017.json"
},
"coco_2017_val": {
"img_dir": "coco/val2017",
"ann_file": "coco/annotations/instances_val2017.json"
},
"coco_2014_train": {
"img_dir": "coco/train2014",
"ann_file": "coco/annotations/instances_train2014.json"
},
"coco_2014_val": {
"img_dir": "coco/val2014",
"ann_file": "coco/annotations/instances_val2014.json"
},
"coco_2014_minival": {
"img_dir": "coco/val2014",
"ann_file": "coco/annotations/instances_minival2014.json"
},
"coco_2014_valminusminival": {
"img_dir": "coco/val2014",
"ann_file": "coco/annotations/instances_valminusminival2014.json"
},
"keypoints_coco_2014_train": {
"img_dir": "coco/train2014",
"ann_file": "annotations/person_keypoints_train2014.json",
},
"keypoints_coco_2014_val": {
"img_dir": "coco/val2014",
"ann_file": "coco/annotations/instances_val2014.json"
},
"keypoints_coco_2014_minival": {
"img_dir": "coco/val2014",
"ann_file": "annotations/person_keypoints_minival2014.json",
},
"keypoints_coco_2014_valminusminival": {
"img_dir": "coco/val2014",
"ann_file": "annotations/person_keypoints_valminusminival2014.json",
},
"voc_2007_train": {
"data_dir": "voc/VOC2007",
"split": "train"
},
"voc_2007_train_cocostyle": {
"img_dir": "voc/VOC2007/JPEGImages",
"ann_file": "voc/VOC2007/Annotations/pascal_train2007.json"
},
"voc_2007_val": {
"data_dir": "voc/VOC2007",
"split": "val"
},
"voc_2007_val_cocostyle": {
"img_dir": "voc/VOC2007/JPEGImages",
"ann_file": "voc/VOC2007/Annotations/pascal_val2007.json"
},
"voc_2007_test": {
"data_dir": "voc/VOC2007",
"split": "test"
},
"voc_2007_test_cocostyle": {
"img_dir": "voc/VOC2007/JPEGImages",
"ann_file": "voc/VOC2007/Annotations/pascal_test2007.json"
},
"voc_2012_train": {
"data_dir": "voc/VOC2012",
"split": "train"
},
"voc_2012_train_cocostyle": {
"img_dir": "voc/VOC2012/JPEGImages",
"ann_file": "voc/VOC2012/Annotations/pascal_train2012.json"
},
"voc_2012_val": {
"data_dir": "voc/VOC2012",
"split": "val"
},
"voc_2012_val_cocostyle": {
"img_dir": "voc/VOC2012/JPEGImages",
"ann_file": "voc/VOC2012/Annotations/pascal_val2012.json"
},
"voc_2012_test": {
"data_dir": "voc/VOC2012",
"split": "test"
            # PASCAL VOC2012 didn't make the test annotations available, so there's no json annotation
},
"cityscapes_fine_instanceonly_seg_train_cocostyle": {
"img_dir": "cityscapes/images",
"ann_file": "cityscapes/annotations/instancesonly_filtered_gtFine_train.json"
},
"cityscapes_fine_instanceonly_seg_val_cocostyle": {
"img_dir": "cityscapes/images",
"ann_file": "cityscapes/annotations/instancesonly_filtered_gtFine_val.json"
},
"cityscapes_fine_instanceonly_seg_test_cocostyle": {
"img_dir": "cityscapes/images",
"ann_file": "cityscapes/annotations/instancesonly_filtered_gtFine_test.json"
}
}
@staticmethod
def get(name):
if "coco" in name:
data_dir = DatasetCatalog.DATA_DIR
attrs = DatasetCatalog.DATASETS[name]
args = dict(
root=os.path.join(data_dir, attrs["img_dir"]),
ann_file=os.path.join(data_dir, attrs["ann_file"]),
)
return dict(
factory="COCODataset",
args=args,
)
elif "voc" in name:
data_dir = DatasetCatalog.DATA_DIR
attrs = DatasetCatalog.DATASETS[name]
args = dict(
data_dir=os.path.join(data_dir, attrs["data_dir"]),
split=attrs["split"],
)
return dict(
factory="PascalVOCDataset",
args=args,
)
raise RuntimeError("Dataset not available: {}".format(name))
class ModelCatalog(object):
S3_C2_DETECTRON_URL = "https://dl.fbaipublicfiles.com/detectron"
C2_IMAGENET_MODELS = {
"MSRA/R-50": "ImageNetPretrained/MSRA/R-50.pkl",
"MSRA/R-50-GN": "ImageNetPretrained/47261647/R-50-GN.pkl",
"MSRA/R-101": "ImageNetPretrained/MSRA/R-101.pkl",
"MSRA/R-101-GN": "ImageNetPretrained/47592356/R-101-GN.pkl",
"FAIR/20171220/X-101-32x8d": "ImageNetPretrained/20171220/X-101-32x8d.pkl",
}
C2_DETECTRON_SUFFIX = "output/train/{}coco_2014_train%3A{}coco_2014_valminusminival/generalized_rcnn/model_final.pkl"
C2_DETECTRON_MODELS = {
"35857197/e2e_faster_rcnn_R-50-C4_1x": "01_33_49.iAX0mXvW",
"35857345/e2e_faster_rcnn_R-50-FPN_1x": "01_36_30.cUF7QR7I",
"35857890/e2e_faster_rcnn_R-101-FPN_1x": "01_38_50.sNxI7sX7",
"36761737/e2e_faster_rcnn_X-101-32x8d-FPN_1x": "06_31_39.5MIHi1fZ",
"35858791/e2e_mask_rcnn_R-50-C4_1x": "01_45_57.ZgkA7hPB",
"35858933/e2e_mask_rcnn_R-50-FPN_1x": "01_48_14.DzEQe4wC",
"35861795/e2e_mask_rcnn_R-101-FPN_1x": "02_31_37.KqyEK4tT",
"36761843/e2e_mask_rcnn_X-101-32x8d-FPN_1x": "06_35_59.RZotkLKI",
"37129812/e2e_mask_rcnn_X-152-32x8d-FPN-IN5k_1.44x": "09_35_36.8pzTQKYK",
# keypoints
"37697547/e2e_keypoint_rcnn_R-50-FPN_1x": "08_42_54.kdzV35ao"
}
@staticmethod
def get(name):
if name.startswith("Caffe2Detectron/COCO"):
return ModelCatalog.get_c2_detectron_12_2017_baselines(name)
if name.startswith("ImageNetPretrained"):
return ModelCatalog.get_c2_imagenet_pretrained(name)
raise RuntimeError("model not present in the catalog {}".format(name))
@staticmethod
def get_c2_imagenet_pretrained(name):
prefix = ModelCatalog.S3_C2_DETECTRON_URL
name = name[len("ImageNetPretrained/"):]
name = ModelCatalog.C2_IMAGENET_MODELS[name]
url = "/".join([prefix, name])
return url
@staticmethod
def get_c2_detectron_12_2017_baselines(name):
# Detectron C2 models are stored following the structure
# prefix/<model_id>/2012_2017_baselines/<model_name>.yaml.<signature>/suffix
# we use as identifiers in the catalog Caffe2Detectron/COCO/<model_id>/<model_name>
prefix = ModelCatalog.S3_C2_DETECTRON_URL
dataset_tag = "keypoints_" if "keypoint" in name else ""
suffix = ModelCatalog.C2_DETECTRON_SUFFIX.format(dataset_tag, dataset_tag)
# remove identification prefix
name = name[len("Caffe2Detectron/COCO/"):]
# split in <model_id> and <model_name>
model_id, model_name = name.split("/")
# parsing to make it match the url address from the Caffe2 models
model_name = "{}.yaml".format(model_name)
signature = ModelCatalog.C2_DETECTRON_MODELS[name]
unique_name = ".".join([model_name, signature])
url = "/".join([prefix, model_id, "12_2017_baselines", unique_name, suffix])
return url
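    # Resolution sketch (identifier taken from C2_DETECTRON_MODELS above):
    #
    #     ModelCatalog.get('Caffe2Detectron/COCO/35857345/e2e_faster_rcnn_R-50-FPN_1x')
    #     # -> <S3_C2_DETECTRON_URL>/35857345/12_2017_baselines/
    #     #    e2e_faster_rcnn_R-50-FPN_1x.yaml.01_36_30.cUF7QR7I/<suffix>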
| apache-2.0 |
schreiberx/sweet | benchmarks_plane/nonlinear_interaction/pp_plot_errors_single.py | 2 | 2935 | #! /usr/bin/env python3
import sys
import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from matplotlib.lines import Line2D
from mule.postprocessing.JobsData import *
from mule.postprocessing.JobsDataConsolidate import *
# Both the mule tag and the output filename are required.
if len(sys.argv) > 2:
muletag = sys.argv[1]
output_filename = sys.argv[2]
else:
print("")
print("Usage:")
print("")
print(" "+sys.argv[0]+" [jobdata mule tag for y axis] [output_filename.pdf] [jobdir1] [jobdir2] ... [jobdirN]")
print("")
sys.exit(1)
if len(sys.argv) > 3:
# Load Jobs specified via program parameters
jd = JobsData(job_dirs=sys.argv[3:])
else:
# Load all Jobs
jd = JobsData()
# Consolidate data...
jdc = JobsDataConsolidate(jd)
# ... which belongs to the same time integration method
jdc_groups = jdc.create_groups(['runtime.timestepping_method'])
#
# Filter to exclude data which indicates instabilities
#
def data_filter(x, y, jd):
if y == None:
return True
if 'runtime.max_simulation_time' in jd:
if jd['runtime.max_simulation_time'] <= 24*60*60:
if y > 100:
return True
elif jd['runtime.max_simulation_time'] <= 10*24*60*60:
if y > 1000:
return True
return False
# Exctract data suitable for plotting
jdc_groups_data = JobsData_GroupsPlottingScattered(
jdc_groups,
'runtime.timestep_size',
muletag,
data_filter=data_filter
)
data = jdc_groups_data.get_data()
def label(d):
val = d['runtime.timestepping_method'].replace('_', '\\_')+', $\Delta t = '+str(d['runtime.timestep_size'])+'$'
return val
##########################################################
# Plotting starts here
##########################################################
print("*"*80)
print("*"*80)
print("*"*80)
fontsize=18
figsize=(10, 10)
fig, ax = plt.subplots(figsize=figsize)
#plt.rc('text', usetex=True)
ax.set_xscale("log", nonposx='clip')
ax.set_yscale("log", nonposy='clip')
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
markers = []
for m in Line2D.markers:
try:
if len(m) == 1 and m != ' ' and m != '':
markers.append(m)
except TypeError:
pass
linestyles = ['-', '--', ':', '-.']
c = 0
title = ''
for key, d in data.items():
x = d['x_values']
y = d['y_values']
l = key.replace('_', '\\_')
print(" + "+l)
print(x)
print(y)
ax.plot(x, y, marker=markers[c % len(markers)], linestyle=linestyles[c % len(linestyles)], label=l)
c = c + 1
if title != '':
plt.title(title, fontsize=fontsize)
plt.xlabel("Timestep size $\Delta t$ (sec)", fontsize=fontsize)
#
# Name of data
#
dataname = "TODO"
if 'prog_h' in muletag:
dataname = "surface height $h$"
#
# Norm
#
if 'linf' in muletag:
norm = "$L_\infty$"
else:
norm = "$L_{TODO}$"
plt.ylabel(norm+" error on "+dataname, fontsize=fontsize)
plt.legend(fontsize=15)
plt.savefig(output_filename, transparent=True, bbox_inches='tight', pad_inches=0)
plt.close()
| mit |
SatoshiNXSimudrone/sl4a-damon-clone | python/src/Tools/pybench/With.py | 43 | 4137 | from __future__ import with_statement
from pybench import Test
class WithFinally(Test):
version = 2.0
operations = 20
rounds = 80000
class ContextManager(object):
def __enter__(self):
pass
def __exit__(self, exc, val, tb):
pass
def test(self):
cm = self.ContextManager()
for i in xrange(self.rounds):
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
def calibrate(self):
cm = self.ContextManager()
for i in xrange(self.rounds):
pass
class TryFinally(Test):
version = 2.0
operations = 20
rounds = 80000
class ContextManager(object):
def __enter__(self):
pass
def __exit__(self):
# "Context manager" objects used just for their cleanup
# actions in finally blocks usually don't have parameters.
pass
def test(self):
cm = self.ContextManager()
for i in xrange(self.rounds):
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
def calibrate(self):
cm = self.ContextManager()
for i in xrange(self.rounds):
pass
class WithRaiseExcept(Test):
version = 2.0
operations = 2 + 3 + 3
rounds = 100000
class BlockExceptions(object):
def __enter__(self):
pass
def __exit__(self, exc, val, tb):
return True
def test(self):
error = ValueError
be = self.BlockExceptions()
for i in xrange(self.rounds):
with be: raise error
with be: raise error
with be: raise error,"something"
with be: raise error,"something"
with be: raise error,"something"
with be: raise error("something")
with be: raise error("something")
with be: raise error("something")
def calibrate(self):
error = ValueError
be = self.BlockExceptions()
for i in xrange(self.rounds):
pass
| apache-2.0 |
francisco-dlp/hyperspy | hyperspy/samfire_utils/weights/red_chisq.py | 6 | 1212 | # -*- coding: utf-8 -*-
# Copyright 2007-2011 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
class ReducedChiSquaredWeight(object):
def __init__(self):
self.expected = 1.0
self.model = None
def function(self, ind):
return np.abs(self.model.red_chisq.data[ind] - self.expected)
def map(self, mask, slices=slice(None, None)):
thing = self.model.red_chisq.data[slices].copy()
thing = thing.astype('float64')
thing[np.logical_not(mask)] = np.nan
return np.abs(thing - self.expected)
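    # Usage sketch (`fitted_model` stands for a fitted hyperspy model): a
    # well-fitted pixel has red_chisq close to 1, so its weight is near 0.
    #
    #     weight = ReducedChiSquaredWeight()
    #     weight.model = fitted_model
    #     badness = weight.map(
    #         np.ones(fitted_model.red_chisq.data.shape, dtype=bool))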
| gpl-3.0 |
johnnykv/heralding | heralding/tests/test_pop3.py | 1 | 2904 | # Copyright (C) 2017 Johnny Vestergaard <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import asyncio
import unittest
from heralding.capabilities.pop3 import Pop3
from heralding.misc.common import cancel_all_pending_tasks
from heralding.reporting.reporting_relay import ReportingRelay
class Pop3Tests(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
self.reporting_relay = ReportingRelay()
self.reporting_relay_task = self.loop.run_in_executor(
None, self.reporting_relay.start)
def tearDown(self):
self.reporting_relay.stop()
# We give reporting_relay a chance to be finished
self.loop.run_until_complete(self.reporting_relay_task)
self.server.close()
self.loop.run_until_complete(self.server.wait_closed())
self.loop.run_until_complete(cancel_all_pending_tasks(self.loop))
self.loop.close()
def test_login(self):
"""Testing different login combinations"""
async def pop3_login():
login_sequences = [
# invalid login, invalid password
(('USER wakkwakk', b'+OK User accepted'),
('PASS wakkwakk', b'-ERR Authentication failed.')),
# PASS without user
(
('PASS bond', b'-ERR No username given.'),),
                # Try to run a TRANSACTION state command in AUTHORIZATION state
(
('RETR', b'-ERR Unknown command'),),
]
for sequence in login_sequences:
reader, writer = await asyncio.open_connection(
'127.0.0.1', 8888, loop=self.loop)
# skip banner
await reader.readline()
for pair in sequence:
writer.write(bytes(pair[0] + "\r\n", 'utf-8'))
response = await reader.readline()
self.assertEqual(response.rstrip(), pair[1])
options = {
'port': 110,
'protocol_specific_data': {
'banner': '+OK POP3 server ready',
'max_attempts': 3
},
'users': {
'james': 'bond'
}
}
sut = Pop3(options, self.loop)
server_coro = asyncio.start_server(
sut.handle_session, '0.0.0.0', 8888, loop=self.loop)
self.server = self.loop.run_until_complete(server_coro)
self.loop.run_until_complete(pop3_login())
| gpl-3.0 |
unicri/edx-platform | lms/djangoapps/ccx/tests/test_utils.py | 6 | 22042 | """
test utils
"""
from nose.plugins.attrib import attr
from ccx.models import ( # pylint: disable=import-error
CcxMembership,
CcxFutureMembership,
)
from ccx.tests.factories import ( # pylint: disable=import-error
CcxFactory,
CcxMembershipFactory,
CcxFutureMembershipFactory,
)
from student.roles import CourseCcxCoachRole # pylint: disable=import-error
from student.tests.factories import ( # pylint: disable=import-error
AdminFactory,
UserFactory,
CourseEnrollmentFactory,
AnonymousUserFactory,
)
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@attr('shard_1')
class TestEmailEnrollmentState(ModuleStoreTestCase):
"""unit tests for the EmailEnrollmentState class
"""
def setUp(self):
"""
Set up tests
"""
super(TestEmailEnrollmentState, self).setUp()
# remove user provided by the parent test case so we can make our own
# when needed.
self.user = None
course = CourseFactory.create()
coach = AdminFactory.create()
role = CourseCcxCoachRole(course.id)
role.add_users(coach)
self.ccx = CcxFactory(course_id=course.id, coach=coach)
def create_user(self):
"""provide a legitimate django user for testing
"""
if getattr(self, 'user', None) is None:
self.user = UserFactory()
def register_user_in_ccx(self):
"""create registration of self.user in self.ccx
registration will be inactive
"""
self.create_user()
CcxMembershipFactory(ccx=self.ccx, student=self.user)
def create_one(self, email=None):
"""Create a single EmailEnrollmentState object and return it
"""
from ccx.utils import EmailEnrollmentState # pylint: disable=import-error
if email is None:
email = self.user.email
return EmailEnrollmentState(self.ccx, email)
def test_enrollment_state_for_non_user(self):
"""verify behavior for non-user email address
"""
ee_state = self.create_one(email='[email protected]')
for attr in ['user', 'member', 'full_name', 'in_ccx']:
value = getattr(ee_state, attr, 'missing attribute')
self.assertFalse(value, "{}: {}".format(value, attr))
def test_enrollment_state_for_non_member_user(self):
"""verify behavior for email address of user who is not a ccx memeber
"""
self.create_user()
ee_state = self.create_one()
self.assertTrue(ee_state.user)
self.assertFalse(ee_state.in_ccx)
self.assertEqual(ee_state.member, self.user)
self.assertEqual(ee_state.full_name, self.user.profile.name)
def test_enrollment_state_for_member_user(self):
"""verify behavior for email address of user who is a ccx member
"""
self.create_user()
self.register_user_in_ccx()
ee_state = self.create_one()
for attr in ['user', 'in_ccx']:
self.assertTrue(
getattr(ee_state, attr, False),
"attribute {} is missing or False".format(attr)
)
self.assertEqual(ee_state.member, self.user)
self.assertEqual(ee_state.full_name, self.user.profile.name)
def test_enrollment_state_to_dict(self):
"""verify dict representation of EmailEnrollmentState
"""
self.create_user()
self.register_user_in_ccx()
ee_state = self.create_one()
ee_dict = ee_state.to_dict()
expected = {
'user': True,
'member': self.user,
'in_ccx': True,
}
for expected_key, expected_value in expected.iteritems():
self.assertTrue(expected_key in ee_dict)
self.assertEqual(expected_value, ee_dict[expected_key])
def test_enrollment_state_repr(self):
self.create_user()
self.register_user_in_ccx()
ee_state = self.create_one()
representation = repr(ee_state)
self.assertTrue('user=True' in representation)
self.assertTrue('in_ccx=True' in representation)
member = 'member={}'.format(self.user)
self.assertTrue(member in representation)
@attr('shard_1')
# TODO: deal with changes in behavior for auto_enroll
class TestGetEmailParams(ModuleStoreTestCase):
"""tests for ccx.utils.get_email_params
"""
def setUp(self):
"""
Set up tests
"""
super(TestGetEmailParams, self).setUp()
course = CourseFactory.create()
coach = AdminFactory.create()
role = CourseCcxCoachRole(course.id)
role.add_users(coach)
self.ccx = CcxFactory(course_id=course.id, coach=coach)
self.all_keys = [
'site_name', 'course', 'course_url', 'registration_url',
'course_about_url', 'auto_enroll'
]
self.url_keys = [k for k in self.all_keys if 'url' in k]
self.course_keys = [k for k in self.url_keys if 'course' in k]
def call_fut(self, auto_enroll=False, secure=False):
"""
call function under test
"""
from ccx.utils import get_email_params # pylint: disable=import-error
return get_email_params(self.ccx, auto_enroll, secure)
def test_params_have_expected_keys(self):
params = self.call_fut()
self.assertFalse(set(params.keys()) - set(self.all_keys))
def test_ccx_id_in_params(self):
expected_course_id = self.ccx.course_id.to_deprecated_string()
params = self.call_fut()
self.assertEqual(params['course'], self.ccx)
for url_key in self.url_keys:
self.assertTrue('http://' in params[url_key])
for url_key in self.course_keys:
self.assertTrue(expected_course_id in params[url_key])
def test_security_respected(self):
secure = self.call_fut(secure=True)
for url_key in self.url_keys:
self.assertTrue('https://' in secure[url_key])
insecure = self.call_fut(secure=False)
for url_key in self.url_keys:
self.assertTrue('http://' in insecure[url_key])
def test_auto_enroll_passed_correctly(self):
not_auto = self.call_fut(auto_enroll=False)
self.assertFalse(not_auto['auto_enroll'])
auto = self.call_fut(auto_enroll=True)
self.assertTrue(auto['auto_enroll'])
@attr('shard_1')
# TODO: deal with changes in behavior for auto_enroll
class TestEnrollEmail(ModuleStoreTestCase):
"""tests for the enroll_email function from ccx.utils
"""
def setUp(self):
super(TestEnrollEmail, self).setUp()
# unbind the user created by the parent, so we can create our own when
# needed.
self.user = None
course = CourseFactory.create()
coach = AdminFactory.create()
role = CourseCcxCoachRole(course.id)
role.add_users(coach)
self.ccx = CcxFactory(course_id=course.id, coach=coach)
self.outbox = self.get_outbox()
def create_user(self):
"""provide a legitimate django user for testing
"""
if getattr(self, 'user', None) is None:
self.user = UserFactory()
def register_user_in_ccx(self):
"""create registration of self.user in self.ccx
registration will be inactive
"""
self.create_user()
CcxMembershipFactory(ccx=self.ccx, student=self.user)
def get_outbox(self):
"""Return the django mail outbox"""
from django.core import mail
return mail.outbox
def check_membership(self, email=None, user=None, future=False):
"""Verify tjat an appropriate CCX Membership exists"""
if not email and not user:
self.fail(
"must provide user or email address to check CCX Membership"
)
if future and email:
membership = CcxFutureMembership.objects.filter(
ccx=self.ccx, email=email
)
elif not future:
if not user:
user = self.user
membership = CcxMembership.objects.filter(
ccx=self.ccx, student=user
)
self.assertTrue(membership.exists())
def check_enrollment_state(self, state, in_ccx, member, user):
"""Verify an enrollment state object against provided arguments
state.in_ccx will always be a boolean
state.user will always be a boolean
state.member will be a Django user object or None
"""
self.assertEqual(in_ccx, state.in_ccx)
self.assertEqual(member, state.member)
self.assertEqual(user, state.user)
def call_fut(
self,
student_email=None,
auto_enroll=False,
email_students=False,
email_params=None
):
"""Call function under test"""
from ccx.utils import enroll_email # pylint: disable=import-error
if student_email is None:
student_email = self.user.email
before, after = enroll_email(
self.ccx, student_email, auto_enroll, email_students, email_params
)
return before, after
def test_enroll_non_user_sending_email(self):
"""enroll a non-user email and send an enrollment email to them
"""
# ensure no emails are in the outbox now
self.assertEqual(self.outbox, [])
test_email = "[email protected]"
before, after = self.call_fut(
student_email=test_email, email_students=True
)
# there should be a future membership set for this email address now
self.check_membership(email=test_email, future=True)
for state in [before, after]:
self.check_enrollment_state(state, False, None, False)
# mail was sent and to the right person
self.assertEqual(len(self.outbox), 1)
msg = self.outbox[0]
self.assertTrue(test_email in msg.recipients())
def test_enroll_non_member_sending_email(self):
"""register a non-member and send an enrollment email to them
"""
self.create_user()
# ensure no emails are in the outbox now
self.assertEqual(self.outbox, [])
before, after = self.call_fut(email_students=True)
# there should be a membership set for this email address now
self.check_membership(email=self.user.email)
self.check_enrollment_state(before, False, self.user, True)
self.check_enrollment_state(after, True, self.user, True)
# mail was sent and to the right person
self.assertEqual(len(self.outbox), 1)
msg = self.outbox[0]
self.assertTrue(self.user.email in msg.recipients())
def test_enroll_member_sending_email(self):
"""register a member and send an enrollment email to them
"""
self.register_user_in_ccx()
# ensure no emails are in the outbox now
self.assertEqual(self.outbox, [])
before, after = self.call_fut(email_students=True)
# there should be a membership set for this email address now
self.check_membership(email=self.user.email)
for state in [before, after]:
self.check_enrollment_state(state, True, self.user, True)
# mail was sent and to the right person
self.assertEqual(len(self.outbox), 1)
msg = self.outbox[0]
self.assertTrue(self.user.email in msg.recipients())
def test_enroll_non_user_no_email(self):
"""register a non-user via email address but send no email
"""
# ensure no emails are in the outbox now
self.assertEqual(self.outbox, [])
test_email = "[email protected]"
before, after = self.call_fut(
student_email=test_email, email_students=False
)
# there should be a future membership set for this email address now
self.check_membership(email=test_email, future=True)
for state in [before, after]:
self.check_enrollment_state(state, False, None, False)
# ensure there are still no emails in the outbox now
self.assertEqual(self.outbox, [])
def test_enroll_non_member_no_email(self):
"""register a non-member but send no email"""
self.create_user()
# ensure no emails are in the outbox now
self.assertEqual(self.outbox, [])
before, after = self.call_fut(email_students=False)
# there should be a membership set for this email address now
self.check_membership(email=self.user.email)
self.check_enrollment_state(before, False, self.user, True)
self.check_enrollment_state(after, True, self.user, True)
# ensure there are still no emails in the outbox now
self.assertEqual(self.outbox, [])
def test_enroll_member_no_email(self):
"""enroll a member but send no email
"""
self.register_user_in_ccx()
# ensure no emails are in the outbox now
self.assertEqual(self.outbox, [])
before, after = self.call_fut(email_students=False)
# there should be a membership set for this email address now
self.check_membership(email=self.user.email)
for state in [before, after]:
self.check_enrollment_state(state, True, self.user, True)
# ensure there are still no emails in the outbox now
self.assertEqual(self.outbox, [])
@attr('shard_1')
# TODO: deal with changes in behavior for auto_enroll
class TestUnenrollEmail(ModuleStoreTestCase):
"""Tests for the unenroll_email function from ccx.utils"""
def setUp(self):
super(TestUnenrollEmail, self).setUp()
# unbind the user created by the parent, so we can create our own when
# needed.
self.user = None
course = CourseFactory.create()
coach = AdminFactory.create()
role = CourseCcxCoachRole(course.id)
role.add_users(coach)
self.ccx = CcxFactory(course_id=course.id, coach=coach)
self.outbox = self.get_outbox()
self.email = "[email protected]"
def get_outbox(self):
"""Return the django mail outbox"""
from django.core import mail
return mail.outbox
def create_user(self):
"""provide a legitimate django user for testing
"""
if getattr(self, 'user', None) is None:
self.user = UserFactory()
def make_ccx_membership(self):
"""create registration of self.user in self.ccx
registration will be inactive
"""
self.create_user()
CcxMembershipFactory.create(ccx=self.ccx, student=self.user)
def make_ccx_future_membership(self):
"""create future registration for email in self.ccx"""
CcxFutureMembershipFactory.create(
ccx=self.ccx, email=self.email
)
def check_enrollment_state(self, state, in_ccx, member, user):
"""Verify an enrollment state object against provided arguments
state.in_ccx will always be a boolean
state.user will always be a boolean
state.member will be a Django user object or None
"""
self.assertEqual(in_ccx, state.in_ccx)
self.assertEqual(member, state.member)
self.assertEqual(user, state.user)
def check_membership(self, future=False):
"""
check membership
"""
if future:
membership = CcxFutureMembership.objects.filter(
ccx=self.ccx, email=self.email
)
else:
membership = CcxMembership.objects.filter(
ccx=self.ccx, student=self.user
)
return membership.exists()
def call_fut(self, email_students=False):
"""call function under test"""
from ccx.utils import unenroll_email # pylint: disable=import-error
        email = self.user.email if getattr(self, 'user', None) else self.email
return unenroll_email(self.ccx, email, email_students=email_students)
def test_unenroll_future_member_with_email(self):
"""unenroll a future member and send an email
"""
self.make_ccx_future_membership()
# assert that a membership exists and that no emails have been sent
self.assertTrue(self.check_membership(future=True))
self.assertEqual(self.outbox, [])
# unenroll the student
before, after = self.call_fut(email_students=True)
# assert that membership is now gone
self.assertFalse(self.check_membership(future=True))
# validate the before and after enrollment states
for state in [before, after]:
self.check_enrollment_state(state, False, None, False)
# check that mail was sent and to the right person
self.assertEqual(len(self.outbox), 1)
msg = self.outbox[0]
self.assertTrue(self.email in msg.recipients())
def test_unenroll_member_with_email(self):
"""unenroll a current member and send an email"""
self.make_ccx_membership()
# assert that a membership exists and that no emails have been sent
self.assertTrue(self.check_membership())
self.assertEqual(self.outbox, [])
# unenroll the student
before, after = self.call_fut(email_students=True)
# assert that membership is now gone
self.assertFalse(self.check_membership())
# validate the before and after enrollment state
self.check_enrollment_state(after, False, self.user, True)
self.check_enrollment_state(before, True, self.user, True)
# check that mail was sent and to the right person
self.assertEqual(len(self.outbox), 1)
msg = self.outbox[0]
self.assertTrue(self.user.email in msg.recipients())
def test_unenroll_future_member_no_email(self):
"""unenroll a future member but send no email
"""
self.make_ccx_future_membership()
# assert that a membership exists and that no emails have been sent
self.assertTrue(self.check_membership(future=True))
self.assertEqual(self.outbox, [])
# unenroll the student
before, after = self.call_fut()
# assert that membership is now gone
self.assertFalse(self.check_membership(future=True))
# validate the before and after enrollment states
for state in [before, after]:
self.check_enrollment_state(state, False, None, False)
# no email was sent to the student
self.assertEqual(self.outbox, [])
def test_unenroll_member_no_email(self):
"""unenroll a current member but send no email
"""
self.make_ccx_membership()
# assert that a membership exists and that no emails have been sent
self.assertTrue(self.check_membership())
self.assertEqual(self.outbox, [])
# unenroll the student
before, after = self.call_fut()
# assert that membership is now gone
self.assertFalse(self.check_membership())
# validate the before and after enrollment state
self.check_enrollment_state(after, False, self.user, True)
self.check_enrollment_state(before, True, self.user, True)
# no email was sent to the student
self.assertEqual(self.outbox, [])
@attr('shard_1')
class TestUserCCXList(ModuleStoreTestCase):
"""Unit tests for ccx.utils.get_all_ccx_for_user"""
def setUp(self):
"""Create required infrastructure for tests"""
super(TestUserCCXList, self).setUp()
self.course = CourseFactory.create()
coach = AdminFactory.create()
role = CourseCcxCoachRole(self.course.id)
role.add_users(coach)
self.ccx = CcxFactory(course_id=self.course.id, coach=coach)
enrollment = CourseEnrollmentFactory.create(course_id=self.course.id)
self.user = enrollment.user
self.anonymous = AnonymousUserFactory.create()
def register_user_in_ccx(self, active=False):
"""create registration of self.user in self.ccx
registration will be inactive unless active=True
"""
CcxMembershipFactory(ccx=self.ccx, student=self.user, active=active)
def get_course_title(self):
"""Get course title"""
from courseware.courses import get_course_about_section # pylint: disable=import-error
return get_course_about_section(self.course, 'title')
def call_fut(self, user):
"""Call function under test"""
from ccx.utils import get_all_ccx_for_user # pylint: disable=import-error
return get_all_ccx_for_user(user)
def test_anonymous_sees_no_ccx(self):
memberships = self.call_fut(self.anonymous)
self.assertEqual(memberships, [])
def test_unenrolled_sees_no_ccx(self):
memberships = self.call_fut(self.user)
self.assertEqual(memberships, [])
def test_enrolled_inactive_sees_no_ccx(self):
self.register_user_in_ccx()
memberships = self.call_fut(self.user)
self.assertEqual(memberships, [])
def test_enrolled_sees_a_ccx(self):
self.register_user_in_ccx(active=True)
memberships = self.call_fut(self.user)
self.assertEqual(len(memberships), 1)
def test_data_structure(self):
self.register_user_in_ccx(active=True)
memberships = self.call_fut(self.user)
this_membership = memberships[0]
self.assertTrue(this_membership)
# structure contains the expected keys
for key in ['ccx_name', 'ccx_url']:
self.assertTrue(key in this_membership.keys())
url_parts = [self.course.id.to_deprecated_string(), str(self.ccx.id)]
# all parts of the ccx url are present
for part in url_parts:
self.assertTrue(part in this_membership['ccx_url'])
actual_name = self.ccx.display_name
self.assertEqual(actual_name, this_membership['ccx_name'])
| agpl-3.0 |
guewen/OpenUpgrade | addons/account/wizard/account_chart.py | 39 | 5159 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_chart(osv.osv_memory):
"""
For Chart of Accounts
"""
_name = "account.chart"
_description = "Account chart"
_columns = {
'fiscalyear': fields.many2one('account.fiscalyear', \
'Fiscal year', \
help='Keep empty for all open fiscal years'),
'period_from': fields.many2one('account.period', 'Start period'),
'period_to': fields.many2one('account.period', 'End period'),
'target_move': fields.selection([('posted', 'All Posted Entries'),
('all', 'All Entries'),
], 'Target Moves', required=True),
}
def _get_fiscalyear(self, cr, uid, context=None):
"""Return default Fiscalyear value"""
return self.pool.get('account.fiscalyear').find(cr, uid, context=context)
def onchange_fiscalyear(self, cr, uid, ids, fiscalyear_id=False, context=None):
res = {}
if fiscalyear_id:
start_period = end_period = False
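            # The first inner SELECT returns the fiscal year's earliest period
            # (by date_start); the second returns its latest already-started
            # period (date_start < NOW(), by date_stop DESC). UNION ALL yields
            # both ids in that order.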
cr.execute('''
SELECT * FROM (SELECT p.id
FROM account_period p
LEFT JOIN account_fiscalyear f ON (p.fiscalyear_id = f.id)
WHERE f.id = %s
ORDER BY p.date_start ASC
LIMIT 1) AS period_start
UNION ALL
SELECT * FROM (SELECT p.id
FROM account_period p
LEFT JOIN account_fiscalyear f ON (p.fiscalyear_id = f.id)
WHERE f.id = %s
AND p.date_start < NOW()
ORDER BY p.date_stop DESC
LIMIT 1) AS period_stop''', (fiscalyear_id, fiscalyear_id))
periods = [i[0] for i in cr.fetchall()]
if periods and len(periods) > 1:
start_period = periods[0]
end_period = periods[1]
res['value'] = {'period_from': start_period, 'period_to': end_period}
else:
res['value'] = {'period_from': False, 'period_to': False}
return res
def account_chart_open_window(self, cr, uid, ids, context=None):
"""
Opens chart of Accounts
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of account chart’s IDs
@return: dictionary of Open account chart window on given fiscalyear and all Entries or posted entries
"""
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
period_obj = self.pool.get('account.period')
fy_obj = self.pool.get('account.fiscalyear')
if context is None:
context = {}
data = self.read(cr, uid, ids, [], context=context)[0]
result = mod_obj.get_object_reference(cr, uid, 'account', 'action_account_tree')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
fiscalyear_id = data.get('fiscalyear', False) and data['fiscalyear'][0] or False
result['periods'] = []
if data['period_from'] and data['period_to']:
period_from = data.get('period_from', False) and data['period_from'][0] or False
period_to = data.get('period_to', False) and data['period_to'][0] or False
result['periods'] = period_obj.build_ctx_periods(cr, uid, period_from, period_to)
result['context'] = str({'fiscalyear': fiscalyear_id, 'periods': result['periods'], \
'state': data['target_move']})
if fiscalyear_id:
result['name'] += ':' + fy_obj.read(cr, uid, [fiscalyear_id], context=context)[0]['code']
return result
_defaults = {
'target_move': 'posted',
'fiscalyear': _get_fiscalyear,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
PetrDlouhy/django | django/utils/module_loading.py | 30 | 6416 | import copy
import os
import sys
from importlib import import_module
from django.utils import six
def import_string(dotted_path):
"""
Import a dotted module path and return the attribute/class designated by the
last name in the path. Raise ImportError if the import failed.
"""
try:
module_path, class_name = dotted_path.rsplit('.', 1)
except ValueError:
msg = "%s doesn't look like a module path" % dotted_path
six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])
module = import_module(module_path)
try:
return getattr(module, class_name)
except AttributeError:
msg = 'Module "%s" does not define a "%s" attribute/class' % (
dotted_path, class_name)
six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])
def autodiscover_modules(*args, **kwargs):
"""
Auto-discover INSTALLED_APPS modules and fail silently when
not present. This forces an import on them to register any admin bits they
may want.
You may provide a register_to keyword parameter as a way to access a
registry. This register_to object must have a _registry instance variable
to access it.
"""
from django.apps import apps
register_to = kwargs.get('register_to')
for app_config in apps.get_app_configs():
for module_to_search in args:
# Attempt to import the app's module.
try:
if register_to:
before_import_registry = copy.copy(register_to._registry)
import_module('%s.%s' % (app_config.name, module_to_search))
            except Exception:
# Reset the registry to the state before the last import
# as this import will have to reoccur on the next request and
# this could raise NotRegistered and AlreadyRegistered
# exceptions (see #8245).
if register_to:
register_to._registry = before_import_registry
# Decide whether to bubble up this error. If the app just
# doesn't have the module in question, we can ignore the error
# attempting to import it, otherwise we want it to bubble up.
if module_has_submodule(app_config.module, module_to_search):
raise
if sys.version_info[:2] >= (3, 3):
if sys.version_info[:2] >= (3, 4):
from importlib.util import find_spec as importlib_find
else:
from importlib import find_loader as importlib_find
def module_has_submodule(package, module_name):
"""See if 'module' is in 'package'."""
try:
package_name = package.__name__
package_path = package.__path__
except AttributeError:
# package isn't a package.
return False
full_module_name = package_name + '.' + module_name
return importlib_find(full_module_name, package_path) is not None
else:
import imp
def module_has_submodule(package, module_name):
"""See if 'module' is in 'package'."""
name = ".".join([package.__name__, module_name])
try:
# None indicates a cached miss; see mark_miss() in Python/import.c.
return sys.modules[name] is not None
except KeyError:
pass
try:
package_path = package.__path__ # No __path__, then not a package.
except AttributeError:
            # The remainder of this function assumes we're dealing with a
            # package (a module with a __path__), so if it's not, bail here.
return False
for finder in sys.meta_path:
if finder.find_module(name, package_path):
return True
for entry in package_path:
try:
# Try the cached finder.
finder = sys.path_importer_cache[entry]
if finder is None:
# Implicit import machinery should be used.
try:
file_, _, _ = imp.find_module(module_name, [entry])
if file_:
file_.close()
return True
except ImportError:
continue
# Else see if the finder knows of a loader.
elif finder.find_module(name):
return True
else:
continue
except KeyError:
# No cached finder, so try and make one.
for hook in sys.path_hooks:
try:
finder = hook(entry)
# XXX Could cache in sys.path_importer_cache
if finder.find_module(name):
return True
else:
# Once a finder is found, stop the search.
break
except ImportError:
# Continue the search for a finder.
continue
else:
# No finder found.
# Try the implicit import machinery if searching a directory.
if os.path.isdir(entry):
try:
file_, _, _ = imp.find_module(module_name, [entry])
if file_:
file_.close()
return True
except ImportError:
pass
# XXX Could insert None or NullImporter
else:
# Exhausted the search, so the module cannot be found.
return False
def module_dir(module):
"""
Find the name of the directory that contains a module, if possible.
Raise ValueError otherwise, e.g. for namespace packages that are split
over several directories.
"""
# Convert to list because _NamespacePath does not support indexing on 3.3.
paths = list(getattr(module, '__path__', []))
if len(paths) == 1:
return paths[0]
else:
filename = getattr(module, '__file__', None)
if filename is not None:
return os.path.dirname(filename)
raise ValueError("Cannot determine directory containing %s" % module)
| bsd-3-clause |
couchand/petard | vendor/cxxtest-4.3/test/test_doc.py | 54 | 1097 | #-------------------------------------------------------------------------
# CxxTest: A lightweight C++ unit testing library.
# Copyright (c) 2008 Sandia Corporation.
# This software is distributed under the LGPL License v3
# For more information, see the COPYING file in the top CxxTest directory.
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
#-------------------------------------------------------------------------
#
# Import and execute the Python test driver for the user guide examples
#
# Imports
try:
    import pyutilib.th as unittest
    pyutilib_available = True
except ImportError:
    pyutilib_available = False
import os
from os.path import dirname, abspath
import sys
if pyutilib_available:
currdir = dirname(abspath(__file__))+os.sep
datadir = os.sep.join([dirname(dirname(abspath(__file__))),'doc','examples'])+os.sep
os.chdir(datadir)
sys.path.insert(0, datadir)
from test_examples import *
# Execute the tests
if __name__ == '__main__':
unittest.main()
| mit |
pdellaert/ansible | test/units/modules/network/fortios/test_fortios_application_name.py | 21 | 11085 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_application_name
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_application_name.Connection')
return connection_class_mock
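# The autouse fixture above patches the Connection class for every test in
# this module, so no real FortiOS device is ever contacted.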
fos_instance = FortiOSHandler(connection_mock)
def test_application_name_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'application_name': {
'behavior': 'test_value_3',
'category': '4',
'id': '5',
'name': 'default_name_6',
'parameter': 'test_value_7',
'popularity': '8',
'protocol': 'test_value_9',
'risk': '10',
'sub_category': '11',
'technology': 'test_value_12',
'vendor': 'test_value_13',
'weight': '14'
},
'vdom': 'root'}
is_error, changed, response = fortios_application_name.fortios_application(input_data, fos_instance)
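    # The expected payload uses FortiOS-style hyphenated keys (e.g.
    # 'sub-category'): the module converts the Ansible underscore parameters
    # before calling set() (descriptive note, not original test code).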
expected_data = {
'behavior': 'test_value_3',
'category': '4',
'id': '5',
'name': 'default_name_6',
'parameter': 'test_value_7',
'popularity': '8',
'protocol': 'test_value_9',
'risk': '10',
'sub-category': '11',
'technology': 'test_value_12',
'vendor': 'test_value_13',
'weight': '14'
}
set_method_mock.assert_called_with('application', 'name', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_application_name_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'application_name': {
'behavior': 'test_value_3',
'category': '4',
'id': '5',
'name': 'default_name_6',
'parameter': 'test_value_7',
'popularity': '8',
'protocol': 'test_value_9',
'risk': '10',
'sub_category': '11',
'technology': 'test_value_12',
'vendor': 'test_value_13',
'weight': '14'
},
'vdom': 'root'}
is_error, changed, response = fortios_application_name.fortios_application(input_data, fos_instance)
expected_data = {
'behavior': 'test_value_3',
'category': '4',
'id': '5',
'name': 'default_name_6',
'parameter': 'test_value_7',
'popularity': '8',
'protocol': 'test_value_9',
'risk': '10',
'sub-category': '11',
'technology': 'test_value_12',
'vendor': 'test_value_13',
'weight': '14'
}
set_method_mock.assert_called_with('application', 'name', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_application_name_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'application_name': {
'behavior': 'test_value_3',
'category': '4',
'id': '5',
'name': 'default_name_6',
'parameter': 'test_value_7',
'popularity': '8',
'protocol': 'test_value_9',
'risk': '10',
'sub_category': '11',
'technology': 'test_value_12',
'vendor': 'test_value_13',
'weight': '14'
},
'vdom': 'root'}
is_error, changed, response = fortios_application_name.fortios_application(input_data, fos_instance)
delete_method_mock.assert_called_with('application', 'name', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_application_name_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'application_name': {
'behavior': 'test_value_3',
'category': '4',
'id': '5',
'name': 'default_name_6',
'parameter': 'test_value_7',
'popularity': '8',
'protocol': 'test_value_9',
'risk': '10',
'sub_category': '11',
'technology': 'test_value_12',
'vendor': 'test_value_13',
'weight': '14'
},
'vdom': 'root'}
is_error, changed, response = fortios_application_name.fortios_application(input_data, fos_instance)
delete_method_mock.assert_called_with('application', 'name', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_application_name_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'application_name': {
'behavior': 'test_value_3',
'category': '4',
'id': '5',
'name': 'default_name_6',
'parameter': 'test_value_7',
'popularity': '8',
'protocol': 'test_value_9',
'risk': '10',
'sub_category': '11',
'technology': 'test_value_12',
'vendor': 'test_value_13',
'weight': '14'
},
'vdom': 'root'}
is_error, changed, response = fortios_application_name.fortios_application(input_data, fos_instance)
expected_data = {
'behavior': 'test_value_3',
'category': '4',
'id': '5',
'name': 'default_name_6',
'parameter': 'test_value_7',
'popularity': '8',
'protocol': 'test_value_9',
'risk': '10',
'sub-category': '11',
'technology': 'test_value_12',
'vendor': 'test_value_13',
'weight': '14'
}
set_method_mock.assert_called_with('application', 'name', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_application_name_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'application_name': {
'random_attribute_not_valid': 'tag',
'behavior': 'test_value_3',
'category': '4',
'id': '5',
'name': 'default_name_6',
'parameter': 'test_value_7',
'popularity': '8',
'protocol': 'test_value_9',
'risk': '10',
'sub_category': '11',
'technology': 'test_value_12',
'vendor': 'test_value_13',
'weight': '14'
},
'vdom': 'root'}
is_error, changed, response = fortios_application_name.fortios_application(input_data, fos_instance)
expected_data = {
'behavior': 'test_value_3',
'category': '4',
'id': '5',
'name': 'default_name_6',
'parameter': 'test_value_7',
'popularity': '8',
'protocol': 'test_value_9',
'risk': '10',
'sub-category': '11',
'technology': 'test_value_12',
'vendor': 'test_value_13',
'weight': '14'
}
set_method_mock.assert_called_with('application', 'name', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
| gpl-3.0 |