repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---
ArthurGarnier/SickRage | lib/imdb/parser/http/searchCompanyParser.py | 76 | 2929 | """
parser.http.searchCompanyParser module (imdb package).
This module provides the HTMLSearchCompanyParser class (and the
search_company_parser instance), used to parse the results of a search
for a given company.
E.g., when searching for the name "Columbia Pictures", the parsed page would be:
http://akas.imdb.com/find?s=co;mx=20;q=Columbia+Pictures
Copyright 2008-2012 Davide Alberani <[email protected]>
2008 H. Turgut Uyar <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from imdb.utils import analyze_company_name, build_company_name
from utils import Extractor, Attribute, analyze_imdbid
from searchMovieParser import DOMHTMLSearchMovieParser, DOMBasicMovieParser
class DOMBasicCompanyParser(DOMBasicMovieParser):
"""Simply get the name of a company and the imdbID.
It's used by the DOMHTMLSearchCompanyParser class to return a result
for a direct match (when a search on IMDb results in a single
company, the web server sends the company page directly).
"""
_titleFunct = lambda self, x: analyze_company_name(x or u'')
class DOMHTMLSearchCompanyParser(DOMHTMLSearchMovieParser):
_BaseParser = DOMBasicCompanyParser
_notDirectHitTitle = '<title>find - imdb'
_titleBuilder = lambda self, x: build_company_name(x)
_linkPrefix = '/company/co'
_attrs = [Attribute(key='data',
multi=True,
path={
'link': "./a[1]/@href",
'name': "./a[1]/text()",
'notes': "./text()[1]"
},
postprocess=lambda x: (
analyze_imdbid(x.get('link')),
analyze_company_name(x.get('name')+(x.get('notes')
or u''), stripNotes=True)
))]
extractors = [Extractor(label='search',
path="//td[@class='result_text']/a[starts-with(@href, " \
"'/company/co')]/..",
attrs=_attrs)]
_OBJECTS = {
'search_company_parser': ((DOMHTMLSearchCompanyParser,),
{'kind': 'company', '_basic_parser': DOMBasicCompanyParser})
}
| gpl-3.0 |
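For context, the parser above is what IMDbPY uses when searching for companies; a minimal usage sketch (assuming the `imdb` package is installed and the IMDb web service is reachable) looks like this:

```python
from imdb import IMDb  # IMDbPY

ia = IMDb()
# search_company() fetches the company search page (like the URL in the
# docstring above) and runs it through the search-company parser.
for company in ia.search_company('Columbia Pictures'):
    print(company['name'])
```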
nvoron23/hadoop-fundamentals | tfidf/framework.py | 17 | 1300 | import os
import sys
from itertools import groupby
from operator import itemgetter
SEPARATOR = "\t"
class Streaming(object):
@staticmethod
def get_job_conf(name):
name = name.replace(".", "_").upper()
return os.environ.get(name)
def __init__(self, infile=sys.stdin, separator=SEPARATOR):
self.infile = infile
self.sep = separator
def status(self, message):
sys.stderr.write("reporter:status:%s" % message)
def counter(self, counter, amount=1, group="Python Streaming"):
sys.stderr.write("reporter:counter:%s,%s,%i" % group, counter, amount)
def emit(self, key, value):
sys.stdout.write("%s%s%s\n" % (key, self.sep, value))
def read(self):
for line in self.infile:
yield line.rstrip()
def __iter__(self):
for line in self.read():
yield line
class Mapper(Streaming):
def map(self):
raise NotImplementedError("Mappers must implement a map method")
class Reducer(Streaming):
def reduce(self):
raise NotImplementedError("Reducers must implement a reduce method")
def __iter__(self):
generator = (line.split(self.sep, 1) for line in self.read())
for item in groupby(generator, itemgetter(0)):
yield item
| mit |
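The classes above are a thin wrapper around the Hadoop Streaming protocol (tab-separated key/value pairs on stdin/stdout, `reporter:` lines on stderr). A minimal word-count job built on top of them could look like the sketch below; the `WordMapper`/`WordReducer` names and the `framework` import path are illustrative assumptions:

```python
import sys
from framework import Mapper, Reducer  # the module shown above

class WordMapper(Mapper):
    def map(self):
        for line in self:                  # Streaming.__iter__ yields stripped input lines
            for word in line.split():
                self.emit(word, 1)         # writes "word\t1" to stdout

class WordReducer(Reducer):
    def reduce(self):
        for key, group in self:            # Reducer.__iter__ groups (key, value) pairs by key
            self.emit(key, sum(int(value) for _, value in group))

if __name__ == "__main__":
    # Hadoop Streaming runs the mapper and the reducer as separate processes;
    # a real job would select the phase via a command-line flag.
    WordMapper(sys.stdin).map()
```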
mikeh77/mi-instrument | mi/instrument/seabird/sbe37smb/ooicore/test/sample_data.py | 2 | 2612 |
from mi.instrument.seabird.sbe37smb.ooicore.driver import NEWLINE
SAMPLE_DS = "SBE37-SMP V 2.6 SERIAL NO. 2165 05 Feb 2013 19:11:43" + NEWLINE + \
"not logging: received stop command" + NEWLINE + \
"sample interval = 20208 seconds" + NEWLINE + \
"samplenumber = 0, free = 200000" + NEWLINE + \
"transmit real-time data" + NEWLINE + \
"do not output salinity with each sample" + NEWLINE + \
"do not output sound velocity with each sample" + NEWLINE + \
"do not store time with each sample" + NEWLINE + \
"number of samples to average = 0" + NEWLINE + \
"reference pressure = 0.0 db" + NEWLINE + \
"serial sync mode disabled" + NEWLINE + \
"wait time after serial sync sampling = 0 seconds" + NEWLINE + \
"internal pump is installed" + NEWLINE + \
"temperature = 7.54 deg C" + NEWLINE + \
"WARNING: LOW BATTERY VOLTAGE!!"
SAMPLE_DC = "SBE37-SM V 2.6b 3464" + NEWLINE + \
"temperature: 08-nov-05" + NEWLINE + \
" TA0 = -2.572242e-04" + NEWLINE + \
" TA1 = 3.138936e-04" + NEWLINE + \
" TA2 = -9.717158e-06" + NEWLINE + \
" TA3 = 2.138735e-07" + NEWLINE + \
"conductivity: 08-nov-05" + NEWLINE + \
" G = -9.870930e-01" + NEWLINE + \
" H = 1.417895e-01" + NEWLINE + \
" I = 1.334915e-04" + NEWLINE + \
" J = 3.339261e-05" + NEWLINE + \
" CPCOR = 9.570000e-08" + NEWLINE + \
" CTCOR = 3.250000e-06" + NEWLINE + \
" WBOTC = 1.202400e-05" + NEWLINE + \
"pressure S/N 4955, range = 10847.1964958 psia: 12-aug-05" + NEWLINE + \
" PA0 = 5.916199e+00" + NEWLINE + \
" PA1 = 4.851819e-01" + NEWLINE + \
" PA2 = 4.596432e-07" + NEWLINE + \
" PTCA0 = 2.762492e+02" + NEWLINE + \
" PTCA1 = 6.603433e-01" + NEWLINE + \
" PTCA2 = 5.756490e-03" + NEWLINE + \
" PTCSB0 = 2.461450e+01" + NEWLINE + \
" PTCSB1 = -9.000000e-04" + NEWLINE + \
" PTCSB2 = 0.000000e+00" + NEWLINE + \
" POFFSET = 0.000000e+00" + NEWLINE + \
"rtc: 08-nov-05" + NEWLINE + \
" RTCA0 = 9.999862e-01" + NEWLINE + \
" RTCA1 = 1.686132e-06" + NEWLINE + \
" RTCA2 = -3.022745e-08"
SAMPLE = "#55.9044,41.40609, 572.170, 34.2583, 1505.948, 05 Feb 2013, 19:16:59" + NEWLINE
| bsd-2-clause |
wzmao/mbio | mbio/Application/math.py | 1 | 11942 | # -*- coding: utf-8 -*-
"""This module contains some math and statistics functions.
Planned for the future: Eigenvalue, Inverse, Matrix Multiplication,
SVD, PCA
"""
__author__ = 'Wenzhi Mao'
__all__ = ['isSquare', 'ANOVA', 'performRegression', 'performPolyRegression']
def isSquare(x):
"""It is a function to determine if the given integer is a square integer."""
try:
xi = int(x)
except:
return None
if xi != x:
from ..IO.output import printError
printError('The number is not an integer.')
return None
if x < 0:
from ..IO.output import printError
printError('The number is negative.')
return None
x = xi
sq = x ** .5
if abs(int(round(sq, 0)) ** 2 - x) < 1e-10:
return True
else:
return False
def eigh(x):
"""This is a function to calculate eigenvalues and eigenvectors."""
try:
from scipy.linalg.lapack import dsyevr
return dsyevr(x)[:2]
except:
from numpy.linalg import eigh as n_eigh
return n_eigh(x)
def invsp(x):
"""This is a function to inverse a symetric postive definite matrix."""
try:
from numpy.linalg import inv
return inv(x)
except:
try:
from scipy.linalg.lapack import dgetrf, dgetri
d, e = dgetrf(x)[:2]
return dgetri(d, e)[0]
except:
from ..IO.output import printError
printError("There is no `inv` function found.")
return None
class ANOVA(object):
"""It is a class for ANOVA analysis. Given the analysis data,
output the test result.
Only 1D data is supported now; more dimensions may be supported in the future.
`data` should be an n*2 numpy array or list. The first column is the value
and the second column is the label."""
def __init__(self, data=None, **kwargs):
"""Calculate the ANOVA for the data."""
from ..IO.output import printInfo
self.data = data
self.result = None
self.pvalue = self.f0 = self.fd = self.sst = self.sstreat = self.mstreat = self.sse = self.mse = self.n = None
if type(data) == type(None):
self._calculated = False
else:
self.performCalculation(**kwargs)
def performCalculation(self, alpha=0.05, outprint=True, **kwargs):
"""Perform the ANOVA calculation for the data."""
from ..IO.output import printInfo, printError
from numpy import array
from scipy.stats import f as F
self._calculated = None
self.pvalue = self.f0 = self.fd = self.sst = self.sstreat = self.mstreat = self.sse = self.mse = self.n = None
try:
self.data = array(self.data, dtype=float)
except:
printError("The data could not be transfered to numpy.array")
if self.data.ndim != 2:
printError("ANOVA class could only support 1D data now.")
return None
if self.data.shape[1] != 2:
printError("The data should be 2 column data.")
return None
labelset = set()
for i in self.data[:, 1]:
if not i in labelset:
labelset.add(i)
labelset = list(labelset)
labelset.sort()
printInfo("{} label(s) found".format(len(labelset)))
muall = self.data[:, 0].mean()
sst = ((self.data[:, 0] - muall) ** 2).sum()
n = self.data.shape[0]
ns = array([(self.data[:, 1] == i).sum()
for i in labelset], dtype=float)
mus = array([self.data[:, 0][
(self.data[:, 1] == i)].mean() - muall for i in labelset], dtype=float)
sstreat = (mus ** 2).dot(ns)
mstreat = sstreat * 1.0 / (len(ns) - 1)
mse = (0.0 + sst - sstreat) * 1.0 / (n - len(ns))
f0 = mstreat / mse
self.pvalue = 1. - F.cdf(f0, len(ns) - 1, n - len(ns))
self.f0 = f0
self.fd = (len(ns) - 1, n - len(ns))
self.sst = sst
self.sstreat = sstreat
self.mstreat = mstreat
self.sse = (0.0 + sst - sstreat)
self.mse = mse
self.n = n
self._calculated = True
if outprint:
printInfo("SS_Total = {0:13.8f} for {1} data".format(sst, n))
printInfo("MS_Treatment = {0:13.8f} with {1:6d} of free degrees".format(
mstreat, self.fd[0]))
printInfo(
"MS_Error = {0:13.8f} with {1:6d} of free degrees".format(mse, self.fd[1]))
printInfo("F0 = MS_Treatment/MS_Error = {0:12.8f}".format(f0))
printInfo(
"p-value = {0:13.8f} = {1:8.6f}%".format(self.pvalue, self.pvalue * 100))
if self.pvalue < alpha:
printInfo(
"Reject the null hypothesis at alpha = {}, each class are different.".format(alpha))
else:
printInfo(
"Accept the null hypothesis at alpha = {}, each class are the same.".format(alpha))
return None
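# --- Illustrative usage sketch of the ANOVA class above (comment only, hypothetical data) ---
# from numpy import array
# data = array([[5.1, 0], [4.9, 0], [5.0, 0],
#               [6.2, 1], [6.0, 1], [6.1, 1]])   # column 0: value, column 1: group label
# test = ANOVA(data)            # prints SS_Total, MS_Treatment, MS_Error, F0 and the p-value
# print(test.pvalue, test.f0)   # the statistics are also stored on the instance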
def performRegression(x, y, const=True, alpha=0.05, label=None, output=True, **kwargs):
"""Make regression analysis of array data. And test each parameter using t-test.
`x` must be a N*a array. `y` must be a N*1 array.
If `x` or `y` just has one dimension, it could be a 1D array and converted automatically.
`const` is `True` default and it will detect the are there constant in `x`.
If no constant in `x`, it will add a new column at the end.
`alpha` is used to test each parameter.
`label` could be used for output."""
from numpy import ndarray, array, hstack, ones
from numpy.linalg.linalg import inv
from ..IO.output import printError, printInfo
from scipy.stats import t
if not isinstance(x, ndarray) or not isinstance(y, ndarray):
try:
x = array(x, dtype=float)
y = array(y, dtype=float)
except:
printError(
"x and y must be numpy array or could be converted to numpy array.")
return None
x = array(x, dtype=float)
y = array(y, dtype=float)
if x.ndim == 2:
pass
elif x.ndim == 1:
x.resize((x.size, 1))
else:
printError("x must be 1D or 2D data.")
return None
if y.ndim == 2:
if y.shape[1] != 1:
printInfo("Just take the first column of y.")
y = y[:, 0:1]
elif y.ndim == 1:
y.resize((y.size, 1))
else:
printError("y must be 1D or 2D data.")
return None
if x.shape[0] != y.shape[0]:
printError("x and y must have same first dimension.")
return None
if label is None:
label = ['x' + str(i + 1) for i in xrange(x.shape[1])]
else:
label = [str(i) for i in label]
if len(label) != x.shape[1]:
printError(
"The length of label does not match data. Dismiss the label.")
label = ['x' + str(i + 1) for i in xrange(x.shape[1])]
addconst = 0
if const:
hasconst = False
for i in xrange(x.shape[1]):
if len(set(x[:, i])) == 1:
hasconst = True
break
if not hasconst:
x = hstack((x, ones((x.shape[0], 1))))
addconst = 1
label.append('c')
if output:
printInfo(
"Add const automatically. If you don't want to add const, use `const = False`")
cov = inv(x.T.dot(x))
beta = cov.dot(x.T).dot(y)
r = y - x.dot(beta)
sigma2 = ((r.T.dot(r)) / (x.shape[0] - x.shape[1]))[0, 0]
if sigma2 == 0:
sigma2 = 5e-324
st = '\ty = '
for i in xrange(x.shape[1] - 1):
st += "{0:+10.6f}*{1:s} ".format(beta[i, 0], label[i])
if addconst:
st += "{0:+10.6f}".format(beta[-1, 0])
else:
st += "{0:+10.6f}*{1:s}".format(beta[-1, 0], label[x.shape[1] - 1])
if output:
printInfo("The result is :")
printInfo(st)
printInfo("Test each parameter.")
printInfo("\t{0:^5s}{1:^15s}{2:^15s}{3:^15s}{4:^5s}{5:^9s}{6:^5s}".format(
"xi", "Para", "Sigma", "t-statistics", 'FD', "p-value", 'Sig'))
p = []
ts = []
sig = []
sigma = []
for i in xrange(x.shape[1]):
sigma.append((sigma2 * cov[i, i]) ** .5)
ts.append(beta[i][0] / sigma[-1])
p.append((1. - t.cdf(abs(ts[-1]), x.shape[0] - x.shape[1])) * 2)
sig.append("Yes" if 2. * (1. - t.cdf(abs(beta[i][0] / (
(sigma2 * cov[i, i]) ** .5)), x.shape[0] - x.shape[1])) < alpha else 'No')
if output:
printInfo("\t{0:^5s}{1:^15.6e}{2:^15.6e}{3:^15.6e}{4:^5d}{5:^9f}"
"{6:^5s}".format(label[i],
beta[i][0],
sigma[-1],
ts[-1],
x.shape[0] - x.shape[1],
p[-1],
sig[-1]))
p = array(p)
ts = array(ts)
sig = array(sig)
sigma = array(sigma)
return {'beta': beta, 'p': p, 't': ts, "label": label, 'sig': sig, 'sigma': sigma}
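# --- Illustrative usage sketch of performRegression (comment only, hypothetical data) ---
# from numpy import array
# x = array([1.0, 2.0, 3.0, 4.0])   # a 1D x is reshaped to a column automatically
# y = array([2.1, 3.9, 6.2, 8.1])
# res = performRegression(x, y)     # a constant column is appended automatically
# print(res['beta'])                # fitted coefficients: slope and constant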
def performPolyRegression(y, degree=2, **kwargs):
'''Build a regression with a higher-degree polynomial.
The sample index is used to build the polynomial.
The *orthogonal unit* scaling is used up to degree 4; the constant term is not included
in the polynomial columns. You can specify `const=False` to disable the constant.
'''
from numpy import ndarray, array, arange, zeros
from ..IO.output import printError, printInfo
if not isinstance(y, ndarray):
try:
y = array(y, dtype=float)
except:
printError(
"y must be numpy array or could be converted to numpy array.")
return None
y = array(y, dtype=float)
if y.ndim == 2:
if y.shape[1] != 1:
printInfo("Just take the first column of y.")
y = y[:, 0:1]
elif y.ndim == 1:
y.resize((y.size, 1))
else:
printError("y must be 1D or 2D data.")
return None
if not degree in [1, 2, 3, 4]:
printError("degree must between 1 and 4.")
if degree + 1 >= y.shape[0]:
printError("The degree must be less than the data size.")
return None
k = y.shape[0] * 1.0
poly = zeros((k, degree))
t = arange(k, dtype=float)
t = t - t.mean()
label = []
kwargs.pop('label', None)
for i in xrange(degree):
if i == 0:
label.append('x')
else:
label.append('x^' + str(i + 1))
if i == 0:
poly[:, i] = t
poly[:, i] = poly[:, i] / ((poly[:, i] ** 2).sum()) ** .5
elif i == 1:
poly[:, i] = t ** 2 - (k ** 2. - 1) / 12
poly[:, i] = poly[:, i] / ((poly[:, i] ** 2).sum()) ** .5
elif i == 2:
poly[:, i] = t ** 3 - t * ((3. * k ** 2 - 7) / 20)
poly[:, i] = poly[:, i] / ((poly[:, i] ** 2).sum()) ** .5
elif i == 3:
poly[:, i] = t ** 4 - (t ** 2) * ((3 * k ** 2 - 13) /
14.) + 3. * (k ** 2 - 1) * (k ** 2 - 9) / 560
poly[:, i] = poly[:, i] / ((poly[:, i] ** 2).sum()) ** .5
printInfo("The polynomial is listed.")
for i in xrange(degree):
if k > 6:
st = ""
for j in xrange(3):
st += " {0:>7.4f}".format(poly[j, i])
st += ' ...'
for j in xrange(-3, 0):
st += " {0:>7.4f}".format(poly[j, i])
else:
st = ""
for j in xrange(int(k)):
st += " {0:>7.4f}".format(poly[j, i])
printInfo("\t{0:^5s}:{1}".format(label[i], st))
result = performRegression(poly, y, label=label, **kwargs)
result['poly'] = poly
return result
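# --- Illustrative usage sketch of performPolyRegression (comment only, hypothetical data) ---
# from numpy import array
# y = array([1.0, 3.8, 9.2, 16.1, 24.8])     # roughly quadratic in the sample index
# res = performPolyRegression(y, degree=2)   # fits orthogonal linear and quadratic terms
# print(res['beta'], res['sig'])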
| mit |
sysadminmatmoz/OCB | addons/auth_signup/res_users.py | 25 | 14601 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime, timedelta
import random
from urlparse import urljoin
import werkzeug
from openerp.addons.base.ir.ir_mail_server import MailDeliveryException
from openerp.osv import osv, fields
from openerp.tools.misc import DEFAULT_SERVER_DATETIME_FORMAT, ustr
from ast import literal_eval
from openerp.tools.translate import _
from openerp.exceptions import UserError
class SignupError(Exception):
pass
def random_token():
# the token has an entropy of about 120 bits (6 bits/char * 20 chars)
chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
return ''.join(random.SystemRandom().choice(chars) for i in xrange(20))
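# Worked check of the entropy figure above (illustrative comment): each character is
# drawn uniformly from len(chars) == 62 symbols, so the per-character entropy is
# log2(62) ~= 5.95 bits and a 20-character token carries roughly 20 * 5.95 ~= 119 bits,
# i.e. "about 120 bits".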
def now(**kwargs):
dt = datetime.now() + timedelta(**kwargs)
return dt.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
class res_partner(osv.Model):
_inherit = 'res.partner'
def _get_signup_valid(self, cr, uid, ids, name, arg, context=None):
dt = now()
res = {}
for partner in self.browse(cr, uid, ids, context):
res[partner.id] = bool(partner.signup_token) and \
(not partner.signup_expiration or dt <= partner.signup_expiration)
return res
def _get_signup_url_for_action(self, cr, uid, ids, action=None, view_type=None, menu_id=None, res_id=None, model=None, context=None):
""" generate a signup url for the given partner ids and action, possibly overriding
the url state components (menu_id, id, view_type) """
if context is None:
context= {}
res = dict.fromkeys(ids, False)
base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url')
for partner in self.browse(cr, uid, ids, context):
# when required, make sure the partner has a valid signup token
if context.get('signup_valid') and not partner.user_ids:
self.signup_prepare(cr, uid, [partner.id], context=context)
route = 'login'
# the parameters to encode for the query
query = dict(db=cr.dbname)
signup_type = context.get('signup_force_type_in_url', partner.signup_type or '')
if signup_type:
route = 'reset_password' if signup_type == 'reset' else signup_type
if partner.signup_token and signup_type:
query['token'] = partner.signup_token
elif partner.user_ids:
query['login'] = partner.user_ids[0].login
else:
continue # no signup token, no user, thus no signup url!
fragment = dict()
base = '/web#'
if action == '/mail/view':
base = '/mail/view?'
elif action:
fragment['action'] = action
if view_type:
fragment['view_type'] = view_type
if menu_id:
fragment['menu_id'] = menu_id
if model:
fragment['model'] = model
if res_id:
fragment['res_id'] = res_id
if fragment:
query['redirect'] = base + werkzeug.url_encode(fragment)
res[partner.id] = urljoin(base_url, "/web/%s?%s" % (route, werkzeug.url_encode(query)))
return res
def _get_signup_url(self, cr, uid, ids, name, arg, context=None):
""" proxy for function field towards actual implementation """
return self._get_signup_url_for_action(cr, uid, ids, context=context)
_columns = {
'signup_token': fields.char('Signup Token', copy=False),
'signup_type': fields.char('Signup Token Type', copy=False),
'signup_expiration': fields.datetime('Signup Expiration', copy=False),
'signup_valid': fields.function(_get_signup_valid, type='boolean', string='Signup Token is Valid'),
'signup_url': fields.function(_get_signup_url, type='char', string='Signup URL'),
}
def action_signup_prepare(self, cr, uid, ids, context=None):
return self.signup_prepare(cr, uid, ids, context=context)
def signup_cancel(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'signup_token': False, 'signup_type': False, 'signup_expiration': False}, context=context)
def signup_prepare(self, cr, uid, ids, signup_type="signup", expiration=False, context=None):
""" generate a new token for the partners with the given validity, if necessary
:param expiration: the expiration datetime of the token (string, optional)
"""
for partner in self.browse(cr, uid, ids, context):
if expiration or not partner.signup_valid:
token = random_token()
while self._signup_retrieve_partner(cr, uid, token, context=context):
token = random_token()
partner.write({'signup_token': token, 'signup_type': signup_type, 'signup_expiration': expiration})
return True
def _signup_retrieve_partner(self, cr, uid, token,
check_validity=False, raise_exception=False, context=None):
""" find the partner corresponding to a token, and possibly check its validity
:param token: the token to resolve
:param check_validity: if True, also check validity
:param raise_exception: if True, raise exception instead of returning False
:return: partner (browse record) or False (if raise_exception is False)
"""
partner_ids = self.search(cr, uid, [('signup_token', '=', token)], context=context)
if not partner_ids:
if raise_exception:
raise SignupError("Signup token '%s' is not valid" % token)
return False
partner = self.browse(cr, uid, partner_ids[0], context)
if check_validity and not partner.signup_valid:
if raise_exception:
raise SignupError("Signup token '%s' is no longer valid" % token)
return False
return partner
def signup_retrieve_info(self, cr, uid, token, context=None):
""" retrieve the user info about the token
:return: a dictionary with the user information:
- 'db': the name of the database
- 'token': the token, if token is valid
- 'name': the name of the partner, if token is valid
- 'login': the user login, if the user already exists
- 'email': the partner email, if the user does not exist
"""
partner = self._signup_retrieve_partner(cr, uid, token, raise_exception=True, context=None)
res = {'db': cr.dbname}
if partner.signup_valid:
res['token'] = token
res['name'] = partner.name
if partner.user_ids:
res['login'] = partner.user_ids[0].login
else:
res['email'] = res['login'] = partner.email or ''
return res
class res_users(osv.Model):
_inherit = 'res.users'
def _get_state(self, cr, uid, ids, name, arg, context=None):
res = {}
for user in self.browse(cr, uid, ids, context):
res[user.id] = ('active' if user.login_date else 'new')
return res
_columns = {
'state': fields.function(_get_state, string='Status', type='selection',
selection=[('new', 'Never Connected'), ('active', 'Connected')]),
}
def signup(self, cr, uid, values, token=None, context=None):
""" signup a user, to either:
- create a new user (no token), or
- create a user for a partner (with token, but no user for partner), or
- change the password of a user (with token, and existing user).
:param values: a dictionary with field values that are written on user
:param token: signup token (optional)
:return: (dbname, login, password) for the signed up user
"""
if token:
# signup with a token: find the corresponding partner id
res_partner = self.pool.get('res.partner')
partner = res_partner._signup_retrieve_partner(
cr, uid, token, check_validity=True, raise_exception=True, context=None)
# invalidate signup token
partner.write({'signup_token': False, 'signup_type': False, 'signup_expiration': False})
partner_user = partner.user_ids and partner.user_ids[0] or False
# avoid overwriting existing (presumably correct) values with geolocation data
if partner.country_id or partner.zip or partner.city:
values.pop('city', None)
values.pop('country_id', None)
if partner.lang:
values.pop('lang', None)
if partner_user:
# user exists, modify it according to values
values.pop('login', None)
values.pop('name', None)
partner_user.write(values)
return (cr.dbname, partner_user.login, values.get('password'))
else:
# user does not exist: sign up invited user
values.update({
'name': partner.name,
'partner_id': partner.id,
'email': values.get('email') or values.get('login'),
})
if partner.company_id:
values['company_id'] = partner.company_id.id
values['company_ids'] = [(6, 0, [partner.company_id.id])]
self._signup_create_user(cr, uid, values, context=context)
else:
# no token, sign up an external user
values['email'] = values.get('email') or values.get('login')
self._signup_create_user(cr, uid, values, context=context)
return (cr.dbname, values.get('login'), values.get('password'))
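# --- Illustrative sketch only: calling signup() from server-side code (names assumed) ---
# users = registry.get('res.users')                  # an Odoo registry and cursor are assumed
# db, login, password = users.signup(cr, SUPERUSER_ID,
#     {'login': 'new.user@example.com', 'name': 'New User', 'password': 'secret'},
#     token=None)  # no token: uninvited signup; must be allowed by auth_signup.allow_uninvited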
def _signup_create_user(self, cr, uid, values, context=None):
""" create a new user from the template user """
ir_config_parameter = self.pool.get('ir.config_parameter')
template_user_id = literal_eval(ir_config_parameter.get_param(cr, uid, 'auth_signup.template_user_id', 'False'))
assert template_user_id and self.exists(cr, uid, template_user_id, context=context), 'Signup: invalid template user'
# check that uninvited users may sign up
if 'partner_id' not in values:
if not literal_eval(ir_config_parameter.get_param(cr, uid, 'auth_signup.allow_uninvited', 'False')):
raise SignupError('Signup is not allowed for uninvited users')
assert values.get('login'), "Signup: no login given for new user"
assert values.get('partner_id') or values.get('name'), "Signup: no name or partner given for new user"
# create a copy of the template user (attached to a specific partner_id if given)
values['active'] = True
context = dict(context or {}, no_reset_password=True)
try:
with cr.savepoint():
return self.copy(cr, uid, template_user_id, values, context=context)
except Exception, e:
# copy may fail if the requested login is not available.
raise SignupError(ustr(e))
def reset_password(self, cr, uid, login, context=None):
""" retrieve the user corresponding to login (login or email),
and reset their password
"""
user_ids = self.search(cr, uid, [('login', '=', login)], context=context)
if not user_ids:
user_ids = self.search(cr, uid, [('email', '=', login)], context=context)
if len(user_ids) != 1:
raise Exception(_('Reset password: invalid username or email'))
return self.action_reset_password(cr, uid, user_ids, context=context)
def action_reset_password(self, cr, uid, ids, context=None):
""" create signup token for each user, and send their signup url by email """
# prepare reset password signup
if not context:
context = {}
create_mode = bool(context.get('create_user'))
res_partner = self.pool.get('res.partner')
partner_ids = [user.partner_id.id for user in self.browse(cr, uid, ids, context)]
# no time limit for initial invitation, only for reset password
expiration = False if create_mode else now(days=+1)
res_partner.signup_prepare(cr, uid, partner_ids, signup_type="reset", expiration=expiration, context=context)
context = dict(context or {})
# send email to users with their signup url
template = False
if create_mode:
try:
# get_object() raises ValueError if record does not exist
template = self.pool.get('ir.model.data').get_object(cr, uid, 'auth_signup', 'set_password_email')
except ValueError:
pass
if not bool(template):
template = self.pool.get('ir.model.data').get_object(cr, uid, 'auth_signup', 'reset_password_email')
assert template._name == 'mail.template'
for user in self.browse(cr, uid, ids, context):
if not user.email:
raise UserError(_("Cannot send email: user %s has no email address.") % user.name)
context['lang'] = user.lang
self.pool.get('mail.template').send_mail(cr, uid, template.id, user.id, force_send=True, raise_exception=True, context=context)
def create(self, cr, uid, values, context=None):
if context is None:
context = {}
# overridden to automatically invite user to sign up
user_id = super(res_users, self).create(cr, uid, values, context=context)
user = self.browse(cr, uid, user_id, context=context)
if user.email and not context.get('no_reset_password'):
context = dict(context, create_user=True)
try:
self.action_reset_password(cr, uid, [user.id], context=context)
except MailDeliveryException:
self.pool.get('res.partner').signup_cancel(cr, uid, [user.partner_id.id], context=context)
return user_id
def copy(self, cr, uid, id, default=None, context=None):
if not default or not default.get('email'):
# avoid sending email to the user we are duplicating
context = dict(context or {}, reset_password=False)
return super(res_users, self).copy(cr, uid, id, default=default, context=context)
| agpl-3.0 |
arsenovic/clifford | clifford/test/test_g3c_tools.py | 1 | 54096 | import random
from functools import reduce
import time
import functools
import numpy as np
import numpy.testing as npt
from numpy import exp
import pytest
import numba
from clifford import Cl
from clifford.g3c import *
from clifford import general_exp
from clifford.tools.g3c import *
from clifford.tools.g3c.rotor_parameterisation import ga_log, ga_exp, general_logarithm, \
interpolate_rotors
from clifford.tools.g3c.rotor_estimation import *
from clifford.tools.g3c.object_clustering import *
from clifford.tools.g3c.scene_simplification import *
from clifford.tools.g3c.object_fitting import *
from clifford.tools.g3c.model_matching import *
from clifford.tools.g3 import random_euc_mv
from clifford.tools.g3c.GAOnline import draw_objects, GAScene, GanjaScene
from clifford._numba_utils import DISABLE_JIT
too_slow_without_jit = pytest.mark.skipif(
DISABLE_JIT, reason="test is too slow without JIT"
)
RTOL_DEFAULT = 1E-4
ATOL_DEFAULT = 1E-6
assert_allclose = functools.partial(npt.assert_allclose, rtol=RTOL_DEFAULT, atol=ATOL_DEFAULT)
@too_slow_without_jit
class TestRotorGeneration:
def test_generate_translation_rotor(self):
for i in range(10000):
euc_vector_a = random_euc_mv()
res = generate_translation_rotor(euc_vector_a)
res2 = (1 + ninf * euc_vector_a / 2)
assert_allclose(res.value, res2.value)
@too_slow_without_jit
class TestFitObjects:
def test_fit_circle(self):
noise = 0.1
trueP = random_circle()
point_list = project_points_to_circle([random_conformal_point() for i in range(100)], trueP)
point_list = [up(down(P) + noise * random_euc_mv()) for P in point_list]
print(trueP)
circle = fit_circle(point_list)
print(circle)
# draw(point_list + [circle], static=False, scale=0.1)
def test_fit_line(self):
noise = 0.1
trueP = random_line()
point_list = project_points_to_line([random_conformal_point() for i in range(100)], trueP)
point_list = [up(down(P) + noise * random_euc_mv()) for P in point_list]
print(trueP)
line = fit_line(point_list)
print(line)
# draw(point_list + [line], static=False, scale=0.1)
def test_fit_sphere(self):
noise = 0.1
trueP = random_sphere()
point_list = project_points_to_sphere([random_conformal_point() for i in range(100)], trueP)
point_list = [up(down(P) + noise * random_euc_mv()) for P in point_list]
print(trueP)
sphere = fit_sphere(point_list)
print(sphere)
# draw([sphere] + point_list, static=False, scale=0.1)
def test_fit_plane(self):
noise = 0.1
trueP = random_plane()
point_list = project_points_to_plane([random_conformal_point() for i in range(100)], trueP)
point_list = [up(down(P) + noise * random_euc_mv()) for P in point_list]
print(trueP)
plane = fit_plane(point_list)
print(plane)
# draw(point_list + [plane], static=False, scale=0.1)
@too_slow_without_jit
class TestGeneralLogarithm:
def test_general_logarithm_rotation(self):
# Check we can reverse rotations
for i in range(50):
R = random_rotation_rotor()
biv_2 = general_logarithm(R)
biv_3 = ga_log(R)
assert_allclose(biv_2.value, biv_3.value)
def test_general_logarithm_translation(self):
# Check we can reverse translation
for i in range(50):
t = random_euc_mv()
biv = ninf * t / 2
R = general_exp(biv).normal()
biv_2 = general_logarithm(R)
assert_allclose(biv.value, biv_2.value)
def test_general_logarithm_scaling(self):
# Check we can reverse scaling
for i in range(50):
scale = 0.5 + np.random.rand()
biv = -np.log(scale) * e45 / 2
R = general_exp(biv).normal()
biv_2 = general_logarithm(R)
assert_allclose(biv.value, biv_2.value)
def test_general_logarithm_RS(self):
for i in range(5):
scale = 0.5 + np.random.rand()
S = generate_dilation_rotor(scale).normal()
R = generate_rotation_rotor(0.5, e1, e2).normal()
V = (R * S).normal()
biv_test = general_logarithm(R) + general_logarithm(S)
biv = general_logarithm(V)
biv_alt = ga_log(R) + general_logarithm(S)
assert_allclose(biv.value, biv_test.value)
assert_allclose(biv.value, biv_alt.value)
def test_general_logarithm_TR(self):
for i in range(5):
# R = generate_rotation_rotor(0.5, e1, e2).normal()
# T = generate_translation_rotor(e3 + 7 * e2 - e1).normal()
# V = (T*R).normal()
biv_true = random_bivector()
V = general_exp(biv_true).normal()
biv = general_logarithm(V)
V_rebuilt = (general_exp(biv)).normal()
C1 = random_point_pair()
C2 = (V * C1 * ~V).normal()
C3 = (V_rebuilt * C1 * ~V_rebuilt).normal()
assert_allclose(C2.value, C3.value)
def test_general_logarithm_TS(self):
for i in range(5):
scale = 0.5 + np.random.rand()
t = random_euc_mv()
S = generate_dilation_rotor(scale)
T = generate_translation_rotor(t)
V = (T * S).normal()
biv = general_logarithm(V)
V_rebuilt = (general_exp(biv)).normal()
C1 = random_point_pair()
C2 = (V * C1 * ~V).normal()
C3 = (V_rebuilt * C1 * ~V_rebuilt).normal()
assert_allclose(C2.value, C3.value)
def test_general_logarithm_TRS(self):
for i in range(5):
scale = 0.5 + np.random.rand()
S = generate_dilation_rotor(scale)
R = generate_rotation_rotor(0.5, e1, e2)
T = generate_translation_rotor(e3 + 7 * e2 - e1)
V = (T * R * S).normal()
biv = general_logarithm(V)
V_rebuilt = general_exp(biv).normal()
biv2 = general_logarithm(V)
C1 = random_point_pair()
C2 = (V * C1 * ~V).normal()
C3 = (V_rebuilt * C1 * ~V_rebuilt).normal()
assert_allclose(C2.value, C3.value)
@pytest.mark.parametrize('obj_gen', [
random_point_pair, random_line, random_circle, random_plane
])
def test_general_logarithm_conformal(self, obj_gen):
for i in range(1000):
X = obj_gen()
Y = obj_gen()
R = rotor_between_objects(X, Y)
biv = general_logarithm(R)
R_recon = general_exp(biv).normal()
assert_allclose(R.value, R_recon.value)
class TestVisualisation:
def test_draw_objects(self):
scene = ConformalMVArray([random_line() for i in range(100)])
sc_a = str(draw_objects(scene))
scene.save('test.ga')
sc_b = str(draw_objects('test.ga'))
assert sc_a == sc_b
def test_ganja_scene(self):
scene = ConformalMVArray([up(0)^up(e1)^einf, up(0)^up(e2)^einf, up(0)^up(e3)^einf]
+ [random_line() for i in range(2)])
sc = GanjaScene()
sc.add_objects(scene)
sc.save_to_file('test.json')
class TestConformalArray:
def test_up_down(self):
mv = []
up_mv = []
for i in range(100):
p = random_euc_mv()
mv.append(p)
up_mv.append(up(p))
test_array = ConformalMVArray(mv)
up_array = test_array.up()
down_array = up_array.down()
for a, b in zip(up_array, up_mv):
assert_allclose(a.value, b.value)
for a, b in zip(down_array, mv):
assert_allclose(a.value, b.value)
@too_slow_without_jit
def test_apply_rotor(self):
mv = []
for i in range(100):
p = random_euc_mv()
mv.append(p)
test_array = ConformalMVArray(mv)
up_array = test_array.up()
# Test apply rotor
for i in range(100):
R = ConformalMVArray([layout.randomRotor()])
rotated_array = up_array.apply_rotor(R)
for i, v in enumerate(rotated_array):
res = apply_rotor(up_array[i], R[0]).value
assert_allclose(v.value, res)
def test_dual(self):
mv = []
for i in range(100):
p = random_euc_mv()
mv.append(p)
test_array = ConformalMVArray(mv)
up_array = test_array.up()
I5 = layout.blades['e12345']
assert_allclose(
(up_array * ConformalMVArray([I5])).value,
ConformalMVArray([i * I5 for i in up_array]).value)
def test_from_value_array(self):
mv = []
for i in range(100):
p = random_euc_mv()
mv.append(p)
test_array = ConformalMVArray(mv)
up_array = test_array.up()
new_mv_array = ConformalMVArray.from_value_array(up_array.value)
npt.assert_almost_equal(new_mv_array.value, up_array.value)
@too_slow_without_jit
class TestG3CTools:
@pytest.fixture(params=[
random_point_pair,
random_line,
random_circle,
random_plane,
random_sphere
])
def obj_gen(self, request):
return request.param
def test_factorise(self, obj_gen):
n_repeats = 50
for i in range(n_repeats):
X1 = obj_gen()
basis, scale = X1.factorise()
for b in basis:
gpres = b.grades(eps=0.0001)
assert gpres == {1}
new_blade = (reduce(lambda a, b: a ^ b, basis) * scale)
try:
assert_allclose(new_blade.value, X1.value)
except AssertionError:
print(X1)
print(new_blade)
raise
def test_is_blade(self):
a = random_bivector() + random_circle()
assert not a.isBlade()
a = random_translation_rotor()
assert not a.isBlade()
def test_is_blade_generated(self, obj_gen):
n_repeats = 5
for i in range(n_repeats):
a = obj_gen()
assert a.isBlade()
def test_average_objects(self, obj_gen):
n_repeats = 1000
for i in range(n_repeats):
X1 = obj_gen()
X2 = obj_gen()
obj_list = [X1, X2]
average_objects(obj_list, weights=[0.5, 0.5])
def test_point_beyond_plane(self):
for i in range(200):
normal = random_euc_mv().normal()
euc_perp_dist = np.random.randn()*3
plane = I5 * (normal + euc_perp_dist * einf)
P1 = up(normal * (euc_perp_dist+1))
assert point_beyond_plane(P1, plane)
P2 = up(normal * (euc_perp_dist-1))
assert not point_beyond_plane(P2, plane)
def test_unsign_sphere(self):
for i in range(100):
S = unsign_sphere(random_sphere())
r = np.random.randn()
assert_allclose(unsign_sphere(S*r).value, S.value)
def test_sphere_line_intersect(self):
for i in range(100):
S = random_sphere()
L = ((S*einf*S)^random_conformal_point()^einf).normal()
assert sphere_line_intersect(S, L)
def test_sphere_beyond_behind_plane(self):
for i in range(100):
normal = random_euc_mv().normal()
euc_perp_dist = np.random.randn() * 3
plane = I5 * (normal + euc_perp_dist * einf)
radius = abs(np.random.randn() * 2)
sphere1 = I5*(up(normal * (euc_perp_dist + radius*1.1)) - 0.5*radius**2*einf)
assert sphere_beyond_plane(sphere1, plane)
assert not sphere_behind_plane(sphere1, plane)
sphere2 = I5*(up(normal * (euc_perp_dist - radius*1.1)) - 0.5*radius**2*einf)
assert not sphere_beyond_plane(sphere2, plane)
assert sphere_behind_plane(sphere2, plane)
sphere3 = I5*(up(normal * (euc_perp_dist - radius*0.5)) - 0.5*radius**2*einf)
assert not sphere_beyond_plane(sphere3, plane)
assert not sphere_behind_plane(sphere3, plane)
def test_join_spheres(self):
for j in range(1000):
s1 = random_sphere()
s2 = random_sphere()
s3 = join_spheres(s1, s2)
assert sphere_in_sphere(s1, s3)
assert sphere_in_sphere(s2, s3)
def test_enclosing_spheres(self):
n_spheres = 10
for j in range(1000):
spheres = [random_sphere() for i in range(n_spheres)]
s4 = normalised(enclosing_sphere(spheres))
for s in spheres:
assert sphere_in_sphere(s, s4)
def test_closest_furthest_circle_points(self):
"""
This just checks that the function calls do not crash at the moment,
not that the result is correct.
"""
for _ in range(100):
C1 = random_circle()
C2 = random_circle()
pclose = iterative_closest_points_on_circles(C1, C2)
pfar = iterative_furthest_points_on_circles(C1, C2)
def test_closest_points_circle_line(self):
"""
This checks that the functions do not fail.
It also checks that the points produced do lie on the circle and line.
It does not as of yet check that they actually produce the minimum distance.
"""
for i in range(10):
L = random_line()
C = random_circle()
X1, X2 = iterative_closest_points_circle_line(C, L, niterations=50)
X1Andreas = closest_point_on_circle_from_line(C, L)
X2Andreas = closest_point_on_line_from_circle(C, L)
assert_allclose((X1 ^ C).value, 0)
assert_allclose((X1Andreas ^ C).value, 0)
assert_allclose((X2 ^ L).value, 0)
assert_allclose((X2Andreas ^ L).value, 0)
def test_closest_points_circle_line_corner_cases(self):
# Choose explicit cases to ensure test coverage
# The line and plane of the circle are parallel
# line is not in the plane and the projection does meet the circle
L = (up(e3)^up(e1+e3)^einf).normal()
C = (up(e1)^up(e2)^up(-e1)).normal()
X = closest_point_on_circle_from_line(C, L)
assert_allclose((X ^ C).value, 0)
assert (X == up(e1) or X == up(-e1))
# The line and plane of the circle are parallel
# line is not in the plane and the projection does not meet the circle
L = (up(e3 + 5*e2) ^ up(e1 + e3 + 5*e2) ^ einf).normal()
C = (up(e1) ^ up(e2) ^ up(-e1)).normal()
X = closest_point_on_circle_from_line(C, L)
assert_allclose((X ^ C).value, 0)
assert X == up(e2)
# Line passes through the centre of the circle and is
# perpendicular to the circle
C = (up(e1) ^ up(e2) ^ up(-e1)).normal()
L = (up(0) ^ up(e3) ^ einf).normal()
X = closest_point_on_circle_from_line(C, L)
assert_allclose((X ^ C).value, 0)
# Line passes through the circle and is perpendicular to the circle
C = (up(e1) ^ up(e2) ^ up(-e1)).normal()
L = (up(0.5*e2) ^ up(e3 + 0.5*e2) ^ einf).normal()
X = closest_point_on_circle_from_line(C, L)
assert_allclose((X ^ C).value, 0)
assert X == up(e2)
# Line passes through the centre of the circle and is not
# perpendicular to the circle
C = (up(e1) ^ up(e2) ^ up(-e1)).normal()
L = (up(0) ^ up(e3 + 0.1 * e2) ^ einf).normal()
X = closest_point_on_circle_from_line(C, L)
assert_allclose((X ^ C).value, 0)
assert (X == up(e2) or X == up(-e2))
# Line passes through the circle and is not
# perpendicular to the circle
C = (up(e1) ^ up(e2) ^ up(-e1)).normal()
L = (up(0.1 * e2) ^ up(e3 + 0.2 * e2) ^ einf).normal()
X = closest_point_on_circle_from_line(C, L)
assert_allclose((X ^ C).value, 0)
assert (X == up(e2) or X == up(-e2))
# Line passes outside the circle and is not
# perpendicular to the circle
C = (up(e1) ^ up(e2) ^ up(-e1)).normal()
L = (up(5 * e1) ^ up(e3 + 5 * e1 + e2) ^ einf).normal()
X = closest_point_on_circle_from_line(C, L)
assert_allclose((X ^ C).value, 0)
assert X == up(e1)
def test_get_line_reflection_matrix(self):
for i in range(10):
lines = [random_line() for i in range(10)]
point = random_conformal_point()
Lmat = get_line_reflection_matrix(lines, 1)
res = layout.MultiVector(Lmat @ point.value)
new_point = 0
for l in lines:
new_point += l*point*l
new_point = new_point/len(lines)
assert_allclose(new_point.value, res.value)
def test_get_truncated_line_reflection_matrix(self):
for i in range(10):
lines = [random_line() for i in range(10)]
Lmat = get_line_reflection_matrix(lines, 1)
Lmat_trunc = val_truncated_get_line_reflection_matrix(np.array([l.value for l in lines]), 1)
assert_allclose(Lmat_trunc, Lmat[1:6, 1:6])
def test_get_midpoint_between_lines(self):
for i in range(50):
P = random_conformal_point()
T1 = random_translation_rotor()
T2 = random_translation_rotor()
P1 = apply_rotor(P, T1)
P2 = apply_rotor(P, T2)
L1 = (P ^ P1 ^ einf).normal()
L2 = (P ^ P2 ^ einf).normal()
Pmid = midpoint_between_lines(L1, L2)
assert_allclose(Pmid.value, P.value)
for i in range(50):
L1 = random_line()
L2 = random_line()
Pmid = midpoint_between_lines(L1, L2)
L1point = project_points_to_line([Pmid], L1)[0]
L2point = project_points_to_line([Pmid], L2)[0]
dst = euc_dist(L1point, L2point)
middst1 = euc_dist(Pmid, L1point)
middst2 = euc_dist(Pmid, L2point)
npt.assert_allclose(dst, 2 * middst1)
npt.assert_allclose(dst, 2 * middst2)
def test_get_nearest_plane_point(self):
for i in range(100):
plane = random_plane()
pnt = get_nearest_plane_point(plane)
s2 = eo + normalise_n_minus_1((plane*eo*plane)(1))
pnt2 = normalise_n_minus_1((s2*einf*s2)(1))
assert_allclose(pnt.value, pnt2.value)
def test_general_object_interpolation(self):
R_r = generate_rotation_rotor(np.pi / 16, e2, e3) * generate_rotation_rotor(np.pi / 4, e1, e2)
R_d = generate_dilation_rotor(1.5)
R_t = generate_translation_rotor(e3)
R = (R_t * R_r * R_d).normal()
# C1 = (up(0+3*e1)^up(2*e1+3*e1)).normal()
C1 = (up(0 + 3 * e1) ^ up(2 * e1 + 3 * e1) ^ up(e1 + e3 + 3 * e1)).normal()
C2 = (R * C1 * ~R).normal()(3)
C3 = (R * C2 * ~R).normal()(3)
C4 = (R * C3 * ~R).normal()(3)
C5 = (R * C4 * ~R).normal()(3)
object_list = [C1, C2, C3, C4, C5]
object_alpha_array = np.array([0.0, 0.25, 0.5, 0.75, 1.0])
new_alpha_array = np.linspace(0.0, 1.0)
new_object_list = general_object_interpolation(object_alpha_array, object_list, new_alpha_array,
kind='quadratic')
new_object_list = [o(3) for o in new_object_list]
draw_objects(object_list, 'circle', color='rgb(255,0,0)')
draw_objects(new_object_list, 'circle', color='rgb(0,255,0)')
time.sleep(1)
def test_n_th_root(self):
for i in range(200):
a = random_point_pair()
b = random_point_pair()
R = rotor_between_objects(a, b)
for n in [1, 2, 4, 8, 16, 32]:
R_n = n_th_rotor_root(R, n)
assert_allclose((R_n ** n).value, R.value)
def test_random_point_pair_at_origin(self):
pp_list = [random_point_pair_at_origin() for i in range(10)]
sc = GAScene()
for pp in pp_list:
sc.add_point_pair(pp)
print(sc)
def test_random_line_at_origin(self):
pp_list = [random_line_at_origin() for i in range(10)]
sc = GAScene()
for pp in pp_list:
sc.add_line(pp)
print(sc)
def test_random_circle_at_origin(self):
pp_list = [random_circle_at_origin() for i in range(10)]
sc = GAScene()
for pp in pp_list:
sc.add_circle(pp)
print(sc)
def test_random_sphere_at_origin(self):
pp_list = [random_sphere_at_origin() for i in range(10)]
sc = GAScene()
for pp in pp_list:
sc.add_sphere(pp)
print(sc)
def test_random_plane_at_origin(self):
pp_list = [random_plane_at_origin() for i in range(10)]
sc = GAScene()
for pp in pp_list:
sc.add_plane(pp)
print(sc)
def test_generate_translation_rotor(self):
""" Tests translation rotor generation """
for i in range(100):
rand = random_euc_mv()
starting_point = up(random_euc_mv())
r_trans = generate_translation_rotor(rand)
end_point = r_trans * starting_point * ~r_trans
translation_vec = down(end_point) - down(starting_point)
assert_allclose(translation_vec.value, rand.value)
def test_intersect_line_and_plane_to_point(self):
""" Intersection of a line and a plane """
# First the case that they intersect
line = (up(2*e1) ^ up(2*e1 + e3) ^ ninf).normal()
plane = (up(e3) ^ up(e3 + e1) ^ up(e3 + e2) ^ ninf).normal()
point_result = intersect_line_and_plane_to_point(line, plane)
npt.assert_almost_equal(point_result.value, up(e3 + 2*e1).value)
# Next the case that the do not intersect
line = (up(0) ^ up(e1) ^ ninf).normal()
point_result = intersect_line_and_plane_to_point(line, plane)
assert point_result is None
for i in range(200):
line = random_line()
plane = random_plane()
point_result = intersect_line_and_plane_to_point(line, plane)
# draw_objects([line], mv_type='line')
# draw_objects([plane], mv_type='plane', color='rgb(0,255,0)')
# draw_objects([point_result], mv_type='euc_point', color='rgb(255,0,0)')
def test_normalise_n_minus_1(self):
for i in range(500):
mv = np.random.rand() * random_conformal_point()
mv_normed = normalise_n_minus_1(mv)
npt.assert_almost_equal((mv_normed | ninf)[()], -1.0)
def test_get_properties_of_sphere(self):
for i in range(100):
# Make a sphere
scale_factor = np.random.rand()
sphere = (up(scale_factor * e1) ^ up(-scale_factor * e1) ^ up(scale_factor * e3) ^ up(
scale_factor * e2)).normal()
# Translate it
rand_trans = random_euc_mv()
trans_rot = generate_translation_rotor(rand_trans)
sphere = (trans_rot * sphere * ~trans_rot).normal()
center = get_center_from_sphere(sphere)
radius = get_radius_from_sphere(sphere)
assert_allclose(down(center).value, rand_trans.value)
npt.assert_almost_equal(radius, scale_factor)
def test_point_pair_to_end_points(self):
for i in range(100):
point_a = random_conformal_point()
point_b = random_conformal_point()
pp = (point_a ^ point_b).normal()
p_a, p_b = point_pair_to_end_points(pp)
assert_allclose(p_a.value, point_a.value)
assert_allclose(p_b.value, point_b.value)
def test_euc_distance(self):
for i in range(100):
point_a = random_conformal_point()
point_b = random_conformal_point()
dist = euc_dist(point_a, point_b)
dist_alt = float(abs(down(point_a) - down(point_b)))
assert_allclose(dist, dist_alt)
def test_dilation_rotor(self):
for i in range(100):
scale = 2 * np.random.rand()
r = generate_dilation_rotor(scale)
sphere = random_sphere()
radius = get_radius_from_sphere(sphere)
sphere2 = (r * sphere * ~r).normal()
radius2 = get_radius_from_sphere(sphere2)
npt.assert_almost_equal(scale, radius2 / radius)
def test_calculate_S_over_mu_general(self, obj_gen):
# Repeats for each fuzz test
n_repeats = 100
# Test the general case
for i in range(n_repeats):
X1 = obj_gen()
X2 = obj_gen()
S = calculate_S_over_mu(X1, X2)
X3 = -S*(X1 + X2)
X4 = average_objects([X1, X2], [0.5, 0.5]).normal()
if sum(np.abs((X3 + X4).value)) < 0.000001:
print(' SIGN FLIP')
X4 = -X4
try:
assert_allclose(X3.value, X4.value)
except AssertionError:
print(X3)
print(X4)
raise
def test_general_rotor_between_objects(self, obj_gen):
# Repeats for each fuzz test
n_repeats = 1000
# Test the general case
for i in range(n_repeats):
C1 = obj_gen()
C2 = obj_gen()
R = rotor_between_objects(C1, C2)
C3 = (R * C1 * ~R).normal()
if sum(np.abs((C2 + C3).value)) < 0.0001:
print('SIGN FLIP ', obj_gen.__name__)
C3 = -C3
try:
assert_allclose(C2.value, C3.value)
except AssertionError:
print(R)
print(C2*C1 + C1*C2)
raise
@pytest.mark.parametrize(('obj_gen', 'grd'), [
(random_point_pair, 2),
(random_circle, 3),
pytest.param(random_sphere, 4, marks=[
# gh-104
pytest.mark.xfail(reason="Non-deterministic, sometimes fails", strict=False),
])
])
def test_motor_between_rounds(self, obj_gen, grd):
# Repeats for each fuzz test
n_repeats = 1000
# Test the general case
for i in range(n_repeats):
C1 = obj_gen()
Rt = random_rotation_translation_rotor()
C2 = (Rt * C1 * ~Rt)(grd).normal()
R = motor_between_rounds(C1, C2)
C3 = (R * C1 * ~R)(grd).normal()
if sum(np.abs((C2 + C3).value)) < 0.0001:
print('SIGN FLIP ', obj_gen.__name__)
C3 = -C3
try:
assert_allclose(C2.value, C3.value)
except AssertionError:
print(C2.normal())
print(C3.normal())
raise
# @pytest.mark.skip(reason="unknown") # Skip this because we know that it is a breaking case
def test_general_rotor_between_objects_specific_cases(self):
C1 = -(2.48651^e1234) - (2.48651^e1235) - (1.0^e1245) + (3e-05^e1345) - (0.0^e2345)
C2 = -(25.8135^e1234) - (25.8135^e1235) + (1.0^e1245) - (3e-05^e1345) - (0.0^e2345)
R = rotor_between_objects(C1, C2)
C3 = (R * C1 * ~R).normal()
if sum(np.abs((C2 + C3).value)) < 0.0001:
C3 = -C3
assert_allclose(C2.value, C3.value)
# @pytest.mark.skip(reason="unknown") # Skip this because we know that it is a breaking case
def test_rotor_between_non_overlapping_spheres(self):
C1 = random_sphere()
rad = get_radius_from_sphere(C1)
t_r = generate_translation_rotor(2.5*rad*e1)
C2 = (t_r * C1 * ~t_r)(4).normal()
rad2 = get_radius_from_sphere(C2)
R = rotor_between_objects(C1, C2)
C3 = (R * C1 * ~R).normal()
if sum(np.abs((C2 + C3).value)) < 0.0001:
print('SIGN FLIP ')
C3 = -C3
assert_allclose(C2.value, C3.value)
@too_slow_without_jit
class TestRotorEstimation:
def run_rotor_estimation(self, object_generator, estimation_function,
n_runs=20, n_objects_per_run=10):
error_count = 0
for i in range(n_runs):
query_model = [object_generator().normal() for i in range(n_objects_per_run)]
r = (generate_translation_rotor(random_euc_mv(l_max=0.01)) * generate_rotation_rotor(np.random.randn() / 10,
random_euc_mv().normal(),
random_euc_mv().normal())).normal()
reference_model = [(r * l * ~r).normal() for l in query_model]
r_est = estimation_function(reference_model, query_model)
error_flag = False
for a, b in zip([(r_est * l * ~r_est).normal() for l in query_model], reference_model):
if abs(a + b) < 0.0001:
c = -b
print('SIGN FLIP')
else:
c = b
if np.any(np.abs(a.value - c.value) > 0.01):
error_flag = True
if error_flag:
error_count += 1
print(i, error_count)
print('\n\nESTIMATION SUMMARY')
print('OBJECTS ', n_objects_per_run)
print('RUNS ', n_runs)
print('ERRORS ', error_count)
print('ERROR percentage ', 100 * error_count / float(n_runs), '%')
def test_de_keninck_twist(self):
X = MVArray([random_conformal_point() for i in range(100)])
R = random_rotation_rotor()
noise_std = 0.0
Y = MVArray([normalise_n_minus_1(apply_rotor(x, random_translation_rotor(noise_std) * R)) for x in X])
res = de_keninck_twist(Y, X)
try:
assert_allclose(R.value, res.value)
except AssertionError:
assert_allclose(R.value, -res.value)
def test_direct_TRS_extraction(self):
X = MVArray([random_conformal_point() for i in range(100)])
R = (random_rotation_translation_rotor(maximum_translation=100) * generate_dilation_rotor(
0.5 + 2 * np.random.rand())).normal()
noise_std = 0.0
Y = MVArray([normalise_n_minus_1(apply_rotor(x, random_translation_rotor(noise_std) * R)) for x in X])
res = direct_TRS_extraction(Y, X)
try:
assert_allclose(R.value, res.value)
except AssertionError:
assert_allclose(R.value, -res.value)
def test_dorst_motor_points(self):
X = MVArray([random_conformal_point() for i in range(100)])
R = random_rotation_translation_rotor(maximum_translation=100)
noise_std = 0.0
Y = MVArray([normalise_n_minus_1(apply_rotor(x, random_translation_rotor(noise_std) * R)) for x in X])
res = dorst_motor_estimate(Y, X)
try:
assert_allclose(R.value, res.value)
except AssertionError:
assert_allclose(R.value, -res.value)
@pytest.mark.parametrize('obj_gen', [
random_line,
random_circle,
random_point_pair,
random_plane,
random_sphere,
])
def test_dorst_motor_estimate(self, obj_gen):
self.run_rotor_estimation(obj_gen, dorst_motor_estimate)
def test_estimate_rotor_lines_average_then_opt(self):
def estimation_func(pp_list_a, pp_list_b):
r_start = average_estimator(pp_list_a, pp_list_b)
query_start = [apply_rotor(b, r_start)(3).normal() for b in pp_list_b]
r_est, costs = estimate_rotor_objects(pp_list_a, query_start)
return (r_est*r_start).normal()
self.run_rotor_estimation(random_line, estimation_func)
@pytest.mark.parametrize('obj_gen', [
random_line,
random_circle,
random_point_pair,
random_plane,
pytest.param(random_sphere, marks=pytest.mark.skip(reason="unknown")),
])
def test_estimate_motor_optimisation(self, obj_gen):
def estimation_func(pp_list_a, pp_list_b):
r_est, costs = estimate_rotor_objects(pp_list_a, pp_list_b, motor=True)
return r_est
self.run_rotor_estimation(obj_gen, estimation_func)
@pytest.mark.parametrize('obj_gen', [
random_line,
random_circle,
random_point_pair,
random_plane,
random_sphere,
])
def test_estimate_rotor_optimisation(self, obj_gen):
def estimation_func(pp_list_a, pp_list_b):
r_est, costs = estimate_rotor_objects(pp_list_a, pp_list_b)
return r_est
self.run_rotor_estimation(obj_gen, estimation_func)
@pytest.mark.parametrize('obj_gen', [
random_line,
random_circle,
pytest.param(random_point_pair, marks=pytest.mark.skip(reason="unknown")),
random_plane,
random_sphere
])
def test_estimate_rotor_sequential(self, obj_gen):
def estimation_func(pp_list_a, pp_list_b):
r_est, exit_flag = sequential_object_rotor_estimation(pp_list_a, pp_list_b)
print(exit_flag)
return r_est
self.run_rotor_estimation(obj_gen, estimation_func)
@pytest.mark.skip(reason="unknown")
def test_estimate_rotor_circles_sequential_then_opt(self):
def estimation_func(pp_list_a, pp_list_b):
r_est_1, exit_flag = sequential_object_rotor_estimation(pp_list_a, pp_list_b)
r_est_2 = 1.0
if exit_flag == 1:
object_set_a = [apply_rotor(l, r_est_1).normal() for l in pp_list_a]
r_est_2, costs = estimate_rotor_objects(object_set_a, pp_list_b)
return r_est_2 * r_est_1
self.run_rotor_estimation(random_circle, estimation_func)
@too_slow_without_jit
class TestSceneSimplification:
def test_simplify_recursive(self):
object_generator = random_line
n_clusters = 3
n_objects_per_cluster = 5
threshold = 0.5
all_objects, object_clusters = generate_n_clusters(object_generator,
n_clusters,
n_objects_per_cluster)
all_object_copy = [o for o in all_objects]
all_object_copy = simplify_scene_recursive(all_object_copy, threshold)
print(n_clusters)
# assert len(all_object_copy) == n_clusters
def test_simplify_scene(self):
object_generator = random_line
n_clusters = 3
n_objects_per_cluster = 5
threshold = 2.0
all_objects, object_clusters = generate_n_clusters(object_generator,
n_clusters,
n_objects_per_cluster)
all_object_copy1 = [o for o in all_objects]
all_object_copy1 = simplify_scene(all_object_copy1, threshold)
print(len(all_object_copy1))
# assert len(all_object_copy) == n_clusters
all_object_copy2 = [o for o in all_objects]
all_object_copy2 = simplify_scene(all_object_copy2, threshold)
print(len(all_object_copy2))
draw_objects(all_object_copy1)
draw_objects(all_object_copy2, color='rgb(255,0,0)')
@too_slow_without_jit
class TestObjectClustering:
def run_n_clusters(self, object_generator, n_clusters, n_objects_per_cluster, n_shotgunning):
all_objects, object_clusters = generate_n_clusters(object_generator, n_clusters, n_objects_per_cluster)
[new_labels, centroids, start_labels, start_centroids] = n_clusters_objects(n_clusters, all_objects,
initial_centroids=None,
n_shotgunning=n_shotgunning,
averaging_method='unweighted')
return all_objects, new_labels, centroids
def test_clustering_point_pairs(self):
object_generator = random_point_pair
n_clusters = 3
n_objects_per_cluster = 10
n_shotgunning = 60
all_objects, labels, centroids = self.run_n_clusters(object_generator, n_clusters,
n_objects_per_cluster, n_shotgunning)
sc = visualise_n_clusters(all_objects, centroids, labels, object_type='point_pair',
color_1=np.array([255, 0, 0]), color_2=np.array([0, 255, 0]))
print(sc)
def test_clustering_lines(self):
object_generator = random_line
n_clusters = 3
n_objects_per_cluster = 10
n_shotgunning = 60
all_objects, labels, centroids = self.run_n_clusters(object_generator, n_clusters,
n_objects_per_cluster, n_shotgunning)
sc = visualise_n_clusters(all_objects, centroids, labels, object_type='line',
color_1=np.array([255, 0, 0]), color_2=np.array([0, 255, 0]))
print(sc)
def test_clustering_circles(self):
object_generator = random_circle
n_clusters = 3
n_objects_per_cluster = 10
n_shotgunning = 60
all_objects, labels, centroids = self.run_n_clusters(object_generator, n_clusters,
n_objects_per_cluster, n_shotgunning)
sc = visualise_n_clusters(all_objects, centroids, labels, object_type='circle',
color_1=np.array([255, 0, 0]), color_2=np.array([0, 255, 0]))
print(sc)
def test_clustering_spheres(self):
object_generator = random_sphere
n_clusters = 3
n_objects_per_cluster = 10
n_shotgunning = 60
all_objects, labels, centroids = self.run_n_clusters(object_generator, n_clusters,
n_objects_per_cluster, n_shotgunning)
sc = visualise_n_clusters(all_objects, centroids, labels, object_type='sphere',
color_1=np.array([255, 0, 0]), color_2=np.array([0, 255, 0]))
print(sc)
def test_clustering_planes(self):
object_generator = random_plane
n_clusters = 3
n_objects_per_cluster = 10
n_shotgunning = 60
all_objects, labels, centroids = self.run_n_clusters(object_generator, n_clusters,
n_objects_per_cluster, n_shotgunning)
sc = visualise_n_clusters(all_objects, centroids, labels, object_type='plane',
color_1=np.array([255, 0, 0]), color_2=np.array([0, 255, 0]))
print(sc)
@pytest.mark.parametrize('obj_gen', [
random_point_pair,
random_line,
random_circle,
random_plane,
random_sphere
])
def test_assign_objects_to_objects(self, obj_gen):
n_repeats = 5
for i in range(n_repeats):
object_set_a = [obj_gen() for i in range(20)]
object_set_b = [l for l in object_set_a]
label_a, costs_a = assign_measurements_to_objects_matrix(object_set_a, object_set_b)
npt.assert_equal(label_a, np.array(range(len(label_a))))
n_repeats = 5
for i in range(n_repeats):
r = random_rotation_translation_rotor(0.001, np.pi / 32)
object_set_a = [obj_gen() for i in range(20)]
            object_set_b = [apply_rotor(l, r).normal() for l in object_set_a]
label_a, costs_a = assign_measurements_to_objects_matrix(object_set_a, object_set_b)
npt.assert_equal(label_a, np.array(range(len(label_a))))
@too_slow_without_jit
class TestModelMatching:
@pytest.mark.veryslow
def test_fingerprint_match(self):
object_generator = random_line
n_objects_per_cluster = 20
# Make a cluster
cluster_objects = generate_random_object_cluster(n_objects_per_cluster, object_generator,
max_cluster_trans=0.5, max_cluster_rot=np.pi / 3)
sum_p = 0
n_runs = 20
for i in range(n_runs):
# Rotate and translate the cluster
disturbance_rotor = random_rotation_translation_rotor(maximum_translation=10, maximum_angle=np.pi / 2).normal()
target = [apply_rotor(c, disturbance_rotor).normal() for c in cluster_objects]
labels, min_costs = match_by_fingerprint(target, cluster_objects)
pcorrect = 100*np.sum([l == i for i, l in enumerate(labels)])/n_objects_per_cluster
sum_p += pcorrect
print('Percent correct: ', pcorrect)
print('av_p_correct ', sum_p/n_runs)
print('\n', flush=True)
def test_iterative_model_match_line_optimised(self):
object_generator = random_line
n_objects_per_cluster = 20
# Make a cluster
cluster_objects = generate_random_object_cluster(n_objects_per_cluster, object_generator,
max_cluster_trans=0.5, max_cluster_rot=np.pi / 3)
error_count = 0
n_iterations = 30
n_runs = 5
for i in range(n_runs):
# Rotate and translate the cluster
disturbance_rotor = random_rotation_translation_rotor(maximum_translation=2, maximum_angle=np.pi / 8)
target = [apply_rotor(c, disturbance_rotor).normal() for c in cluster_objects]
labels, costs, r_est = iterative_model_match(target, cluster_objects, n_iterations, object_type='lines')
try:
assert np.sum(labels == range(n_objects_per_cluster)) == n_objects_per_cluster
except AssertionError:
print(disturbance_rotor)
print(r_est)
error_count += 1
print('Correct fraction: ', 1.0 - error_count / n_runs)
def test_iterative_model_match(self):
object_generator = random_line
n_objects_per_cluster = 20
# Make a cluster
cluster_objects = generate_random_object_cluster(n_objects_per_cluster, object_generator,
max_cluster_trans=0.5, max_cluster_rot=np.pi / 3)
error_count = 0
n_iterations = 30
n_runs = 5
for i in range(n_runs):
# Rotate and translate the cluster
disturbance_rotor = random_rotation_translation_rotor(maximum_translation=2, maximum_angle=np.pi / 8).normal()
target = [apply_rotor(c, disturbance_rotor).normal() for c in cluster_objects]
labels, costs, r_est = iterative_model_match(target, cluster_objects, n_iterations, object_type='generic')
r_est = r_est.normal()
try:
assert np.sum(labels == range(n_objects_per_cluster)) == n_objects_per_cluster
except AssertionError:
print(disturbance_rotor)
print(r_est)
error_count += 1
print('Correct fraction: ', 1.0 - error_count / n_runs)
@pytest.mark.skip(reason="unknown")
def test_iterative_model_match_cuda(self):
object_generator = random_line
n_objects_per_cluster = 20
# Make a cluster
cluster_objects = generate_random_object_cluster(n_objects_per_cluster, object_generator,
max_cluster_trans=0.5, max_cluster_rot=np.pi / 3)
error_count = 0
n_runs = 5
for i in range(n_runs):
# Rotate and translate the cluster
disturbance_rotor = random_rotation_translation_rotor(maximum_translation=2, maximum_angle=np.pi / 8)
target = [apply_rotor(c, disturbance_rotor).normal() for c in cluster_objects]
labels, costs, r_est = iterative_model_match(target, cluster_objects, 30,
object_type='generic', cuda=True)
try:
assert np.sum(labels == range(n_objects_per_cluster)) == n_objects_per_cluster
except AssertionError:
print(disturbance_rotor)
print(r_est)
error_count += 1
print('Correct fraction: ', 1.0 - error_count / n_runs)
def test_iterative_model_match_sequential(self):
object_generator = random_line
n_objects_per_cluster = 20
# Make a cluster
cluster_objects = generate_random_object_cluster(n_objects_per_cluster, object_generator,
max_cluster_trans=0.5, max_cluster_rot=np.pi / 3)
error_count = 0
n_runs = 5
for i in range(n_runs):
# Rotate and translate the cluster
disturbance_rotor = random_rotation_translation_rotor(maximum_translation=2, maximum_angle=np.pi / 8)
target = [apply_rotor(c, disturbance_rotor).normal() for c in cluster_objects]
labels, costs, r_est = iterative_model_match_sequential(target, cluster_objects, 30, object_type='generic')
try:
assert np.sum(labels == range(n_objects_per_cluster)) == n_objects_per_cluster
except AssertionError:
print(disturbance_rotor)
print(r_est)
error_count += 1
print('Correct fraction: ', 1.0 - error_count / n_runs)
@pytest.mark.skip(reason="unknown")
def test_iterative_model_match_sequential_cuda(self):
object_generator = random_line
n_objects_per_cluster = 20
# Make a cluster
cluster_objects = generate_random_object_cluster(n_objects_per_cluster, object_generator,
max_cluster_trans=0.5, max_cluster_rot=np.pi / 3)
error_count = 0
n_runs = 5
for i in range(n_runs):
# Rotate and translate the cluster
disturbance_rotor = random_rotation_translation_rotor(maximum_translation=2, maximum_angle=np.pi / 8)
target = [apply_rotor(c, disturbance_rotor).normal() for c in cluster_objects]
labels, costs, r_est = iterative_model_match_sequential(target, cluster_objects, 30,
object_type='generic', cuda=True)
try:
assert np.sum(labels == range(n_objects_per_cluster)) == n_objects_per_cluster
except AssertionError:
print(disturbance_rotor)
print(r_est)
error_count += 1
print('Correct fraction: ', 1.0 - error_count / n_runs)
@pytest.mark.skip(reason="unknown")
def test_REFORM(self):
object_generator = random_line
n_objects_per_cluster = 20
objects_per_sample = 10
iterations = 30
pool_size = 8
n_samples = 8
error_count = 0
n_runs = 10
for i in range(n_runs):
# Make a cluster
cluster_objects = generate_random_object_cluster(n_objects_per_cluster, object_generator,
max_cluster_trans=0.5, max_cluster_rot=np.pi / 3)
# Rotate and translate the cluster
disturbance_rotor = random_rotation_translation_rotor(maximum_translation=2, maximum_angle=np.pi / 8)
target = [apply_rotor(c, disturbance_rotor).normal() for c in cluster_objects]
labels, costs, r_est = REFORM(target, cluster_objects, n_samples, objects_per_sample,
iterations, covergence_threshold=0.00000001, pool_size=pool_size)
try:
assert np.sum(labels == range(n_objects_per_cluster)) == n_objects_per_cluster
except AssertionError:
print(disturbance_rotor)
print(r_est)
error_count += 1
print('Correct fraction: ', 1.0 - error_count / n_runs)
@pytest.mark.skip(reason="unknown")
def test_REFORM_sequential(self):
object_generator = random_line
n_objects_per_cluster = 20
objects_per_sample = 10
iterations = 30
pool_size = 8
n_samples = 8
error_count = 0
n_runs = 10
for i in range(n_runs):
# Make a cluster
cluster_objects = generate_random_object_cluster(n_objects_per_cluster, object_generator,
max_cluster_trans=0.5, max_cluster_rot=np.pi / 3)
# Rotate and translate the cluster
disturbance_rotor = random_rotation_translation_rotor(maximum_translation=2, maximum_angle=np.pi / 8)
target = [apply_rotor(c, disturbance_rotor).normal() for c in cluster_objects]
labels, costs, r_est = REFORM_sequential(target, cluster_objects, n_samples, objects_per_sample,
iterations, covergence_threshold=0.00000001, pool_size=pool_size)
try:
assert np.sum(labels == range(n_objects_per_cluster)) == n_objects_per_cluster
except AssertionError:
print(disturbance_rotor)
print(r_est)
error_count += 1
print('Correct fraction: ', 1.0 - error_count / n_runs)
@pytest.mark.skip(reason="unknown")
def test_REFORM_line_optimised(self):
object_generator = random_line
n_objects_per_cluster = 20
objects_per_sample = 5
iterations = 30
pool_size = 8
n_samples = pool_size
error_count = 0
n_runs = 10
for i in range(n_runs):
# Make a cluster
cluster_objects = generate_random_object_cluster(n_objects_per_cluster, object_generator,
max_cluster_trans=0.5, max_cluster_rot=np.pi / 3)
# Rotate and translate the cluster
disturbance_rotor = random_rotation_translation_rotor(maximum_translation=2, maximum_angle=np.pi / 8)
target = [apply_rotor(c, disturbance_rotor).normal() for c in cluster_objects]
labels, costs, r_est = REFORM(target, cluster_objects, n_samples, objects_per_sample,
iterations, covergence_threshold=0.00000001, pool_size=pool_size,
object_type='lines')
try:
assert np.sum(labels == range(n_objects_per_cluster)) == n_objects_per_cluster
except AssertionError:
print(disturbance_rotor)
print(r_est)
error_count += 1
print('Correct fraction: ', 1.0 - error_count / n_runs)
@pytest.mark.skip(reason="unknown")
def test_iterative_model_match_incomplete_query(self):
# Set the generator
object_generator = random_line
n_objects_per_cluster = 100
n_keep = 50
# Make a cluster
cluster_objects = generate_random_object_cluster(n_objects_per_cluster, object_generator,
max_cluster_trans=0.5, max_cluster_rot=np.pi / 3)
error_count = 0
n_runs = 10
for i in range(n_runs):
# Rotate and translate the cluster
disturbance_rotor = random_rotation_translation_rotor(maximum_translation=2, maximum_angle=np.pi / 8)
target = [apply_rotor(c, disturbance_rotor).normal() for c in cluster_objects]
# Keep only a fixed number of the query model objects
sample_indices = random.sample(range(n_objects_per_cluster), n_keep)
query_model = [cluster_objects[i] for i in sample_indices]
labels, costs, r_est = iterative_model_match(target, query_model, 30, object_type='generic')
try:
assert np.sum(labels == sample_indices) == n_keep
except AssertionError:
print(disturbance_rotor)
print(r_est)
error_count += 1
print('Correct fraction: ', 1.0 - error_count / n_runs)
@pytest.mark.skip(reason="unknown")
def test_REFORM_incomplete_query(self):
object_generator = random_line
n_objects_per_cluster = 100
n_keep = 50
objects_per_sample = 10
iterations = 30
pool_size = 8
n_samples = 8
error_count = 0
n_runs = 10
for i in range(n_runs):
# Make a cluster
cluster_objects = generate_random_object_cluster(n_objects_per_cluster, object_generator,
max_cluster_trans=0.5, max_cluster_rot=np.pi / 3)
# Rotate and translate the cluster
disturbance_rotor = random_rotation_translation_rotor(maximum_translation=2, maximum_angle=np.pi / 8)
target = [apply_rotor(c, disturbance_rotor).normal() for c in cluster_objects]
# Keep only a fixed number of the query model objects
sample_indices = random.sample(range(n_objects_per_cluster), n_keep)
query_model = [cluster_objects[i] for i in sample_indices]
labels, costs, r_est = REFORM_sequential(target, query_model, n_samples, objects_per_sample,
iterations, covergence_threshold=0.00000001, pool_size=pool_size)
try:
assert np.sum(labels == sample_indices) == n_keep
except AssertionError:
print(disturbance_rotor)
print(r_est)
error_count += 1
print('Correct fraction: ', 1.0 - error_count / n_runs)
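# Editor's sketch (not part of the original test-suite): the model-matching tests above all
# follow one pattern -- build a random cluster, disturb it with a random rotor, run a matcher,
# and report the fraction of correctly recovered labels.  The helper below captures that
# pattern; the matcher is passed in as a callable so no particular library signature is assumed.
def _run_matching_trial(matcher, n_objects_per_cluster=20, object_generator=random_line):
    # Build a random cluster and a rigidly disturbed copy of it.
    cluster_objects = generate_random_object_cluster(n_objects_per_cluster, object_generator,
                                                     max_cluster_trans=0.5, max_cluster_rot=np.pi / 3)
    disturbance_rotor = random_rotation_translation_rotor(maximum_translation=2, maximum_angle=np.pi / 8)
    target = [apply_rotor(c, disturbance_rotor).normal() for c in cluster_objects]
    # The matcher is expected to return (labels, costs, estimated_rotor), as
    # iterative_model_match and REFORM do in the tests above.
    labels, costs, r_est = matcher(target, cluster_objects)
    n_correct = np.sum(labels == range(n_objects_per_cluster))
    return n_correct / n_objects_per_cluster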
| bsd-3-clause |
wummel/linkchecker | third_party/dnspython/tests/set.py | 9 | 5255 | # Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import unittest
import dns.set
# for convenience
S = dns.set.Set
class SimpleSetTestCase(unittest.TestCase):
def testLen1(self):
s1 = S()
self.failUnless(len(s1) == 0)
def testLen2(self):
s1 = S([1, 2, 3])
self.failUnless(len(s1) == 3)
def testLen3(self):
s1 = S([1, 2, 3, 3, 3])
self.failUnless(len(s1) == 3)
def testUnion1(self):
s1 = S([1, 2, 3])
s2 = S([1, 2, 3])
e = S([1, 2, 3])
self.failUnless(s1 | s2 == e)
def testUnion2(self):
s1 = S([1, 2, 3])
s2 = S([])
e = S([1, 2, 3])
self.failUnless(s1 | s2 == e)
def testUnion3(self):
s1 = S([1, 2, 3])
s2 = S([3, 4])
e = S([1, 2, 3, 4])
self.failUnless(s1 | s2 == e)
def testIntersection1(self):
s1 = S([1, 2, 3])
s2 = S([1, 2, 3])
e = S([1, 2, 3])
self.failUnless(s1 & s2 == e)
def testIntersection2(self):
s1 = S([0, 1, 2, 3])
s2 = S([1, 2, 3, 4])
e = S([1, 2, 3])
self.failUnless(s1 & s2 == e)
def testIntersection3(self):
s1 = S([1, 2, 3])
s2 = S([])
e = S([])
self.failUnless(s1 & s2 == e)
def testIntersection4(self):
s1 = S([1, 2, 3])
s2 = S([5, 4])
e = S([])
self.failUnless(s1 & s2 == e)
def testDifference1(self):
s1 = S([1, 2, 3])
s2 = S([5, 4])
e = S([1, 2, 3])
self.failUnless(s1 - s2 == e)
def testDifference2(self):
s1 = S([1, 2, 3])
s2 = S([])
e = S([1, 2, 3])
self.failUnless(s1 - s2 == e)
def testDifference3(self):
s1 = S([1, 2, 3])
s2 = S([3, 2])
e = S([1])
self.failUnless(s1 - s2 == e)
def testDifference4(self):
s1 = S([1, 2, 3])
s2 = S([3, 2, 1])
e = S([])
self.failUnless(s1 - s2 == e)
def testSubset1(self):
s1 = S([1, 2, 3])
s2 = S([3, 2, 1])
self.failUnless(s1.issubset(s2))
def testSubset2(self):
s1 = S([1, 2, 3])
self.failUnless(s1.issubset(s1))
def testSubset3(self):
s1 = S([])
s2 = S([1, 2, 3])
self.failUnless(s1.issubset(s2))
def testSubset4(self):
s1 = S([1])
s2 = S([1, 2, 3])
self.failUnless(s1.issubset(s2))
def testSubset5(self):
s1 = S([])
s2 = S([])
self.failUnless(s1.issubset(s2))
def testSubset6(self):
s1 = S([1, 4])
s2 = S([1, 2, 3])
self.failUnless(not s1.issubset(s2))
def testSuperset1(self):
s1 = S([1, 2, 3])
s2 = S([3, 2, 1])
self.failUnless(s1.issuperset(s2))
def testSuperset2(self):
s1 = S([1, 2, 3])
self.failUnless(s1.issuperset(s1))
def testSuperset3(self):
s1 = S([1, 2, 3])
s2 = S([])
self.failUnless(s1.issuperset(s2))
def testSuperset4(self):
s1 = S([1, 2, 3])
s2 = S([1])
self.failUnless(s1.issuperset(s2))
def testSuperset5(self):
s1 = S([])
s2 = S([])
self.failUnless(s1.issuperset(s2))
def testSuperset6(self):
s1 = S([1, 2, 3])
s2 = S([1, 4])
self.failUnless(not s1.issuperset(s2))
def testUpdate1(self):
s1 = S([1, 2, 3])
u = (4, 5, 6)
e = S([1, 2, 3, 4, 5, 6])
s1.update(u)
self.failUnless(s1 == e)
def testUpdate2(self):
s1 = S([1, 2, 3])
u = []
e = S([1, 2, 3])
s1.update(u)
self.failUnless(s1 == e)
def testGetitem(self):
s1 = S([1, 2, 3])
i0 = s1[0]
i1 = s1[1]
i2 = s1[2]
s2 = S([i0, i1, i2])
self.failUnless(s1 == s2)
def testGetslice(self):
s1 = S([1, 2, 3])
slice = s1[0:2]
self.failUnless(len(slice) == 2)
item = s1[2]
slice.append(item)
s2 = S(slice)
self.failUnless(s1 == s2)
def testDelitem(self):
s1 = S([1, 2, 3])
del s1[0]
i1 = s1[0]
i2 = s1[1]
self.failUnless(i1 != i2)
self.failUnless(i1 == 1 or i1 == 2 or i1 == 3)
self.failUnless(i2 == 1 or i2 == 2 or i2 == 3)
def testDelslice(self):
s1 = S([1, 2, 3])
del s1[0:2]
i1 = s1[0]
self.failUnless(i1 == 1 or i1 == 2 or i1 == 3)
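# Editor's sketch (not in the original file): unlike the built-in set type, dns.set.Set keeps
# its members in an ordered, indexable container, which is what the testGetitem, testGetslice
# and testDelitem cases above rely on, while still collapsing duplicates (testLen3).
def _indexable_set_demo():
    s = S([1, 2, 3, 3])
    assert len(s) == 3            # duplicates are collapsed
    assert s[0] in (1, 2, 3)      # but members remain addressable by position
    return s[0:2]                 # and sliceable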
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
siddhuwarrier/lockindicator-applet | src/xkb/XkbWrapper.py | 2 | 14017 | import ctypes.util
import types
import logging.handlers
from typeutils.TypeChecker import require
# Copyright (c) 2010 Siddhu Warrier (http://siddhuwarrier.homelinux.org,
# siddhuwarrier AT gmail DOT com).
#
# This file is part of the xkb package.
# The xkb package is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This code has been produced heavily modifying:
# On screen display for learning the keyboard layout Neo2
# Copyright (c) 2009 Martin Zuther (http://www.mzuther.de/)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Thank you for using free software!
__all__ = ["XkbWrapper"]
## @brief Class for providing a simple Xkb Wrapper.
#
# This class provides a simple XKB wrapper, and has been created by extensively
# refactoring the Neo OSD2 XKB wrapper.
# @ingroup xkb
# @author Siddhu Warrier ([email protected])
# @date 31/01/2009.
class XkbWrapper:
##@brief XkbOpenDisplay error codes as a dictionary
# See http://linux.die.net/man/3/xkbopendisplay for more details.
# The values for these are obtained from file /usr/include/X11/XKBlib.h (Ubuntu 9.04):
#these error codes are not visible in a __dict__(). Best we can do for obj abstraction
#in Py, or so methinks.
    __errcodes_xkbOpenDisplay = {
        "XkbOD_Success": 0, #success - XkbOpenDisplay worked!
"XkbOD_BadLibraryVersion": 1, #XkbLibraryVersion returned False.
"XkbOD_ConnectionRefused": 2, #the display could not be opened.
"XkbOD_NonXkbServer": 3, #the library and the server have incompatible extension versions.
"XkbOD_BadServerVersion": 4 #the extension is not present in the X server.
}
##@brief XKB constants as a dictionary
constants_xkb = {"XkbUseCoreKbd":0x0100}
## @brief XkbWrapper constructor. Extensively refactored from Neo OSD2.
#
# This constructor maps the C functions to Python equivalents, and thereby
# sets stuff up for future calls.
#
# @date 31/01/2010
def __init__(self):
#set the logger up
self.logger = logging.getLogger("utils")
self.logger.setLevel(logging.FATAL) #by default, only FATAL messages are processed
#add the handler
self.loggerHandler = logging.handlers.RotatingFileHandler("/tmp/logging-utils.log")
#self.loggerHandler = logging.StreamHandler()
#create a formatter
self.loggerFormatter = logging.Formatter("%(asctime)s- %(name)s %(levelname)s: %(message)s")
#set the formatter to the handler
self.loggerHandler.setFormatter(self.loggerFormatter)
#add the handler
self.logger.addHandler(self.loggerHandler)
# dynamically link to "X Keyboard Extension" library
library_xf86misc = ctypes.CDLL(ctypes.util.find_library('Xxf86misc'))
####################################################################################
# Parameter defintions
# define the parameters the function(s) take, and whether they're in, out, or inout.
# 1 => in, 2=> out, 3=>inout
####################################################################################
#The prototype of the XkbOpenDisplay function may be found here:
# http://linux.die.net/man/3/xkbopendisplay
xkbOpenDisplay_params = ((1, 'display_name'), (2, 'event_rtrn'),
(2, 'error_rtrn'), (3, 'major_in_out'),
(3, 'minor_in_out'), (2, 'reason_rtrn'))
#The prototype of the XkbGetIndicatorState function may be found here:
# http://linux.die.net/man/3/xkbgetindicatorstate
xkbGetIndicatorState_params = ((1, 'display'), (1, 'device_spec'),(3, 'state_return'))
####################################################################################
# Prototype defintions
#define the prototype; specifying the types of the arguments that should go in and out.
####################################################################################
#define the XkbOpenDisplay prototype
xkbOpenDisplay_prototype = ctypes.CFUNCTYPE(
ctypes.c_uint, #return type
ctypes.c_char_p,#display_name:h/w display name
ctypes.POINTER(ctypes.c_int),#event_rtrn:backfilled with the extension base event code
ctypes.POINTER(ctypes.c_int),#error_rtrn:backfilled with the extension base error code
ctypes.POINTER(ctypes.c_int),#major_in_out:compile time lib major version in, server major version out
ctypes.POINTER(ctypes.c_int),#minor_in_out:compile time lib min version in, server minor version out
ctypes.POINTER(ctypes.c_int))#reason_rtrn:backfilled with a status code
#(see __errcodes_xkbOpenDisplay to see acceptable values)
#define the XkbGetIndicatorState prototype
xkbGetIndicatorState_prototype = ctypes.CFUNCTYPE(
ctypes.c_bool,#return type: Will not work in Python 2.5
ctypes.c_uint,#display: connection to the X server; obtained using xkbOpenDisplay
ctypes.c_uint,#device_spec: device ID, or XkbUseCoreKbd
ctypes.POINTER(ctypes.c_uint))#backfilled with a mask of the indicator state
####################################################################################
# Actual Definitions.
# Define the actual C functions using low-level wrappers.
#This is a hidden method as we want the API to expose
# the high-level python wrapper that performs type checking etc.
####################################################################################
#define XkbOpenDisplay C function
self.__XkbOpenDisplay__ = xkbOpenDisplay_prototype(('XkbOpenDisplay', library_xf86misc),
xkbOpenDisplay_params)
self.__XkbGetIndicatorState__ = xkbGetIndicatorState_prototype(('XkbGetIndicatorState',
library_xf86misc), xkbGetIndicatorState_params)
####################################################################################
# Error Checker methods.
# Add error checkers.
####################################################################################
self.__XkbOpenDisplay__.errcheck = self.errCheck_openDisplayAndInitXkb
## @brief high-level Python function to encapsulate XkbOpenDisplay(...) function.
#
# Opens a connection to an X server, checks for a compatible version of the Xkb extension
# in both the library and the server, and initializes the extension for use.
#
# The equiv C function's prototype may be found here: http://linux.die.net/man/3/xkbopendisplay
# Please note that we are using C-style var names to maintain consistency with the C
# functions it is wrapping. The most important change tothis function is using my TypeChecker
# decorator to perform type checking, instead of using boilerplate asserts!
#
# However, the wrapper function name uses CamelCase with the first letter uncapitalised.
#
# @param[in] display_name (NoneType or StringType): The name of the display to connect to.
# @param[in,out] major_in_out (Int): compile time lib major version in, server major version out
# @param[in,out] minor_in_out (Int): compile time lib min version in, server minor version out
# @date 31/01/2010
@require(validKwargs = [], display_name = (types.StringType, types.NoneType), major_in_out = types.IntType, minor_in_out = types.IntType)
def openDisplayAndInitXkb(self, display_name, major_in_out, minor_in_out):
self.logger.info("Opening display...")
# convert function arguments to "ctypes", ...
__display_name__ = ctypes.c_char_p(display_name)
__major_in_out__ = ctypes.c_int(major_in_out)
__minor_in_out__ = ctypes.c_int(minor_in_out)
# ... call low-level function ...
ret = self.__XkbOpenDisplay__(__display_name__, __major_in_out__, \
__minor_in_out__)
# ... and return converted return value and function arguments
self.logger.info("...done")
return {'display_handle': ret[0].value, \
'server_major_version': ret[1][3].value, \
'server_minor_version': ret[1][4].value}
## @brief high-level Python function to encapsulate XkbGetIndicatorStates function.
# Obtains the current state of the keyboard indicators
#
# The equiv C function's prototype may be found here:
# http://linux.die.net/man/3/xkbgetindicatorstate
# Please note that we are using C-style var names to maintain consistency with the C
# functions it is wrapping. The most important change to this function is using my TypeChecker
# decorator to perform type checking, instead of using boilerplate asserts!
#
# However, the wrapper function name uses CamelCase with the first letter uncapitalised.
#
# @param[in] display_handle (LongType): The display handler to connect to
# (get it using openDisplayAndInitXkb).
# @param[in] device_spec (Int): The device spec. By default XkbUseCoreKbd
# (get it using constants_xkb)
# @retval indicatorMask (ctypes.c_ulong): The indicator mask
# (by default on Linux: 1 for Caps Lock on, 2 for Num Lock on)
# @date 31/01/2010
@require(validKwargs = [], display_handle = types.LongType, device_spec = types.IntType)
def getIndicatorStates(self, display_handle, device_spec):
self.logger.info("Getting indicator states...")
# convert function arguments to "ctypes", ...
__display_handle__ = ctypes.c_uint(display_handle)
__device_spec__ = ctypes.c_uint(device_spec)
__state_return = ctypes.c_uint()
# ... call low-level function ...
indicatorMask = self.__XkbGetIndicatorState__(__display_handle__, __device_spec__, __state_return)
#...and return this value
self.logger.info("...done")
return indicatorMask
## @brief Error checker for openDisplayAndInitXkb.
#
# @param[in,out] result
# @param[in] func
# @param[in,out] args
# @date 31/01/2010
def errCheck_openDisplayAndInitXkb(self, result, func, args):
# print debugging information if requested
# function didn't return display handle, so let's see why
# not
self.logger.debug( ' [XkbOpenDisplay]')
self.logger.debug( ' Display: %#010x' % result)
self.logger.debug( ' display_name: %s' % args[0].value)
self.logger.debug( ' event_rtrn: %d' % args[1].value)
self.logger.debug( ' error_rtrn: %d' % args[2].value)
self.logger.debug( ' major_in_out: %d' % args[3].value)
self.logger.debug( ' minor_in_out: %d' % args[4].value)
self.logger.debug( ' reason_rt: %d' % args[5].value)
#resut should normally be the display; 0 indicates epic fail.
if result == 0:
# values were taken from file /usr/include/X11/XKBlib.h (Ubuntu 9.04):
# $XFree86: xc/lib/X11/XKBlib.h,v 3.5 2003/04/17 02:06:31 dawes Exp $ #
errorID = args[5].value
for errorCode in self.__errcodes_xkbOpenDisplay.keys():
if errorID == self.__errcodes_xkbOpenDisplay[errorCode]:
break
self.logger.debug( "Error code" + errorCode)
error_message = '"XkbOpenDisplay" reported an error (%s).'%errorCode
raise OSError(error_message)
# return display handle and all function arguments
return (ctypes.c_uint(result), args)
##@brief Changes logging level and logging handler (optional).
#
#@param logLevel (int): logLevel should be a recognised log level.
#@param handler (logging.handlers): The logging handler.
def changeLoggingPreferences(self, logLevel, handler = None):
self.logger.setLevel(logLevel)
if handler != None:
self.logger.removeHandler(self.loggerHandler)
self.loggerHandler = handler
self.loggerHandler.setFormatter(self.loggerFormatter)
self.logger.addHandler(self.loggerHandler)
self.logger.debug("Changed logger level")
#test exec
if __name__ == "__main__":
xkbWrapper = XkbWrapper()
try:
ret = xkbWrapper.openDisplayAndInitXkb(None, 1, 0)
except OSError as osError:
print osError.args[0]
displayHandle = ret['display_handle']
deviceSpec = xkbWrapper.constants_xkb['XkbUseCoreKbd']
print type(xkbWrapper.getIndicatorStates(displayHandle, deviceSpec))
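    # Editor's sketch (assumption, not part of the original file): decode the mask returned
    # above.  Per the getIndicatorStates comment, on a default Linux keymap bit 0 (value 1)
    # means Caps Lock is on and bit 1 (value 2) means Num Lock is on; getattr() covers the
    # return value being either a plain integer or a ctypes instance.
    indicatorMask = xkbWrapper.getIndicatorStates(displayHandle, deviceSpec)
    mask = getattr(indicatorMask, 'value', indicatorMask)
    print 'Caps Lock on: %s, Num Lock on: %s' % (bool(mask & 0x01), bool(mask & 0x02))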
| gpl-3.0 |
VRciF/springy | src/node_modules/node-gyp/gyp/pylib/gyp/easy_xml_test.py | 2698 | 3270 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the easy_xml.py file. """
import gyp.easy_xml as easy_xml
import unittest
import StringIO
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.stderr = StringIO.StringIO()
def test_EasyXml_simple(self):
self.assertEqual(
easy_xml.XmlToString(['test']),
'<?xml version="1.0" encoding="utf-8"?><test/>')
self.assertEqual(
easy_xml.XmlToString(['test'], encoding='Windows-1252'),
'<?xml version="1.0" encoding="Windows-1252"?><test/>')
def test_EasyXml_simple_with_attributes(self):
self.assertEqual(
easy_xml.XmlToString(['test2', {'a': 'value1', 'b': 'value2'}]),
'<?xml version="1.0" encoding="utf-8"?><test2 a="value1" b="value2"/>')
def test_EasyXml_escaping(self):
original = '<test>\'"\r&\nfoo'
    converted = '&lt;test&gt;\'&quot;&#13;&amp;&#10;foo'
    converted_apos = converted.replace("'", '&apos;')
self.assertEqual(
easy_xml.XmlToString(['test3', {'a': original}, original]),
'<?xml version="1.0" encoding="utf-8"?><test3 a="%s">%s</test3>' %
(converted, converted_apos))
def test_EasyXml_pretty(self):
self.assertEqual(
easy_xml.XmlToString(
['test3',
['GrandParent',
['Parent1',
['Child']
],
['Parent2']
]
],
pretty=True),
'<?xml version="1.0" encoding="utf-8"?>\n'
'<test3>\n'
' <GrandParent>\n'
' <Parent1>\n'
' <Child/>\n'
' </Parent1>\n'
' <Parent2/>\n'
' </GrandParent>\n'
'</test3>\n')
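  # Editor's note (inferred from the cases above, not original commentary): XmlToString takes a
  # nested-list specification -- the element name first, an optional attribute dict second, and
  # any remaining items as child specifications (lists) or text content (strings).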
def test_EasyXml_complex(self):
# We want to create:
target = (
'<?xml version="1.0" encoding="utf-8"?>'
'<Project>'
'<PropertyGroup Label="Globals">'
'<ProjectGuid>{D2250C20-3A94-4FB9-AF73-11BC5B73884B}</ProjectGuid>'
'<Keyword>Win32Proj</Keyword>'
'<RootNamespace>automated_ui_tests</RootNamespace>'
'</PropertyGroup>'
'<Import Project="$(VCTargetsPath)\\Microsoft.Cpp.props"/>'
'<PropertyGroup '
'Condition="\'$(Configuration)|$(Platform)\'=='
'\'Debug|Win32\'" Label="Configuration">'
'<ConfigurationType>Application</ConfigurationType>'
'<CharacterSet>Unicode</CharacterSet>'
'</PropertyGroup>'
'</Project>')
xml = easy_xml.XmlToString(
['Project',
['PropertyGroup', {'Label': 'Globals'},
['ProjectGuid', '{D2250C20-3A94-4FB9-AF73-11BC5B73884B}'],
['Keyword', 'Win32Proj'],
['RootNamespace', 'automated_ui_tests']
],
['Import', {'Project': '$(VCTargetsPath)\\Microsoft.Cpp.props'}],
['PropertyGroup',
{'Condition': "'$(Configuration)|$(Platform)'=='Debug|Win32'",
'Label': 'Configuration'},
['ConfigurationType', 'Application'],
['CharacterSet', 'Unicode']
]
])
self.assertEqual(xml, target)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
snnn/tensorflow | tensorflow/python/kernel_tests/matrix_solve_op_test.py | 47 | 7968 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_solve."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class MatrixSolveOpTest(test.TestCase):
def _verifySolve(self, x, y, batch_dims=None):
for np_type in [np.float32, np.float64, np.complex64, np.complex128]:
if np_type == np.float32 or np_type == np.complex64:
tol = 1e-5
else:
tol = 1e-12
for adjoint in False, True:
        if np_type in (np.float32, np.float64):
          a = x.real.astype(np_type)
          b = y.real.astype(np_type)
else:
a = x.astype(np_type)
b = y.astype(np_type)
a_np = np.conj(np.transpose(a)) if adjoint else a
if batch_dims is not None:
a = np.tile(a, batch_dims + [1, 1])
a_np = np.tile(a_np, batch_dims + [1, 1])
b = np.tile(b, batch_dims + [1, 1])
np_ans = np.linalg.solve(a_np, b)
for use_placeholder in False, True:
with self.test_session(use_gpu=True) as sess:
if use_placeholder:
a_ph = array_ops.placeholder(dtypes.as_dtype(np_type))
b_ph = array_ops.placeholder(dtypes.as_dtype(np_type))
tf_ans = linalg_ops.matrix_solve(a_ph, b_ph, adjoint=adjoint)
out = sess.run(tf_ans, {a_ph: a, b_ph: b})
else:
tf_ans = linalg_ops.matrix_solve(a, b, adjoint=adjoint)
out = tf_ans.eval()
self.assertEqual(tf_ans.get_shape(), out.shape)
self.assertEqual(np_ans.shape, out.shape)
self.assertAllClose(np_ans, out, atol=tol, rtol=tol)
def _generateMatrix(self, m, n):
matrix = (np.random.normal(-5, 5,
m * n).astype(np.complex128).reshape([m, n]))
matrix.imag = (np.random.normal(-5, 5, m * n).astype(np.complex128).reshape(
[m, n]))
return matrix
def testSolve(self):
for n in 1, 2, 4, 9:
matrix = self._generateMatrix(n, n)
for nrhs in 1, 2, n:
rhs = self._generateMatrix(n, nrhs)
self._verifySolve(matrix, rhs)
def testSolveBatch(self):
for n in 2, 5:
matrix = self._generateMatrix(n, n)
for nrhs in 1, n:
rhs = self._generateMatrix(n, nrhs)
for batch_dims in [[2], [2, 2], [7, 4]]:
self._verifySolve(matrix, rhs, batch_dims=batch_dims)
def testNonSquareMatrix(self):
# When the solve of a non-square matrix is attempted we should return
# an error
with self.test_session(use_gpu=True):
with self.assertRaises(ValueError):
matrix = constant_op.constant([[1., 2., 3.], [3., 4., 5.]])
linalg_ops.matrix_solve(matrix, matrix)
def testWrongDimensions(self):
# The matrix and right-hand sides should have the same number of rows.
with self.test_session(use_gpu=True):
matrix = constant_op.constant([[1., 0.], [0., 1.]])
rhs = constant_op.constant([[1., 0.]])
with self.assertRaises(ValueError):
linalg_ops.matrix_solve(matrix, rhs)
def testNotInvertible(self):
# The input should be invertible.
with self.test_session(use_gpu=True):
with self.assertRaisesOpError("Input matrix is not invertible."):
# All rows of the matrix below add to zero
matrix = constant_op.constant([[1., 0., -1.], [-1., 1., 0.],
[0., -1., 1.]])
linalg_ops.matrix_solve(matrix, matrix).eval()
def testConcurrent(self):
with self.test_session(use_gpu=True) as sess:
all_ops = []
for adjoint_ in False, True:
lhs1 = random_ops.random_normal([3, 3], seed=42)
lhs2 = random_ops.random_normal([3, 3], seed=42)
rhs1 = random_ops.random_normal([3, 3], seed=42)
rhs2 = random_ops.random_normal([3, 3], seed=42)
s1 = linalg_ops.matrix_solve(lhs1, rhs1, adjoint=adjoint_)
s2 = linalg_ops.matrix_solve(lhs2, rhs2, adjoint=adjoint_)
all_ops += [s1, s2]
val = sess.run(all_ops)
self.assertAllEqual(val[0], val[1])
self.assertAllEqual(val[2], val[3])
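# Editor's sketch (mirrors _verifySolve above; otherwise an assumption): with adjoint=True the
# op solves conj(transpose(a)) . x = b, so the NumPy reference solution is computed like this.
def _numpy_reference_solve(a, b, adjoint=False):
  a_np = np.conj(np.transpose(a)) if adjoint else a
  return np.linalg.solve(a_np, b)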
class MatrixSolveBenchmark(test.Benchmark):
matrix_shapes = [
(4, 4),
(10, 10),
(16, 16),
(101, 101),
(256, 256),
(1001, 1001),
(1024, 1024),
(2048, 2048),
(513, 4, 4),
(513, 16, 16),
(513, 256, 256),
]
def _GenerateTestData(self, matrix_shape, num_rhs):
batch_shape = matrix_shape[:-2]
matrix_shape = matrix_shape[-2:]
assert matrix_shape[0] == matrix_shape[1]
n = matrix_shape[0]
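    # Editor's note: ones(n, n) / (2 * n) + eye(n) has eigenvalues 1.5 (once) and 1.0
    # (n - 1 times), so the benchmark matrix is symmetric positive definite and stays
    # well-conditioned for every matrix size used here.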
matrix = (np.ones(matrix_shape).astype(np.float32) /
(2.0 * n) + np.diag(np.ones(n).astype(np.float32)))
rhs = np.ones([n, num_rhs]).astype(np.float32)
matrix = variables.Variable(
np.tile(matrix, batch_shape + (1, 1)), trainable=False)
rhs = variables.Variable(
np.tile(rhs, batch_shape + (1, 1)), trainable=False)
return matrix, rhs
def benchmarkMatrixSolveOp(self):
run_gpu_test = test.is_gpu_available(True)
for adjoint in False, True:
for matrix_shape in self.matrix_shapes:
for num_rhs in 1, 2, matrix_shape[-1]:
with ops.Graph().as_default(), \
session.Session() as sess, \
ops.device("/cpu:0"):
matrix, rhs = self._GenerateTestData(matrix_shape, num_rhs)
x = linalg_ops.matrix_solve(matrix, rhs, adjoint=adjoint)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(x),
min_iters=25,
store_memory_usage=False,
name=("matrix_solve_cpu_shape_{matrix_shape}_num_rhs_{num_rhs}_"
"adjoint_{adjoint}").format(
matrix_shape=matrix_shape,
num_rhs=num_rhs,
adjoint=adjoint))
if run_gpu_test:
with ops.Graph().as_default(), \
session.Session() as sess, \
ops.device("/gpu:0"):
matrix, rhs = self._GenerateTestData(matrix_shape, num_rhs)
x = linalg_ops.matrix_solve(matrix, rhs, adjoint=adjoint)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(x),
min_iters=25,
store_memory_usage=False,
name=("matrix_solve_gpu_shape_{matrix_shape}_num_rhs_"
"{num_rhs}_adjoint_{adjoint}").format(
matrix_shape=matrix_shape, num_rhs=num_rhs,
adjoint=adjoint))
if __name__ == "__main__":
test.main()
| apache-2.0 |
boyxuper/urllib3 | test/test_util.py | 3 | 17347 | import hashlib
import warnings
import logging
import unittest
import ssl
from itertools import chain
from mock import patch, Mock
from urllib3 import add_stderr_logger, disable_warnings
from urllib3.util.request import make_headers
from urllib3.util.timeout import Timeout
from urllib3.util.url import (
get_host,
parse_url,
split_first,
Url,
)
from urllib3.util.ssl_ import (
resolve_cert_reqs,
ssl_wrap_socket,
_const_compare_digest_backport,
)
from urllib3.exceptions import (
LocationParseError,
TimeoutStateError,
InsecureRequestWarning,
SSLError,
SNIMissingWarning,
)
from urllib3.util import is_fp_closed, ssl_
from . import clear_warnings
# This number represents a time in seconds, it doesn't mean anything in
# isolation. Setting to a high-ish value to avoid conflicts with the smaller
# numbers used for timeouts
TIMEOUT_EPOCH = 1000
class TestUtil(unittest.TestCase):
def test_get_host(self):
url_host_map = {
# Hosts
'http://google.com/mail': ('http', 'google.com', None),
'http://google.com/mail/': ('http', 'google.com', None),
'google.com/mail': ('http', 'google.com', None),
'http://google.com/': ('http', 'google.com', None),
'http://google.com': ('http', 'google.com', None),
'http://www.google.com': ('http', 'www.google.com', None),
'http://mail.google.com': ('http', 'mail.google.com', None),
'http://google.com:8000/mail/': ('http', 'google.com', 8000),
'http://google.com:8000': ('http', 'google.com', 8000),
'https://google.com': ('https', 'google.com', None),
'https://google.com:8000': ('https', 'google.com', 8000),
            'http://user:password@127.0.0.1:1234': ('http', '127.0.0.1', 1234),
'http://google.com/foo=http://bar:42/baz': ('http', 'google.com', None),
'http://google.com?foo=http://bar:42/baz': ('http', 'google.com', None),
'http://google.com#foo=http://bar:42/baz': ('http', 'google.com', None),
# IPv4
'173.194.35.7': ('http', '173.194.35.7', None),
'http://173.194.35.7': ('http', '173.194.35.7', None),
'http://173.194.35.7/test': ('http', '173.194.35.7', None),
'http://173.194.35.7:80': ('http', '173.194.35.7', 80),
'http://173.194.35.7:80/test': ('http', '173.194.35.7', 80),
# IPv6
'[2a00:1450:4001:c01::67]': ('http', '[2a00:1450:4001:c01::67]', None),
'http://[2a00:1450:4001:c01::67]': ('http', '[2a00:1450:4001:c01::67]', None),
'http://[2a00:1450:4001:c01::67]/test': ('http', '[2a00:1450:4001:c01::67]', None),
'http://[2a00:1450:4001:c01::67]:80': ('http', '[2a00:1450:4001:c01::67]', 80),
'http://[2a00:1450:4001:c01::67]:80/test': ('http', '[2a00:1450:4001:c01::67]', 80),
# More IPv6 from http://www.ietf.org/rfc/rfc2732.txt
'http://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:8000/index.html': ('http', '[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]', 8000),
'http://[1080:0:0:0:8:800:200C:417A]/index.html': ('http', '[1080:0:0:0:8:800:200C:417A]', None),
'http://[3ffe:2a00:100:7031::1]': ('http', '[3ffe:2a00:100:7031::1]', None),
'http://[1080::8:800:200C:417A]/foo': ('http', '[1080::8:800:200C:417A]', None),
'http://[::192.9.5.5]/ipng': ('http', '[::192.9.5.5]', None),
'http://[::FFFF:129.144.52.38]:42/index.html': ('http', '[::FFFF:129.144.52.38]', 42),
'http://[2010:836B:4179::836B:4179]': ('http', '[2010:836B:4179::836B:4179]', None),
}
for url, expected_host in url_host_map.items():
returned_host = get_host(url)
self.assertEqual(returned_host, expected_host)
def test_invalid_host(self):
# TODO: Add more tests
invalid_host = [
'http://google.com:foo',
'http://::1/',
'http://::1:80/',
]
for location in invalid_host:
self.assertRaises(LocationParseError, get_host, location)
parse_url_host_map = {
'http://google.com/mail': Url('http', host='google.com', path='/mail'),
'http://google.com/mail/': Url('http', host='google.com', path='/mail/'),
'http://google.com/mail': Url('http', host='google.com', path='mail'),
'google.com/mail': Url(host='google.com', path='/mail'),
'http://google.com/': Url('http', host='google.com', path='/'),
'http://google.com': Url('http', host='google.com'),
'http://google.com?foo': Url('http', host='google.com', path='', query='foo'),
# Path/query/fragment
'': Url(),
'/': Url(path='/'),
'#?/!google.com/?foo#bar': Url(path='', fragment='?/!google.com/?foo#bar'),
'/foo': Url(path='/foo'),
'/foo?bar=baz': Url(path='/foo', query='bar=baz'),
'/foo?bar=baz#banana?apple/orange': Url(path='/foo', query='bar=baz', fragment='banana?apple/orange'),
# Port
'http://google.com/': Url('http', host='google.com', path='/'),
'http://google.com:80/': Url('http', host='google.com', port=80, path='/'),
'http://google.com:80': Url('http', host='google.com', port=80),
# Auth
'http://foo:bar@localhost/': Url('http', auth='foo:bar', host='localhost', path='/'),
'http://foo@localhost/': Url('http', auth='foo', host='localhost', path='/'),
'http://foo:bar@baz@localhost/': Url('http', auth='foo:bar@baz', host='localhost', path='/'),
'http://@': Url('http', host=None, auth='')
}
non_round_tripping_parse_url_host_map = {
# Path/query/fragment
'?': Url(path='', query=''),
'#': Url(path='', fragment=''),
# Empty Port
'http://google.com:': Url('http', host='google.com'),
'http://google.com:/': Url('http', host='google.com', path='/'),
}
def test_parse_url(self):
for url, expected_Url in chain(self.parse_url_host_map.items(), self.non_round_tripping_parse_url_host_map.items()):
returned_Url = parse_url(url)
self.assertEqual(returned_Url, expected_Url)
def test_unparse_url(self):
for url, expected_Url in self.parse_url_host_map.items():
self.assertEqual(url, expected_Url.url)
def test_parse_url_invalid_IPv6(self):
self.assertRaises(ValueError, parse_url, '[::1')
def test_Url_str(self):
U = Url('http', host='google.com')
self.assertEqual(str(U), U.url)
def test_request_uri(self):
url_host_map = {
'http://google.com/mail': '/mail',
'http://google.com/mail/': '/mail/',
'http://google.com/': '/',
'http://google.com': '/',
'': '/',
'/': '/',
'?': '/?',
'#': '/',
'/foo?bar=baz': '/foo?bar=baz',
}
for url, expected_request_uri in url_host_map.items():
returned_url = parse_url(url)
self.assertEqual(returned_url.request_uri, expected_request_uri)
def test_netloc(self):
url_netloc_map = {
'http://google.com/mail': 'google.com',
'http://google.com:80/mail': 'google.com:80',
'google.com/foobar': 'google.com',
'google.com:12345': 'google.com:12345',
}
for url, expected_netloc in url_netloc_map.items():
self.assertEqual(parse_url(url).netloc, expected_netloc)
def test_make_headers(self):
self.assertEqual(
make_headers(accept_encoding=True),
{'accept-encoding': 'gzip,deflate'})
self.assertEqual(
make_headers(accept_encoding='foo,bar'),
{'accept-encoding': 'foo,bar'})
self.assertEqual(
make_headers(accept_encoding=['foo', 'bar']),
{'accept-encoding': 'foo,bar'})
self.assertEqual(
make_headers(accept_encoding=True, user_agent='banana'),
{'accept-encoding': 'gzip,deflate', 'user-agent': 'banana'})
self.assertEqual(
make_headers(user_agent='banana'),
{'user-agent': 'banana'})
self.assertEqual(
make_headers(keep_alive=True),
{'connection': 'keep-alive'})
self.assertEqual(
make_headers(basic_auth='foo:bar'),
{'authorization': 'Basic Zm9vOmJhcg=='})
self.assertEqual(
make_headers(proxy_basic_auth='foo:bar'),
{'proxy-authorization': 'Basic Zm9vOmJhcg=='})
self.assertEqual(
make_headers(disable_cache=True),
{'cache-control': 'no-cache'})
def test_split_first(self):
test_cases = {
('abcd', 'b'): ('a', 'cd', 'b'),
('abcd', 'cb'): ('a', 'cd', 'b'),
('abcd', ''): ('abcd', '', None),
('abcd', 'a'): ('', 'bcd', 'a'),
('abcd', 'ab'): ('', 'bcd', 'a'),
}
for input, expected in test_cases.items():
output = split_first(*input)
self.assertEqual(output, expected)
def test_add_stderr_logger(self):
handler = add_stderr_logger(level=logging.INFO) # Don't actually print debug
logger = logging.getLogger('urllib3')
self.assertTrue(handler in logger.handlers)
logger.debug('Testing add_stderr_logger')
logger.removeHandler(handler)
def test_disable_warnings(self):
with warnings.catch_warnings(record=True) as w:
clear_warnings()
warnings.warn('This is a test.', InsecureRequestWarning)
self.assertEqual(len(w), 1)
disable_warnings()
warnings.warn('This is a test.', InsecureRequestWarning)
self.assertEqual(len(w), 1)
def _make_time_pass(self, seconds, timeout, time_mock):
""" Make some time pass for the timeout object """
time_mock.return_value = TIMEOUT_EPOCH
timeout.start_connect()
time_mock.return_value = TIMEOUT_EPOCH + seconds
return timeout
def test_invalid_timeouts(self):
try:
Timeout(total=-1)
self.fail("negative value should throw exception")
except ValueError as e:
self.assertTrue('less than' in str(e))
try:
Timeout(connect=2, total=-1)
self.fail("negative value should throw exception")
except ValueError as e:
self.assertTrue('less than' in str(e))
try:
Timeout(read=-1)
self.fail("negative value should throw exception")
except ValueError as e:
self.assertTrue('less than' in str(e))
# Booleans are allowed also by socket.settimeout and converted to the
# equivalent float (1.0 for True, 0.0 for False)
Timeout(connect=False, read=True)
try:
Timeout(read="foo")
self.fail("string value should not be allowed")
except ValueError as e:
self.assertTrue('int or float' in str(e))
@patch('urllib3.util.timeout.current_time')
def test_timeout(self, current_time):
timeout = Timeout(total=3)
# make 'no time' elapse
timeout = self._make_time_pass(seconds=0, timeout=timeout,
time_mock=current_time)
self.assertEqual(timeout.read_timeout, 3)
self.assertEqual(timeout.connect_timeout, 3)
timeout = Timeout(total=3, connect=2)
self.assertEqual(timeout.connect_timeout, 2)
timeout = Timeout()
self.assertEqual(timeout.connect_timeout, Timeout.DEFAULT_TIMEOUT)
# Connect takes 5 seconds, leaving 5 seconds for read
timeout = Timeout(total=10, read=7)
timeout = self._make_time_pass(seconds=5, timeout=timeout,
time_mock=current_time)
self.assertEqual(timeout.read_timeout, 5)
# Connect takes 2 seconds, read timeout still 7 seconds
timeout = Timeout(total=10, read=7)
timeout = self._make_time_pass(seconds=2, timeout=timeout,
time_mock=current_time)
self.assertEqual(timeout.read_timeout, 7)
timeout = Timeout(total=10, read=7)
self.assertEqual(timeout.read_timeout, 7)
timeout = Timeout(total=None, read=None, connect=None)
self.assertEqual(timeout.connect_timeout, None)
self.assertEqual(timeout.read_timeout, None)
self.assertEqual(timeout.total, None)
timeout = Timeout(5)
self.assertEqual(timeout.total, 5)
def test_timeout_str(self):
timeout = Timeout(connect=1, read=2, total=3)
self.assertEqual(str(timeout), "Timeout(connect=1, read=2, total=3)")
timeout = Timeout(connect=1, read=None, total=3)
self.assertEqual(str(timeout), "Timeout(connect=1, read=None, total=3)")
@patch('urllib3.util.timeout.current_time')
def test_timeout_elapsed(self, current_time):
current_time.return_value = TIMEOUT_EPOCH
timeout = Timeout(total=3)
self.assertRaises(TimeoutStateError, timeout.get_connect_duration)
timeout.start_connect()
self.assertRaises(TimeoutStateError, timeout.start_connect)
current_time.return_value = TIMEOUT_EPOCH + 2
self.assertEqual(timeout.get_connect_duration(), 2)
current_time.return_value = TIMEOUT_EPOCH + 37
self.assertEqual(timeout.get_connect_duration(), 37)
def test_resolve_cert_reqs(self):
self.assertEqual(resolve_cert_reqs(None), ssl.CERT_NONE)
self.assertEqual(resolve_cert_reqs(ssl.CERT_NONE), ssl.CERT_NONE)
self.assertEqual(resolve_cert_reqs(ssl.CERT_REQUIRED), ssl.CERT_REQUIRED)
self.assertEqual(resolve_cert_reqs('REQUIRED'), ssl.CERT_REQUIRED)
self.assertEqual(resolve_cert_reqs('CERT_REQUIRED'), ssl.CERT_REQUIRED)
def test_is_fp_closed_object_supports_closed(self):
class ClosedFile(object):
@property
def closed(self):
return True
self.assertTrue(is_fp_closed(ClosedFile()))
def test_is_fp_closed_object_has_none_fp(self):
class NoneFpFile(object):
@property
def fp(self):
return None
self.assertTrue(is_fp_closed(NoneFpFile()))
def test_is_fp_closed_object_has_fp(self):
class FpFile(object):
@property
def fp(self):
return True
self.assertTrue(not is_fp_closed(FpFile()))
def test_is_fp_closed_object_has_neither_fp_nor_closed(self):
class NotReallyAFile(object):
pass
self.assertRaises(ValueError, is_fp_closed, NotReallyAFile())
def test_ssl_wrap_socket_loads_the_cert_chain(self):
socket = object()
mock_context = Mock()
ssl_wrap_socket(ssl_context=mock_context, sock=socket,
certfile='/path/to/certfile')
mock_context.load_cert_chain.assert_called_once_with(
'/path/to/certfile', None)
def test_ssl_wrap_socket_loads_verify_locations(self):
socket = object()
mock_context = Mock()
ssl_wrap_socket(ssl_context=mock_context, ca_certs='/path/to/pem',
sock=socket)
mock_context.load_verify_locations.assert_called_once_with(
'/path/to/pem', None)
def test_ssl_wrap_socket_loads_certificate_directories(self):
socket = object()
mock_context = Mock()
ssl_wrap_socket(ssl_context=mock_context, ca_cert_dir='/path/to/pems',
sock=socket)
mock_context.load_verify_locations.assert_called_once_with(
None, '/path/to/pems')
def test_ssl_wrap_socket_with_no_sni(self):
socket = object()
mock_context = Mock()
# Ugly preservation of original value
HAS_SNI = ssl_.HAS_SNI
ssl_.HAS_SNI = False
ssl_wrap_socket(ssl_context=mock_context, sock=socket)
mock_context.wrap_socket.assert_called_once_with(socket)
ssl_.HAS_SNI = HAS_SNI
def test_ssl_wrap_socket_with_no_sni_warns(self):
socket = object()
mock_context = Mock()
# Ugly preservation of original value
HAS_SNI = ssl_.HAS_SNI
ssl_.HAS_SNI = False
with patch('warnings.warn') as warn:
ssl_wrap_socket(ssl_context=mock_context, sock=socket)
mock_context.wrap_socket.assert_called_once_with(socket)
ssl_.HAS_SNI = HAS_SNI
self.assertTrue(warn.call_count >= 1)
warnings = [call[0][1] for call in warn.call_args_list]
self.assertTrue(SNIMissingWarning in warnings)
def test_const_compare_digest_fallback(self):
target = hashlib.sha256(b'abcdef').digest()
self.assertTrue(_const_compare_digest_backport(target, target))
prefix = target[:-1]
self.assertFalse(_const_compare_digest_backport(target, prefix))
suffix = target + b'0'
self.assertFalse(_const_compare_digest_backport(target, suffix))
incorrect = hashlib.sha256(b'xyz').digest()
self.assertFalse(_const_compare_digest_backport(target, incorrect))
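# Editor's sketch (an assumption about the backport exercised above, not urllib3's actual
# implementation): constant-time digest comparison is typically done by XOR-ing byte pairs and
# OR-ing the results together, so the amount of work never depends on where the first
# mismatch occurs.
def _reference_const_compare(a, b):
    if len(a) != len(b):
        return False
    result = 0
    for l, r in zip(bytearray(a), bytearray(b)):
        result |= l ^ r
    return result == 0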
| mit |
nckx/dstat | plugins/dstat_vm_mem.py | 4 | 1136 | ### Author: Bert de Bruijn <bert+dstat$debruijn,be>
### VMware memory stats
### Displays memory stats coming from the hypervisor inside VMware VMs.
### The vmGuestLib API from VMware Tools needs to be installed
class dstat_plugin(dstat):
def __init__(self):
self.name = 'vmware memory'
self.vars = ('active', 'ballooned', 'mapped', 'swapped', 'used')
self.nick = ('active', 'balln', 'mappd', 'swapd', 'used')
self.type = 'd'
self.width = 5
self.scale = 1024
def check(self):
try:
global vmguestlib
import vmguestlib
self.gl = vmguestlib.VMGuestLib()
except:
raise Exception, 'Needs python-vmguestlib module'
def extract(self):
self.gl.UpdateInfo()
self.val['active'] = self.gl.GetMemActiveMB() * 1024 ** 2
self.val['ballooned'] = self.gl.GetMemBalloonedMB() * 1024 ** 2
self.val['mapped'] = self.gl.GetMemMappedMB() * 1024 ** 2
self.val['swapped'] = self.gl.GetMemSwappedMB() * 1024 ** 2
self.val['used'] = self.gl.GetMemUsedMB() * 1024 ** 2
# vim:ts=4:sw=4 | gpl-2.0 |
jbogaardt/chainladder-python | chainladder/development/tests/test_munich.py | 1 | 1554 | import numpy as np
import chainladder as cl
from rpy2.robjects.packages import importr
from rpy2.robjects import r
CL = importr("ChainLadder")
def test_mcl_paid():
df = r("MunichChainLadder(MCLpaid, MCLincurred)").rx("MCLPaid")
p = cl.MunichAdjustment(paid_to_incurred=("paid", "incurred")).fit(
cl.Development(sigma_interpolation="mack").fit_transform(cl.load_sample("mcl"))
)
xp = p.ldf_.get_array_module()
arr = xp.array(df[0])
assert xp.allclose(arr, p.munich_full_triangle_[0, 0, 0, :, :], atol=1e-5)
def test_mcl_incurred():
df = r("MunichChainLadder(MCLpaid, MCLincurred)").rx("MCLIncurred")
p = cl.MunichAdjustment(paid_to_incurred=[("paid", "incurred")]).fit(
cl.Development(sigma_interpolation="mack").fit_transform(cl.load_sample("mcl"))
)
xp = p.ldf_.get_array_module()
arr = xp.array(df[0])
assert xp.allclose(arr, p.munich_full_triangle_[1, 0, 0, :, :], atol=1e-5)
def test_mcl_ult():
mcl = cl.load_sample("mcl")
dev = cl.Development().fit_transform(mcl)
cl_traditional = cl.Chainladder().fit(dev).ultimate_
dev_munich = cl.MunichAdjustment(
paid_to_incurred=[("paid", "incurred")]
).fit_transform(dev)
cl_munich = cl.Chainladder().fit(dev_munich).ultimate_
def test_mcl_rollforward():
mcl = cl.load_sample("mcl")
mcl_prior = mcl[mcl.valuation < mcl.valuation_date]
munich = cl.MunichAdjustment(paid_to_incurred=[("paid", "incurred")]).fit(mcl_prior)
new = munich.transform(mcl)
cl.Chainladder().fit(new).ultimate_
| mit |
darkforestzero/buck | third-party/py/pex/pex/interpreter.py | 52 | 12996 | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""pex support for interacting with interpreters."""
from __future__ import absolute_import
import os
import re
import subprocess
import sys
from collections import defaultdict
from pkg_resources import Distribution, Requirement, find_distributions
from .base import maybe_requirement
from .compatibility import string
from .tracer import TRACER
try:
from numbers import Integral
except ImportError:
Integral = (int, long)
# Determine in the most platform-compatible way possible the identity of the interpreter
# and its known packages.
ID_PY = b"""
import sys
if hasattr(sys, 'pypy_version_info'):
subversion = 'PyPy'
elif sys.platform.startswith('java'):
subversion = 'Jython'
else:
subversion = 'CPython'
print("%s %s %s %s" % (
subversion,
sys.version_info[0],
sys.version_info[1],
sys.version_info[2]))
setuptools_path = None
try:
import pkg_resources
except ImportError:
sys.exit(0)
requirements = {}
for item in sys.path:
for dist in pkg_resources.find_distributions(item):
requirements[str(dist.as_requirement())] = dist.location
for requirement_str, location in requirements.items():
rs = requirement_str.split('==', 2)
if len(rs) == 2:
print('%s %s %s' % (rs[0], rs[1], location))
"""
class PythonIdentity(object):
class Error(Exception): pass
class InvalidError(Error): pass
class UnknownRequirement(Error): pass
# TODO(wickman) Support interpreter-specific versions, e.g. PyPy-2.2.1
HASHBANGS = {
'CPython': 'python%(major)d.%(minor)d',
'Jython': 'jython',
'PyPy': 'pypy',
}
@classmethod
def get_subversion(cls):
if hasattr(sys, 'pypy_version_info'):
subversion = 'PyPy'
elif sys.platform.startswith('java'):
subversion = 'Jython'
else:
subversion = 'CPython'
return subversion
@classmethod
def get(cls):
return cls(cls.get_subversion(), sys.version_info[0], sys.version_info[1], sys.version_info[2])
@classmethod
def from_id_string(cls, id_string):
values = id_string.split()
if len(values) != 4:
raise cls.InvalidError("Invalid id string: %s" % id_string)
return cls(str(values[0]), int(values[1]), int(values[2]), int(values[3]))
@classmethod
def from_path(cls, dirname):
interp, version = dirname.split('-')
major, minor, patch = version.split('.')
return cls(str(interp), int(major), int(minor), int(patch))
def __init__(self, interpreter, major, minor, patch):
for var in (major, minor, patch):
assert isinstance(var, Integral)
self._interpreter = interpreter
self._version = (major, minor, patch)
@property
def interpreter(self):
return self._interpreter
@property
def version(self):
return self._version
@property
def requirement(self):
return self.distribution.as_requirement()
@property
def distribution(self):
return Distribution(project_name=self._interpreter, version='.'.join(map(str, self._version)))
@classmethod
def parse_requirement(cls, requirement, default_interpreter='CPython'):
if isinstance(requirement, Requirement):
return requirement
elif isinstance(requirement, string):
try:
requirement = Requirement.parse(requirement)
except ValueError:
try:
requirement = Requirement.parse('%s%s' % (default_interpreter, requirement))
except ValueError:
raise ValueError('Unknown requirement string: %s' % requirement)
return requirement
else:
raise ValueError('Unknown requirement type: %r' % (requirement,))
def matches(self, requirement):
"""Given a Requirement, check if this interpreter matches."""
try:
requirement = self.parse_requirement(requirement, self._interpreter)
except ValueError as e:
raise self.UnknownRequirement(str(e))
return self.distribution in requirement
def hashbang(self):
hashbang_string = self.HASHBANGS.get(self.interpreter, 'CPython') % {
'major': self._version[0],
'minor': self._version[1],
'patch': self._version[2],
}
return '#!/usr/bin/env %s' % hashbang_string
@property
def python(self):
# return the python version in the format of the 'python' key for distributions
# specifically, '2.6', '2.7', '3.2', etc.
return '%d.%d' % (self.version[0:2])
def __str__(self):
return '%s-%s.%s.%s' % (self._interpreter,
self._version[0], self._version[1], self._version[2])
def __repr__(self):
return 'PythonIdentity(%r, %s, %s, %s)' % (
self._interpreter, self._version[0], self._version[1], self._version[2])
def __eq__(self, other):
return all([isinstance(other, PythonIdentity),
self.interpreter == other.interpreter,
self.version == other.version])
def __hash__(self):
return hash((self._interpreter, self._version))
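# Illustrative sketch, not part of the original module: the identity string
# emitted by ID_PY round-trips through PythonIdentity (the concrete version
# below is hypothetical).
#
#   identity = PythonIdentity.from_id_string('CPython 2 7 8')
#   identity.python      # '2.7'
#   identity.hashbang()  # '#!/usr/bin/env python2.7'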
class PythonInterpreter(object):
REGEXEN = (
re.compile(r'jython$'),
# NB: OSX ships python binaries named Python so we allow for capital-P.
re.compile(r'[Pp]ython$'),
    re.compile(r'python[23]\.[0-9]$'),
re.compile(r'pypy$'),
    re.compile(r'pypy-1\.[0-9]$'),
)
CACHE = {} # memoize executable => PythonInterpreter
try:
# Versions of distribute prior to the setuptools merge would automatically replace
# 'setuptools' requirements with 'distribute'. It provided the 'replacement' kwarg
# to toggle this, but it was removed post-merge.
COMPATIBLE_SETUPTOOLS = Requirement.parse('setuptools>=1.0', replacement=False)
except TypeError:
COMPATIBLE_SETUPTOOLS = Requirement.parse('setuptools>=1.0')
class Error(Exception): pass
class IdentificationError(Error): pass
class InterpreterNotFound(Error): pass
@classmethod
def get(cls):
return cls.from_binary(sys.executable)
@classmethod
def all(cls, paths=None):
if paths is None:
paths = os.getenv('PATH', '').split(':')
return cls.filter(cls.find(paths))
@classmethod
def _parse_extras(cls, output_lines):
def iter_lines():
for line in output_lines:
try:
dist_name, dist_version, location = line.split()
except ValueError:
raise cls.IdentificationError('Could not identify requirement: %s' % line)
yield ((dist_name, dist_version), location)
return dict(iter_lines())
@classmethod
def _from_binary_internal(cls, path_extras):
def iter_extras():
for item in sys.path + list(path_extras):
for dist in find_distributions(item):
if dist.version:
yield ((dist.key, dist.version), dist.location)
return cls(sys.executable, PythonIdentity.get(), dict(iter_extras()))
@classmethod
def _from_binary_external(cls, binary, path_extras):
environ = cls.sanitized_environment()
environ['PYTHONPATH'] = ':'.join(path_extras)
po = subprocess.Popen(
[binary],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
env=environ)
so, _ = po.communicate(ID_PY)
output = so.decode('utf8').splitlines()
if len(output) == 0:
raise cls.IdentificationError('Could not establish identity of %s' % binary)
identity, extras = output[0], output[1:]
return cls(
binary,
PythonIdentity.from_id_string(identity),
extras=cls._parse_extras(extras))
@classmethod
def expand_path(cls, path):
if os.path.isfile(path):
return [path]
elif os.path.isdir(path):
return [os.path.join(path, fn) for fn in os.listdir(path)]
return []
@classmethod
def from_env(cls, hashbang):
"""Resolve a PythonInterpreter as /usr/bin/env would.
:param hashbang: A string, e.g. "python3.3" representing some binary on the $PATH.
"""
paths = os.getenv('PATH', '').split(':')
for path in paths:
for fn in cls.expand_path(path):
basefile = os.path.basename(fn)
if hashbang == basefile:
try:
return cls.from_binary(fn)
except Exception as e:
TRACER.log('Could not identify %s: %s' % (fn, e))
@classmethod
def from_binary(cls, binary, path_extras=None):
path_extras = path_extras or ()
if binary not in cls.CACHE:
if binary == sys.executable:
cls.CACHE[binary] = cls._from_binary_internal(path_extras)
else:
cls.CACHE[binary] = cls._from_binary_external(binary, path_extras)
return cls.CACHE[binary]
@classmethod
def find(cls, paths):
"""
Given a list of files or directories, try to detect python interpreters amongst them.
Returns a list of PythonInterpreter objects.
"""
pythons = []
for path in paths:
for fn in cls.expand_path(path):
basefile = os.path.basename(fn)
if any(matcher.match(basefile) is not None for matcher in cls.REGEXEN):
try:
pythons.append(cls.from_binary(fn))
except Exception as e:
TRACER.log('Could not identify %s: %s' % (fn, e))
continue
return pythons
@classmethod
def filter(cls, pythons):
"""
    Given a list of python interpreters in the format provided by PythonInterpreter.find(),
    filter out duplicate versions and versions we would prefer not to use.
    Returns a list in the same format as find.
"""
good = []
MAJOR, MINOR, SUBMINOR = range(3)
def version_filter(version):
return (version[MAJOR] == 2 and version[MINOR] >= 6 or
version[MAJOR] == 3 and version[MINOR] >= 2)
all_versions = set(interpreter.identity.version for interpreter in pythons)
good_versions = filter(version_filter, all_versions)
for version in good_versions:
# For each candidate, use the latest version we find on the filesystem.
candidates = defaultdict(list)
for interp in pythons:
if interp.identity.version == version:
candidates[interp.identity.interpreter].append(interp)
for interp_class in candidates:
candidates[interp_class].sort(
key=lambda interp: os.path.getmtime(interp.binary), reverse=True)
good.append(candidates[interp_class].pop(0))
return good
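  # Illustrative usage sketch, not part of the original class: discover the
  # interpreters on $PATH and keep the preferred binary per implementation
  # and version.
  #
  #   interpreters = PythonInterpreter.all()
  #   for interp in interpreters:
  #     print('%s -> %s' % (interp.binary, interp.version_string))
  #
  # PythonInterpreter.get() returns the interpreter running the current process.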
@classmethod
def sanitized_environment(cls):
# N.B. This is merely a hack because sysconfig.py on the default OS X
# installation of 2.6/2.7 breaks.
env_copy = os.environ.copy()
env_copy.pop('MACOSX_DEPLOYMENT_TARGET', None)
return env_copy
@classmethod
def replace(cls, requirement):
self = cls.get()
if self.identity.matches(requirement):
return False
for pi in cls.all():
if pi.identity.matches(requirement):
break
else:
raise cls.InterpreterNotFound('Could not find interpreter matching filter!')
os.execve(pi.binary, [pi.binary] + sys.argv, cls.sanitized_environment())
def __init__(self, binary, identity, extras=None):
"""Construct a PythonInterpreter.
You should probably PythonInterpreter.from_binary instead.
:param binary: The full path of the python binary.
:param identity: The :class:`PythonIdentity` of the PythonInterpreter.
:param extras: A mapping from (dist.key, dist.version) to dist.location
of the extras associated with this interpreter.
"""
self._binary = os.path.realpath(binary)
self._extras = extras or {}
self._identity = identity
def with_extra(self, key, version, location):
extras = self._extras.copy()
extras[(key, version)] = location
return self.__class__(self._binary, self._identity, extras)
@property
def extras(self):
return self._extras.copy()
@property
def binary(self):
return self._binary
@property
def identity(self):
return self._identity
@property
def python(self):
return self._identity.python
@property
def version(self):
return self._identity.version
@property
def version_string(self):
return str(self._identity)
def satisfies(self, capability):
if not isinstance(capability, list):
raise TypeError('Capability must be a list, got %s' % type(capability))
return not any(self.get_location(req) is None for req in capability)
def get_location(self, req):
req = maybe_requirement(req)
for dist, location in self.extras.items():
dist_name, dist_version = dist
if req.key == dist_name and dist_version in req:
return location
def __hash__(self):
return hash((self._binary, self._identity))
def __eq__(self, other):
if not isinstance(other, PythonInterpreter):
return False
return (self._binary, self._identity) == (other._binary, other._identity)
def __lt__(self, other):
if not isinstance(other, PythonInterpreter):
return False
return self.version < other.version
def __repr__(self):
return '%s(%r, %r, %r)' % (self.__class__.__name__, self._binary, self._identity, self._extras)
| apache-2.0 |
meteorcloudy/tensorflow | tensorflow/contrib/image/python/ops/image_ops.py | 6 | 21765 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python layer for image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.image.ops import gen_image_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import resource_loader
_image_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_image_ops.so"))
_IMAGE_DTYPES = set(
[dtypes.uint8, dtypes.int32, dtypes.int64,
dtypes.float16, dtypes.float32, dtypes.float64])
ops.RegisterShape("ImageConnectedComponents")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("ImageProjectiveTransform")(common_shapes.call_cpp_shape_fn)
def rotate(images, angles, interpolation="NEAREST", name=None):
"""Rotate image(s) counterclockwise by the passed angle(s) in radians.
Args:
images: A tensor of shape (num_images, num_rows, num_columns, num_channels)
(NHWC), (num_rows, num_columns, num_channels) (HWC), or
(num_rows, num_columns) (HW). The rank must be statically known (the
      shape is not `TensorShape(None)`).
angles: A scalar angle to rotate all images by, or (if images has rank 4)
a vector of length num_images, with an angle for each image in the batch.
interpolation: Interpolation mode. Supported values: "NEAREST", "BILINEAR".
name: The name of the op.
Returns:
Image(s) with the same type and shape as `images`, rotated by the given
angle(s). Empty space due to the rotation will be filled with zeros.
Raises:
TypeError: If `image` is an invalid type.
"""
with ops.name_scope(name, "rotate"):
image_or_images = ops.convert_to_tensor(images)
if image_or_images.dtype.base_dtype not in _IMAGE_DTYPES:
raise TypeError("Invalid dtype %s." % image_or_images.dtype)
elif image_or_images.get_shape().ndims is None:
raise TypeError("image_or_images rank must be statically known")
elif len(image_or_images.get_shape()) == 2:
images = image_or_images[None, :, :, None]
elif len(image_or_images.get_shape()) == 3:
images = image_or_images[None, :, :, :]
elif len(image_or_images.get_shape()) == 4:
images = image_or_images
else:
raise TypeError("Images should have rank between 2 and 4.")
image_height = math_ops.cast(array_ops.shape(images)[1],
dtypes.float32)[None]
image_width = math_ops.cast(array_ops.shape(images)[2],
dtypes.float32)[None]
output = transform(
images,
angles_to_projective_transforms(angles, image_height, image_width),
interpolation=interpolation)
if image_or_images.get_shape().ndims is None:
raise TypeError("image_or_images rank must be statically known")
elif len(image_or_images.get_shape()) == 2:
return output[0, :, :, 0]
elif len(image_or_images.get_shape()) == 3:
return output[0, :, :, :]
else:
return output
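# Illustrative usage sketch, not part of the original module (`image` below is
# a hypothetical uint8 HWC tensor):
#
#   import math
#   rotated = rotate(image, math.pi / 4, interpolation="BILINEAR")
#
# Passing a length-N vector of angles rotates each image of an NHWC batch by
# its own angle.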
def translate(images, translations, interpolation="NEAREST", name=None):
"""Translate image(s) by the passed vectors(s).
Args:
images: A tensor of shape (num_images, num_rows, num_columns, num_channels)
(NHWC), (num_rows, num_columns, num_channels) (HWC), or
(num_rows, num_columns) (HW). The rank must be statically known (the
      shape is not `TensorShape(None)`).
translations: A vector representing [dx, dy] or (if images has rank 4)
a matrix of length num_images, with a [dx, dy] vector for each image in
the batch.
interpolation: Interpolation mode. Supported values: "NEAREST", "BILINEAR".
name: The name of the op.
Returns:
Image(s) with the same type and shape as `images`, translated by the given
vector(s). Empty space due to the translation will be filled with zeros.
Raises:
TypeError: If `image` is an invalid type.
"""
with ops.name_scope(name, "translate"):
return transform(
images,
translations_to_projective_transforms(translations),
interpolation=interpolation)
def angles_to_projective_transforms(angles,
image_height,
image_width,
name=None):
"""Returns projective transform(s) for the given angle(s).
Args:
angles: A scalar angle to rotate all images by, or (for batches of images)
a vector with an angle to rotate each image in the batch. The rank must
      be statically known (the shape is not `TensorShape(None)`).
image_height: Height of the image(s) to be transformed.
image_width: Width of the image(s) to be transformed.
Returns:
A tensor of shape (num_images, 8). Projective transforms which can be given
to `tf.contrib.image.transform`.
"""
with ops.name_scope(name, "angles_to_projective_transforms"):
angle_or_angles = ops.convert_to_tensor(
angles, name="angles", dtype=dtypes.float32)
if len(angle_or_angles.get_shape()) == 0: # pylint: disable=g-explicit-length-test
angles = angle_or_angles[None]
elif len(angle_or_angles.get_shape()) == 1:
angles = angle_or_angles
else:
raise TypeError("Angles should have rank 0 or 1.")
x_offset = ((image_width - 1) - (math_ops.cos(angles) *
(image_width - 1) - math_ops.sin(angles) *
(image_height - 1))) / 2.0
y_offset = ((image_height - 1) - (math_ops.sin(angles) *
(image_width - 1) + math_ops.cos(angles) *
(image_height - 1))) / 2.0
num_angles = array_ops.shape(angles)[0]
return array_ops.concat(
values=[
math_ops.cos(angles)[:, None],
-math_ops.sin(angles)[:, None],
x_offset[:, None],
math_ops.sin(angles)[:, None],
math_ops.cos(angles)[:, None],
y_offset[:, None],
array_ops.zeros((num_angles, 2), dtypes.float32),
],
axis=1)
def translations_to_projective_transforms(translations, name=None):
"""Returns projective transform(s) for the given translation(s).
Args:
translations: A 2-element list representing [dx, dy] or a matrix of
2-element lists representing [dx, dy] to translate for each image
(for a batch of images). The rank must be statically known (the shape
      is not `TensorShape(None)`).
name: The name of the op.
Returns:
A tensor of shape (num_images, 8) projective transforms which can be given
to `tf.contrib.image.transform`.
"""
with ops.name_scope(name, "translations_to_projective_transforms"):
translation_or_translations = ops.convert_to_tensor(
translations, name="translations", dtype=dtypes.float32)
if translation_or_translations.get_shape().ndims is None:
raise TypeError(
"translation_or_translations rank must be statically known")
elif len(translation_or_translations.get_shape()) == 1:
translations = translation_or_translations[None]
elif len(translation_or_translations.get_shape()) == 2:
translations = translation_or_translations
else:
raise TypeError("Translations should have rank 1 or 2.")
num_translations = array_ops.shape(translations)[0]
# The translation matrix looks like:
# [[1 0 -dx]
# [0 1 -dy]
# [0 0 1]]
# where the last entry is implicit.
# Translation matrices are always float32.
return array_ops.concat(
values=[
array_ops.ones((num_translations, 1), dtypes.float32),
array_ops.zeros((num_translations, 1), dtypes.float32),
-translations[:, 0, None],
array_ops.zeros((num_translations, 1), dtypes.float32),
array_ops.ones((num_translations, 1), dtypes.float32),
-translations[:, 1, None],
array_ops.zeros((num_translations, 2), dtypes.float32),
],
axis=1)
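# Illustrative note, not part of the original module: for a single translation
# [dx, dy] the returned flat transform is [1, 0, -dx, 0, 1, -dy, 0, 0],
# matching the matrix sketched in the comment above.
#
#   t = translations_to_projective_transforms([2.0, 3.0])
#   # t evaluates to [[1., 0., -2., 0., 1., -3., 0., 0.]]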
def transform(images, transforms, interpolation="NEAREST", name=None):
"""Applies the given transform(s) to the image(s).
Args:
images: A tensor of shape (num_images, num_rows, num_columns, num_channels)
(NHWC), (num_rows, num_columns, num_channels) (HWC), or
(num_rows, num_columns) (HW). The rank must be statically known (the
      shape is not `TensorShape(None)`).
transforms: Projective transform matrix/matrices. A vector of length 8 or
tensor of size N x 8. If one row of transforms is
[a0, a1, a2, b0, b1, b2, c0, c1], then it maps the *output* point
`(x, y)` to a transformed *input* point
`(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`,
where `k = c0 x + c1 y + 1`. The transforms are *inverted* compared to
the transform mapping input points to output points. Note that gradients
are not backpropagated into transformation parameters.
interpolation: Interpolation mode. Supported values: "NEAREST", "BILINEAR".
Returns:
Image(s) with the same type and shape as `images`, with the given
transform(s) applied. Transformed coordinates outside of the input image
will be filled with zeros.
Raises:
TypeError: If `image` is an invalid type.
"""
with ops.name_scope(name, "transform"):
image_or_images = ops.convert_to_tensor(images, name="images")
transform_or_transforms = ops.convert_to_tensor(
transforms, name="transforms", dtype=dtypes.float32)
if image_or_images.dtype.base_dtype not in _IMAGE_DTYPES:
raise TypeError("Invalid dtype %s." % image_or_images.dtype)
elif image_or_images.get_shape().ndims is None:
raise TypeError("image_or_images rank must be statically known")
elif len(image_or_images.get_shape()) == 2:
images = image_or_images[None, :, :, None]
elif len(image_or_images.get_shape()) == 3:
images = image_or_images[None, :, :, :]
elif len(image_or_images.get_shape()) == 4:
images = image_or_images
else:
raise TypeError("Images should have rank between 2 and 4.")
if len(transform_or_transforms.get_shape()) == 1:
transforms = transform_or_transforms[None]
elif transform_or_transforms.get_shape().ndims is None:
raise TypeError(
"transform_or_transforms rank must be statically known")
elif len(transform_or_transforms.get_shape()) == 2:
transforms = transform_or_transforms
else:
raise TypeError("Transforms should have rank 1 or 2.")
output = gen_image_ops.image_projective_transform(
images, transforms, interpolation=interpolation.upper())
if len(image_or_images.get_shape()) == 2:
return output[0, :, :, 0]
elif len(image_or_images.get_shape()) == 3:
return output[0, :, :, :]
else:
return output
def compose_transforms(*transforms):
"""Composes the transforms tensors.
Args:
*transforms: List of image projective transforms to be composed. Each
transform is length 8 (single transform) or shape (N, 8) (batched
transforms). The shapes of all inputs must be equal, and at least one
input must be given.
Returns:
A composed transform tensor. When passed to `tf.contrib.image.transform`,
equivalent to applying each of the given transforms to the image in
order.
"""
assert transforms, "transforms cannot be empty"
with ops.name_scope("compose_transforms"):
composed = flat_transforms_to_matrices(transforms[0])
for tr in transforms[1:]:
# Multiply batches of matrices.
composed = math_ops.matmul(composed, flat_transforms_to_matrices(tr))
return matrices_to_flat_transforms(composed)
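# Illustrative composition sketch, not part of the original module (the tensors
# `image_height`, `image_width` and `images` are assumed to be defined): per the
# docstring above, composing is equivalent to applying the given transforms in
# order, so a rotation followed by a translation can be fused into one call.
#
#   rot = angles_to_projective_transforms(0.5, image_height, image_width)
#   shift = translations_to_projective_transforms([5.0, 0.0])
#   fused = compose_transforms(rot, shift)
#   out = transform(images, fused)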
def flat_transforms_to_matrices(transforms):
"""Converts `tf.contrib.image` projective transforms to affine matrices.
Note that the output matrices map output coordinates to input coordinates. For
the forward transformation matrix, call `tf.linalg.inv` on the result.
Args:
transforms: Vector of length 8, or batches of transforms with shape
`(N, 8)`.
Returns:
3D tensor of matrices with shape `(N, 3, 3)`. The output matrices map the
*output coordinates* (in homogeneous coordinates) of each transform to the
corresponding *input coordinates*.
Raises:
ValueError: If `transforms` have an invalid shape.
"""
with ops.name_scope("flat_transforms_to_matrices"):
transforms = ops.convert_to_tensor(transforms, name="transforms")
if transforms.shape.ndims not in (1, 2):
raise ValueError("Transforms should be 1D or 2D, got: %s" % transforms)
# Make the transform(s) 2D in case the input is a single transform.
transforms = array_ops.reshape(transforms, constant_op.constant([-1, 8]))
num_transforms = array_ops.shape(transforms)[0]
# Add a column of ones for the implicit last entry in the matrix.
return array_ops.reshape(
array_ops.concat(
[transforms, array_ops.ones([num_transforms, 1])], axis=1),
constant_op.constant([-1, 3, 3]))
def matrices_to_flat_transforms(transform_matrices):
"""Converts affine matrices to `tf.contrib.image` projective transforms.
Note that we expect matrices that map output coordinates to input coordinates.
To convert forward transformation matrices, call `tf.linalg.inv` on the
matrices and use the result here.
Args:
transform_matrices: One or more affine transformation matrices, for the
reverse transformation in homogeneous coordinates. Shape `(3, 3)` or
`(N, 3, 3)`.
Returns:
2D tensor of flat transforms with shape `(N, 8)`, which may be passed into
`tf.contrib.image.transform`.
Raises:
ValueError: If `transform_matrices` have an invalid shape.
"""
with ops.name_scope("matrices_to_flat_transforms"):
transform_matrices = ops.convert_to_tensor(
transform_matrices, name="transform_matrices")
if transform_matrices.shape.ndims not in (2, 3):
raise ValueError(
"Matrices should be 2D or 3D, got: %s" % transform_matrices)
# Flatten each matrix.
transforms = array_ops.reshape(transform_matrices,
constant_op.constant([-1, 9]))
# Divide each matrix by the last entry (normally 1).
transforms /= transforms[:, 8:9]
return transforms[:, :8]
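# Illustrative round trip, not part of the original module: a flat transform
# converts to a 3x3 matrix and back without loss.
#
#   flat = [1., 0., -2., 0., 1., -3., 0., 0.]
#   mat = flat_transforms_to_matrices(flat)        # shape (1, 3, 3)
#   flat_again = matrices_to_flat_transforms(mat)  # shape (1, 8)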
@ops.RegisterGradient("ImageProjectiveTransform")
def _image_projective_transform_grad(op, grad):
"""Computes the gradient for ImageProjectiveTransform."""
images = op.inputs[0]
transforms = op.inputs[1]
interpolation = op.get_attr("interpolation")
image_or_images = ops.convert_to_tensor(images, name="images")
transform_or_transforms = ops.convert_to_tensor(
transforms, name="transforms", dtype=dtypes.float32)
if image_or_images.dtype.base_dtype not in _IMAGE_DTYPES:
raise TypeError("Invalid dtype %s." % image_or_images.dtype)
if len(image_or_images.get_shape()) == 2:
images = image_or_images[None, :, :, None]
elif len(image_or_images.get_shape()) == 3:
images = image_or_images[None, :, :, :]
elif len(image_or_images.get_shape()) == 4:
images = image_or_images
else:
raise TypeError("Images should have rank between 2 and 4")
if len(transform_or_transforms.get_shape()) == 1:
transforms = transform_or_transforms[None]
elif len(transform_or_transforms.get_shape()) == 2:
transforms = transform_or_transforms
else:
raise TypeError("Transforms should have rank 1 or 2.")
# Invert transformations
transforms = flat_transforms_to_matrices(transforms=transforms)
inverse = linalg_ops.matrix_inverse(transforms)
transforms = matrices_to_flat_transforms(inverse)
output = gen_image_ops.image_projective_transform(
grad, transforms, interpolation=interpolation)
if len(image_or_images.get_shape()) == 2:
return [output[0, :, :, 0], None]
elif len(image_or_images.get_shape()) == 3:
return [output[0, :, :, :], None]
else:
return [output, None]
def bipartite_match(distance_mat,
num_valid_rows,
top_k=-1,
name="bipartite_match"):
"""Find bipartite matching based on a given distance matrix.
A greedy bi-partite matching algorithm is used to obtain the matching with
the (greedy) minimum distance.
Args:
distance_mat: A 2-D float tensor of shape `[num_rows, num_columns]`. It is a
pair-wise distance matrix between the entities represented by each row and
each column. It is an asymmetric matrix. The smaller the distance is, the
more similar the pairs are. The bipartite matching is to minimize the
distances.
num_valid_rows: A scalar or a 1-D tensor with one element describing the
number of valid rows of distance_mat to consider for the bipartite
matching. If set to be negative, then all rows from `distance_mat` are
used.
top_k: A scalar that specifies the number of top-k matches to retrieve.
      If set to be negative, it is set according to the maximum number of
matches from `distance_mat`.
name: The name of the op.
Returns:
row_to_col_match_indices: A vector of length num_rows, which is the number
of rows of the input `distance_matrix`. If `row_to_col_match_indices[i]`
is not -1, row i is matched to column `row_to_col_match_indices[i]`.
col_to_row_match_indices: A vector of length num_columns, which is the
number of columns of the input distance matrix.
If `col_to_row_match_indices[j]` is not -1, column j is matched to row
`col_to_row_match_indices[j]`.
"""
result = gen_image_ops.bipartite_match(
distance_mat, num_valid_rows, top_k, name=name)
return result
def connected_components(images):
"""Labels the connected components in a batch of images.
A component is a set of pixels in a single input image, which are all adjacent
  and all have the same non-zero value. Components use a squared
connectivity of one (all True entries are joined with their neighbors above,
below, left, and right). Components across all images have consecutive ids 1
through n. Components are labeled according to the first pixel of the
component appearing in row-major order (lexicographic order by
image_index_in_batch, row, col). Zero entries all have an output id of 0.
  This op is equivalent to `scipy.ndimage.measurements.label` on a 2D array
with the default structuring element (which is the connectivity used here).
Args:
images: A 2D (H, W) or 3D (N, H, W) Tensor of boolean image(s).
Returns:
Components with the same shape as `images`. False entries in `images` have
value 0, and all True entries map to a component id > 0.
Raises:
TypeError: if `images` is not 2D or 3D.
"""
with ops.name_scope("connected_components"):
image_or_images = ops.convert_to_tensor(images, name="images")
if len(image_or_images.get_shape()) == 2:
images = image_or_images[None, :, :]
elif len(image_or_images.get_shape()) == 3:
images = image_or_images
else:
raise TypeError(
"images should have rank 2 (HW) or 3 (NHW). Static shape is %s" %
image_or_images.get_shape())
components = gen_image_ops.image_connected_components(images)
# TODO(ringwalt): Component id renaming should be done in the op, to avoid
# constructing multiple additional large tensors.
components_flat = array_ops.reshape(components, [-1])
unique_ids, id_index = array_ops.unique(components_flat)
id_is_zero = array_ops.where(math_ops.equal(unique_ids, 0))[:, 0]
# Map each nonzero id to consecutive values.
nonzero_consecutive_ids = math_ops.range(
array_ops.shape(unique_ids)[0] - array_ops.shape(id_is_zero)[0]) + 1
def no_zero():
# No need to insert a zero into the ids.
return nonzero_consecutive_ids
def has_zero():
# Insert a zero in the consecutive ids where zero appears in unique_ids.
# id_is_zero has length 1.
zero_id_ind = math_ops.to_int32(id_is_zero[0])
ids_before = nonzero_consecutive_ids[:zero_id_ind]
ids_after = nonzero_consecutive_ids[zero_id_ind:]
return array_ops.concat([ids_before, [0], ids_after], axis=0)
new_ids = control_flow_ops.cond(
math_ops.equal(array_ops.shape(id_is_zero)[0], 0), no_zero, has_zero)
components = array_ops.reshape(
array_ops.gather(new_ids, id_index), array_ops.shape(components))
if len(image_or_images.get_shape()) == 2:
return components[0, :, :]
else:
return components
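# Illustrative example, not part of the original module: for the boolean image
#
#   [[True, False, False],
#    [True, False, True]]
#
# connected_components returns
#
#   [[1, 0, 0],
#    [1, 0, 2]]
#
# because the two True pixels in column 0 are vertically adjacent, while the
# True pixel at (1, 2) forms its own component.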
ops.NotDifferentiable("BipartiteMatch")
ops.NotDifferentiable("ImageConnectedComponents")
| apache-2.0 |
simongoffin/my_odoo_tutorial | addons/crm/report/crm_phonecall_report.py | 16 | 4157 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.addons.crm import crm
from openerp.osv import fields, osv
AVAILABLE_STATES = [
('draft', 'Draft'),
('open', 'Todo'),
('cancel', 'Cancelled'),
('done', 'Held'),
('pending', 'Pending')
]
class crm_phonecall_report(osv.osv):
""" Phone calls by user and section """
_name = "crm.phonecall.report"
_description = "Phone calls by user and section"
_auto = False
_columns = {
'user_id':fields.many2one('res.users', 'User', readonly=True),
'section_id':fields.many2one('crm.case.section', 'Section', readonly=True),
'priority': fields.selection([('0','Low'), ('1','Normal'), ('2','High')], 'Priority'),
'nbr': fields.integer('# of Cases', readonly=True),
'state': fields.selection(AVAILABLE_STATES, 'Status', readonly=True),
'create_date': fields.datetime('Create Date', readonly=True, select=True),
'delay_close': fields.float('Delay to close', digits=(16,2),readonly=True, group_operator="avg",help="Number of Days to close the case"),
'duration': fields.float('Duration', digits=(16,2),readonly=True, group_operator="avg"),
'delay_open': fields.float('Delay to open',digits=(16,2),readonly=True, group_operator="avg",help="Number of Days to open the case"),
'categ_id': fields.many2one('crm.case.categ', 'Category', \
domain="[('section_id','=',section_id),\
('object_id.model', '=', 'crm.phonecall')]"),
'partner_id': fields.many2one('res.partner', 'Partner' , readonly=True),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'opening_date': fields.date('Opening Date', readonly=True, select=True),
'creation_date': fields.date('Creation Date', readonly=True, select=True),
'date_closed': fields.date('Close Date', readonly=True, select=True),
}
def init(self, cr):
""" Phone Calls By User And Section
@param cr: the current row, from the database cursor,
"""
tools.drop_view_if_exists(cr, 'crm_phonecall_report')
cr.execute("""
create or replace view crm_phonecall_report as (
select
id,
to_char(c.create_date, 'YYYY-MM-DD') as creation_date,
to_char(c.date_open, 'YYYY-MM-DD') as opening_date,
to_char(c.date_closed, 'YYYY-mm-dd') as date_closed,
c.state,
c.user_id,
c.section_id,
c.categ_id,
c.partner_id,
c.duration,
c.company_id,
c.priority,
1 as nbr,
date_trunc('day',c.create_date) as create_date,
extract('epoch' from (c.date_closed-c.create_date))/(3600*24) as delay_close,
extract('epoch' from (c.date_open-c.create_date))/(3600*24) as delay_open
from
crm_phonecall c
)""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
pratikmallya/hue | desktop/core/ext-py/tablib-0.10.0/tablib/packages/odf/manifest.py | 84 | 1387 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
#
from namespaces import MANIFESTNS
from element import Element
# Autogenerated
def Manifest(**args):
return Element(qname = (MANIFESTNS,'manifest'), **args)
def FileEntry(**args):
return Element(qname = (MANIFESTNS,'file-entry'), **args)
def EncryptionData(**args):
return Element(qname = (MANIFESTNS,'encryption-data'), **args)
def Algorithm(**args):
return Element(qname = (MANIFESTNS,'algorithm'), **args)
def KeyDerivation(**args):
return Element(qname = (MANIFESTNS,'key-derivation'), **args)
| apache-2.0 |
sureleo/leetcode | archive/python/math/MaxPointsOnALine.py | 2 | 1654 | # Definition for a point
class Point:
def __init__(self, a=0, b=0):
self.x = a
self.y = b
class Solution:
# @param points, a list of Points
# @return an integer
def maxPoints(self, points):
result = 0
for i in xrange(len(points)):
d = {}
duplicate = 0
for j in xrange(len(points)):
if i == j:
d["me"] = 1
continue
deltax = points[i].x - points[j].x
deltay = points[i].y - points[j].y
if deltax == 0:
if deltay == 0:
duplicate += 1
else:
if "inf" not in d:
d["inf"] = 1
else:
d["inf"] += 1
else:
deltay = points[i].y - points[j].y
slope = float(deltay) / float(deltax)
if slope not in d:
d[slope] = 1
else:
d[slope] += 1
for key in d:
                # count the anchor point itself
if key != "me":
d[key] += 1
d[key] += duplicate
result = max(d[key], result)
return result
if __name__ == "__main__":
solution = Solution()
point0 = Point(1, 1)
point1 = Point(1, 1)
point2 = Point(2, 2)
point3 = Point(2, 2)
#points = [point0, point1, point2, point3]
points = [point0]
#points = [point0, point1]
print solution.maxPoints(points)
| mit |
kubeflow/kfp-tekton-backend | sdk/python/kfp/azure.py | 1 | 3009 | # Copyright 2019 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def use_azure_secret(secret_name='azcreds'):
"""An operator that configures the container to use Azure user credentials.
The azcreds secret is created as part of the kubeflow deployment that
stores the client ID and secrets for the kubeflow azure service principal.
    With this service principal, the container can access a range of
    Azure APIs.
"""
def _use_azure_secret(task):
from kubernetes import client as k8s_client
(
task.container
.add_env_variable(
k8s_client.V1EnvVar(
name='AZ_SUBSCRIPTION_ID',
value_from=k8s_client.V1EnvVarSource(
secret_key_ref=k8s_client.V1SecretKeySelector(
name=secret_name,
key='AZ_SUBSCRIPTION_ID'
)
)
)
)
.add_env_variable(
k8s_client.V1EnvVar(
name='AZ_TENANT_ID',
value_from=k8s_client.V1EnvVarSource(
secret_key_ref=k8s_client.V1SecretKeySelector(
name=secret_name,
key='AZ_TENANT_ID'
)
)
)
)
.add_env_variable(
k8s_client.V1EnvVar(
name='AZ_CLIENT_ID',
value_from=k8s_client.V1EnvVarSource(
secret_key_ref=k8s_client.V1SecretKeySelector(
name=secret_name,
key='AZ_CLIENT_ID'
)
)
)
)
.add_env_variable(
k8s_client.V1EnvVar(
name='AZ_CLIENT_SECRET',
value_from=k8s_client.V1EnvVarSource(
secret_key_ref=k8s_client.V1SecretKeySelector(
name=secret_name,
key='AZ_CLIENT_SECRET'
)
)
)
)
)
return task
return _use_azure_secret
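# Illustrative usage sketch, not part of the original module (the pipeline, op
# name and image below are hypothetical; the secret must already exist in the
# cluster):
#
#   import kfp.dsl as dsl
#
#   @dsl.pipeline(name='azure-example')
#   def my_pipeline():
#       op = dsl.ContainerOp(name='step', image='alpine:3.10')
#       op.apply(use_azure_secret('azcreds'))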
| apache-2.0 |
mortenthansen/linux | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
i = 0
while True:
buf = sys.stdin.read(4)
if len(buf) == 0:
break
elif len(buf) != 4:
sys.stdout.write("\n")
sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
sys.exit(1)
if i > 0:
sys.stdout.write(" ")
sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
i += 1
sys.stdout.write("\n")
| gpl-2.0 |
rockerbox/kazoo | docs/conf.py | 8 | 8004 | # -*- coding: utf-8 -*-
#
# kazoo documentation build configuration file, created by
# sphinx-quickstart on Fri Nov 11 13:23:01 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
class Mock(object):
def __init__(self, *args):
pass
def __getattr__(self, name):
return Mock
MOCK_MODULES = ['zookeeper']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'kazoo'
copyright = u'2011-2014, Kazoo team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.0'
# The full version, including alpha/beta/rc tags.
release = '2.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'kazoodoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'kazoo.tex', u'kazoo Documentation',
u'Various Authors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'kazoo', u'kazoo Documentation',
[u'Various Authors'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'kazoo', u'kazoo Documentation', u'Various Authors',
'kazoo', 'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| apache-2.0 |
nikhilprathapani/python-for-android | python3-alpha/python3-src/Lib/ctypes/test/test_struct_fields.py | 264 | 1503 | import unittest
from ctypes import *
class StructFieldsTestCase(unittest.TestCase):
# Structure/Union classes must get 'finalized' sooner or
# later, when one of these things happen:
#
# 1. _fields_ is set.
# 2. An instance is created.
# 3. The type is used as field of another Structure/Union.
# 4. The type is subclassed
#
# When they are finalized, assigning _fields_ is no longer allowed.
def test_1_A(self):
class X(Structure):
pass
self.assertEqual(sizeof(X), 0) # not finalized
X._fields_ = [] # finalized
self.assertRaises(AttributeError, setattr, X, "_fields_", [])
def test_1_B(self):
class X(Structure):
_fields_ = [] # finalized
self.assertRaises(AttributeError, setattr, X, "_fields_", [])
def test_2(self):
class X(Structure):
pass
X()
self.assertRaises(AttributeError, setattr, X, "_fields_", [])
def test_3(self):
class X(Structure):
pass
class Y(Structure):
_fields_ = [("x", X)] # finalizes X
self.assertRaises(AttributeError, setattr, X, "_fields_", [])
def test_4(self):
class X(Structure):
pass
class Y(X):
pass
self.assertRaises(AttributeError, setattr, X, "_fields_", [])
Y._fields_ = []
self.assertRaises(AttributeError, setattr, X, "_fields_", [])
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
kevin-who/socket-chat | node_modules/node-gyp/gyp/pylib/gyp/common.py | 366 | 19638 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import with_statement
import collections
import errno
import filecmp
import os.path
import re
import tempfile
import sys
# A minimal memoizing decorator. It'll blow up if the args aren't immutable,
# among other "problems".
class memoize(object):
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
result = self.func(*args)
self.cache[args] = result
return result
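# Illustrative usage, not part of the original module: results are cached per
# argument tuple, so repeated calls with the same immutable arguments skip the
# wrapped function body.
#
#   @memoize
#   def _realpath(path):
#     return os.path.realpath(path)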
class GypError(Exception):
"""Error class representing an error, which is to be presented
to the user. The main entry point will catch and display this.
"""
pass
def ExceptionAppend(e, msg):
"""Append a message to the given exception's message."""
if not e.args:
e.args = (msg,)
elif len(e.args) == 1:
e.args = (str(e.args[0]) + ' ' + msg,)
else:
e.args = (str(e.args[0]) + ' ' + msg,) + e.args[1:]
def FindQualifiedTargets(target, qualified_list):
"""
Given a list of qualified targets, return the qualified targets for the
specified |target|.
"""
return [t for t in qualified_list if ParseQualifiedTarget(t)[1] == target]
def ParseQualifiedTarget(target):
# Splits a qualified target into a build file, target name and toolset.
# NOTE: rsplit is used to disambiguate the Windows drive letter separator.
target_split = target.rsplit(':', 1)
if len(target_split) == 2:
[build_file, target] = target_split
else:
build_file = None
target_split = target.rsplit('#', 1)
if len(target_split) == 2:
[target, toolset] = target_split
else:
toolset = None
return [build_file, target, toolset]
def ResolveTarget(build_file, target, toolset):
# This function resolves a target into a canonical form:
# - a fully defined build file, either absolute or relative to the current
# directory
# - a target name
# - a toolset
#
# build_file is the file relative to which 'target' is defined.
# target is the qualified target.
# toolset is the default toolset for that target.
[parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target)
if parsed_build_file:
if build_file:
# If a relative path, parsed_build_file is relative to the directory
# containing build_file. If build_file is not in the current directory,
# parsed_build_file is not a usable path as-is. Resolve it by
# interpreting it as relative to build_file. If parsed_build_file is
# absolute, it is usable as a path regardless of the current directory,
# and os.path.join will return it as-is.
build_file = os.path.normpath(os.path.join(os.path.dirname(build_file),
parsed_build_file))
# Further (to handle cases like ../cwd), make it relative to cwd)
if not os.path.isabs(build_file):
build_file = RelativePath(build_file, '.')
else:
build_file = parsed_build_file
if parsed_toolset:
toolset = parsed_toolset
return [build_file, target, toolset]
def BuildFile(fully_qualified_target):
# Extracts the build file from the fully qualified target.
return ParseQualifiedTarget(fully_qualified_target)[0]
def GetEnvironFallback(var_list, default):
"""Look up a key in the environment, with fallback to secondary keys
and finally falling back to a default value."""
for var in var_list:
if var in os.environ:
return os.environ[var]
return default
def QualifiedTarget(build_file, target, toolset):
# "Qualified" means the file that a target was defined in and the target
# name, separated by a colon, suffixed by a # and the toolset name:
# /path/to/file.gyp:target_name#toolset
fully_qualified = build_file + ':' + target
if toolset:
fully_qualified = fully_qualified + '#' + toolset
return fully_qualified
@memoize
def RelativePath(path, relative_to):
# Assuming both |path| and |relative_to| are relative to the current
# directory, returns a relative path that identifies path relative to
# relative_to.
# Convert to normalized (and therefore absolute paths).
path = os.path.realpath(path)
relative_to = os.path.realpath(relative_to)
# On Windows, we can't create a relative path to a different drive, so just
# use the absolute path.
if sys.platform == 'win32':
if (os.path.splitdrive(path)[0].lower() !=
os.path.splitdrive(relative_to)[0].lower()):
return path
# Split the paths into components.
path_split = path.split(os.path.sep)
relative_to_split = relative_to.split(os.path.sep)
# Determine how much of the prefix the two paths share.
prefix_len = len(os.path.commonprefix([path_split, relative_to_split]))
# Put enough ".." components to back up out of relative_to to the common
# prefix, and then append the part of path_split after the common prefix.
relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \
path_split[prefix_len:]
if len(relative_split) == 0:
# The paths were the same.
return ''
# Turn it back into a string and we're done.
return os.path.join(*relative_split)
@memoize
def InvertRelativePath(path, toplevel_dir=None):
"""Given a path like foo/bar that is relative to toplevel_dir, return
the inverse relative path back to the toplevel_dir.
E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path)))
should always produce the empty string, unless the path contains symlinks.
"""
if not path:
return path
toplevel_dir = '.' if toplevel_dir is None else toplevel_dir
return RelativePath(toplevel_dir, os.path.join(toplevel_dir, path))
def FixIfRelativePath(path, relative_to):
# Like RelativePath but returns |path| unchanged if it is absolute.
if os.path.isabs(path):
return path
return RelativePath(path, relative_to)
def UnrelativePath(path, relative_to):
# Assuming that |relative_to| is relative to the current directory, and |path|
# is a path relative to the dirname of |relative_to|, returns a path that
# identifies |path| relative to the current directory.
rel_dir = os.path.dirname(relative_to)
return os.path.normpath(os.path.join(rel_dir, path))
# re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at
# http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02
# and the documentation for various shells.
# _quote is a pattern that should match any argument that needs to be quoted
# with double-quotes by EncodePOSIXShellArgument. It matches the following
# characters appearing anywhere in an argument:
# \t, \n, space parameter separators
# # comments
# $ expansions (quoted to always expand within one argument)
# % called out by IEEE 1003.1 XCU.2.2
# & job control
# ' quoting
# (, ) subshell execution
# *, ?, [ pathname expansion
# ; command delimiter
# <, >, | redirection
# = assignment
# {, } brace expansion (bash)
# ~ tilde expansion
# It also matches the empty string, because "" (or '') is the only way to
# represent an empty string literal argument to a POSIX shell.
#
# This does not match the characters in _escape, because those need to be
# backslash-escaped regardless of whether they appear in a double-quoted
# string.
_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$')
# _escape is a pattern that should match any character that needs to be
# escaped with a backslash, whether or not the argument matched the _quote
# pattern. _escape is used with re.sub to backslash anything in _escape's
# first match group, hence the (parentheses) in the regular expression.
#
# _escape matches the following characters appearing anywhere in an argument:
# " to prevent POSIX shells from interpreting this character for quoting
# \ to prevent POSIX shells from interpreting this character for escaping
# ` to prevent POSIX shells from interpreting this character for command
# substitution
# Missing from this list is $, because the desired behavior of
# EncodePOSIXShellArgument is to permit parameter (variable) expansion.
#
# Also missing from this list is !, which bash will interpret as the history
# expansion character when history is enabled. bash does not enable history
# by default in non-interactive shells, so this is not thought to be a problem.
# ! was omitted from this list because bash interprets "\!" as a literal string
# including the backslash character (avoiding history expansion but retaining
# the backslash), which would not be correct for argument encoding. Handling
# this case properly would also be problematic because bash allows the history
# character to be changed with the histchars shell variable. Fortunately,
# as history is not enabled in non-interactive shells and
# EncodePOSIXShellArgument is only expected to encode for non-interactive
# shells, there is no room for error here by ignoring !.
_escape = re.compile(r'(["\\`])')
def EncodePOSIXShellArgument(argument):
"""Encodes |argument| suitably for consumption by POSIX shells.
argument may be quoted and escaped as necessary to ensure that POSIX shells
treat the returned value as a literal representing the argument passed to
this function. Parameter (variable) expansions beginning with $ are allowed
to remain intact without escaping the $, to allow the argument to contain
references to variables to be expanded by the shell.
"""
if not isinstance(argument, str):
argument = str(argument)
if _quote.search(argument):
quote = '"'
else:
quote = ''
encoded = quote + re.sub(_escape, r'\\\1', argument) + quote
return encoded
def EncodePOSIXShellList(list):
"""Encodes |list| suitably for consumption by POSIX shells.
Returns EncodePOSIXShellArgument for each item in list, and joins them
together using the space character as an argument separator.
"""
encoded_arguments = []
for argument in list:
encoded_arguments.append(EncodePOSIXShellArgument(argument))
return ' '.join(encoded_arguments)
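# Illustrative sketch of the encoding rules above (not part of the original
# gyp source; inputs are assumed examples, outputs assume a POSIX shell consumer):
#   EncodePOSIXShellArgument('hello')         -> hello           (nothing to quote)
#   EncodePOSIXShellArgument('hello world')   -> "hello world"   (space triggers quoting)
#   EncodePOSIXShellArgument('say "hi"')      -> "say \"hi\""    (double quote backslash-escaped)
#   EncodePOSIXShellArgument('$HOME/sub')     -> "$HOME/sub"     ($ quoted but not escaped,
#                                                                 so the shell still expands it)
#   EncodePOSIXShellArgument('')              -> ""              (empty argument stays representable)
#   EncodePOSIXShellList(['ls', '-l', 'a b']) -> ls -l "a b"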
def DeepDependencyTargets(target_dicts, roots):
"""Returns the recursive list of target dependencies."""
dependencies = set()
pending = set(roots)
while pending:
# Pluck out one.
r = pending.pop()
# Skip if visited already.
if r in dependencies:
continue
# Add it.
dependencies.add(r)
# Add its children.
spec = target_dicts[r]
pending.update(set(spec.get('dependencies', [])))
pending.update(set(spec.get('dependencies_original', [])))
return list(dependencies - set(roots))
def BuildFileTargets(target_list, build_file):
"""From a target_list, returns the subset from the specified build_file.
"""
return [p for p in target_list if BuildFile(p) == build_file]
def AllTargets(target_list, target_dicts, build_file):
"""Returns all targets (direct and dependencies) for the specified build_file.
"""
bftargets = BuildFileTargets(target_list, build_file)
deptargets = DeepDependencyTargets(target_dicts, bftargets)
return bftargets + deptargets
def WriteOnDiff(filename):
"""Write to a file only if the new contents differ.
Arguments:
filename: name of the file to potentially write to.
Returns:
A file like object which will write to temporary file and only overwrite
the target if it differs (on close).
"""
class Writer(object):
"""Wrapper around file which only covers the target if it differs."""
def __init__(self):
# Pick temporary file.
tmp_fd, self.tmp_path = tempfile.mkstemp(
suffix='.tmp',
prefix=os.path.split(filename)[1] + '.gyp.',
dir=os.path.split(filename)[0])
try:
self.tmp_file = os.fdopen(tmp_fd, 'wb')
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
def __getattr__(self, attrname):
# Delegate everything else to self.tmp_file
return getattr(self.tmp_file, attrname)
def close(self):
try:
# Close tmp file.
self.tmp_file.close()
# Determine if different.
same = False
try:
same = filecmp.cmp(self.tmp_path, filename, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(self.tmp_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(077)
os.umask(umask)
os.chmod(self.tmp_path, 0666 & ~umask)
if sys.platform == 'win32' and os.path.exists(filename):
# NOTE: on windows (but not cygwin) rename will not replace an
# existing file, so it must be preceded with a remove. Sadly there
# is no way to make the switch atomic.
os.remove(filename)
os.rename(self.tmp_path, filename)
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
return Writer()
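# Minimal usage sketch for WriteOnDiff (illustrative only; 'build.ninja' is an
# assumed example path, not referenced elsewhere in this module):
#   out = WriteOnDiff('build.ninja')
#   out.write('# generated rules\n')
#   out.close()   # the target file is only replaced if the new contents differ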
def EnsureDirExists(path):
"""Make sure the directory for |path| exists."""
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
def GetFlavor(params):
"""Returns |params.flavor| if it's set, the system's default flavor else."""
flavors = {
'cygwin': 'win',
'win32': 'win',
'darwin': 'mac',
}
if 'flavor' in params:
return params['flavor']
if sys.platform in flavors:
return flavors[sys.platform]
if sys.platform.startswith('sunos'):
return 'solaris'
if sys.platform.startswith('freebsd'):
return 'freebsd'
if sys.platform.startswith('openbsd'):
return 'openbsd'
if sys.platform.startswith('aix'):
return 'aix'
return 'linux'
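# Illustrative expectations for GetFlavor (assumed inputs, not part of the
# original source):
#   GetFlavor({'flavor': 'win'})             -> 'win'    (an explicit flavor always wins)
#   GetFlavor({}) with sys.platform 'darwin' -> 'mac'
#   GetFlavor({}) with sys.platform 'linux2' -> 'linux'  (fallback)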
def CopyTool(flavor, out_path):
"""Finds (flock|mac|win)_tool.gyp in the gyp directory and copies it
to |out_path|."""
# aix and solaris just need flock emulation. mac and win use more complicated
# support scripts.
prefix = {
'aix': 'flock',
'solaris': 'flock',
'mac': 'mac',
'win': 'win'
}.get(flavor, None)
if not prefix:
return
# Slurp input file.
source_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix)
with open(source_path) as source_file:
source = source_file.readlines()
# Add header and write it out.
tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix)
with open(tool_path, 'w') as tool_file:
tool_file.write(
''.join([source[0], '# Generated by gyp. Do not edit.\n'] + source[1:]))
# Make file executable.
os.chmod(tool_path, 0755)
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
if idfun is None:
idfun = lambda x: x
seen = {}
result = []
for item in seq:
marker = idfun(item)
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
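# Illustrative sketch for uniquer (assumed inputs, not part of the recipe):
#   uniquer([1, 2, 1, 3])                     -> [1, 2, 3]   (order preserved)
#   uniquer(['A', 'a', 'b'], idfun=str.lower) -> ['A', 'b']  (first occurrence wins)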
# Based on http://code.activestate.com/recipes/576694/.
class OrderedSet(collections.MutableSet):
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev_item, next_item = self.map.pop(key)
prev_item[2] = next_item
next_item[1] = prev_item
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
# The second argument is an addition that causes a pylint warning.
def pop(self, last=True): # pylint: disable=W0221
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
# Extensions to the recipe.
def update(self, iterable):
for i in iterable:
if i not in self:
self.add(i)
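# Minimal usage sketch for OrderedSet (illustrative only):
#   s = OrderedSet(['b', 'a', 'b', 'c'])
#   list(s)          -> ['b', 'a', 'c']   (insertion order kept, duplicates dropped)
#   s.discard('b')
#   list(s)          -> ['a', 'c']
#   s.pop()          -> 'c'               (pops from the end by default)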
class CycleError(Exception):
"""An exception raised when an unexpected cycle is detected."""
def __init__(self, nodes):
self.nodes = nodes
def __str__(self):
return 'CycleError: cycle involving: ' + str(self.nodes)
def TopologicallySorted(graph, get_edges):
r"""Topologically sort based on a user provided edge definition.
Args:
graph: A list of node names.
get_edges: A function mapping from node name to a hashable collection
of node names which this node has outgoing edges to.
Returns:
    A list containing all of the nodes in graph in topological order.
It is assumed that calling get_edges once for each node and caching is
cheaper than repeatedly calling get_edges.
Raises:
CycleError in the event of a cycle.
Example:
graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'}
def GetEdges(node):
      return re.findall(r'\$\(([^)]*)\)', graph[node])
print TopologicallySorted(graph.keys(), GetEdges)
==>
    ['a', 'c', 'b']
"""
get_edges = memoize(get_edges)
visited = set()
visiting = set()
ordered_nodes = []
def Visit(node):
if node in visiting:
raise CycleError(visiting)
if node in visited:
return
visited.add(node)
visiting.add(node)
for neighbor in get_edges(node):
Visit(neighbor)
visiting.remove(node)
ordered_nodes.insert(0, node)
for node in sorted(graph):
Visit(node)
return ordered_nodes
def CrossCompileRequested():
# TODO: figure out how to not build extra host objects in the
# non-cross-compile case when this is enabled, and enable unconditionally.
return (os.environ.get('GYP_CROSSCOMPILE') or
os.environ.get('AR_host') or
os.environ.get('CC_host') or
os.environ.get('CXX_host') or
os.environ.get('AR_target') or
os.environ.get('CC_target') or
os.environ.get('CXX_target'))
| mit |
rokups/Urho3D | Source/Tools/BindTool/util.py | 1 | 2213 | import os
import subprocess
def cpp_demangle(name):
return subprocess.check_output(['c++filt', name]).decode('utf-8').strip()
def split_identifier(identifier):
"""Splits string at _ or between lower case and uppercase letters."""
prev_split = 0
parts = []
if '_' in identifier:
parts = [s.lower() for s in identifier.split('_')]
else:
for i in range(len(identifier) - 1):
if identifier[i + 1].isupper():
parts.append(identifier[prev_split:i + 1].lower())
prev_split = i + 1
last = identifier[prev_split:]
if last:
parts.append(last.lower())
return parts
def camel_case(identifier):
    return_string = False
    if isinstance(identifier, str):
        identifier = identifier.strip('_')
if identifier.isupper() and '_' not in identifier:
identifier = identifier.lower()
name_parts = split_identifier(identifier)
return_string = True
elif isinstance(identifier, (list, tuple)):
name_parts = identifier
else:
raise ValueError('identifier must be a list, tuple or string.')
for i in range(len(name_parts)):
name_parts[i] = name_parts[i][0].upper() + name_parts[i][1:]
if return_string:
return ''.join(name_parts)
return name_parts
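# Illustrative conversions (assumed identifiers, not part of the original tool):
#   split_identifier('apply_force')  -> ['apply', 'force']
#   split_identifier('applyForce')   -> ['apply', 'force']
#   camel_case('apply_force')        -> 'ApplyForce'
#   camel_case('applyForce')         -> 'ApplyForce'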
def get_subsystem_name(src_path):
cwd = os.path.abspath('.')
rel_path = os.path.relpath(src_path, cwd)
subsystem = rel_path[:rel_path.index('/')]
if subsystem == '..':
subsystem = 'global'
return subsystem
def has_base_class(node, base_class_name):
for base in node.bases:
if base.cls is None or isinstance(base.cls, str):
continue
if base.cls.infer_fqn() == base_class_name:
return True
elif has_base_class(base.cls, base_class_name):
return True
return False
def find_value(n):
inner = n.get('inner', ())
if len(inner) == 1:
n = inner[0]
if n['kind'] in ('FloatingLiteral', 'IntegerLiteral', 'ImplicitCastExpr', 'ConstantExpr'):
value = n.get('value')
if value:
return value
return find_value(n)
| mit |
mhnatiuk/phd_sociology_of_religion | scrapper/build/Twisted/twisted/trial/_dist/workerreporter.py | 43 | 3922 | # -*- test-case-name: twisted.trial._dist.test.test_workerreporter -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test reporter forwarding test results over trial distributed AMP commands.
@since: 12.3
"""
from twisted.python.failure import Failure
from twisted.python.reflect import qual
from twisted.trial.reporter import TestResult
from twisted.trial._dist import managercommands
class WorkerReporter(TestResult):
"""
Reporter for trial's distributed workers. We send things not through a
stream, but through an C{AMP} protocol's C{callRemote} method.
"""
def __init__(self, ampProtocol):
"""
@param ampProtocol: The communication channel with the trial
distributed manager which collects all test results.
@type ampProtocol: C{AMP}
"""
super(WorkerReporter, self).__init__()
self.ampProtocol = ampProtocol
def _getFailure(self, error):
"""
Convert a C{sys.exc_info()}-style tuple to a L{Failure}, if necessary.
"""
if isinstance(error, tuple):
return Failure(error[1], error[0], error[2])
return error
def _getFrames(self, failure):
"""
Extract frames from a C{Failure} instance.
"""
frames = []
for frame in failure.frames:
frames.extend([frame[0], frame[1], str(frame[2])])
return frames
def addSuccess(self, test):
"""
Send a success over.
"""
super(WorkerReporter, self).addSuccess(test)
self.ampProtocol.callRemote(managercommands.AddSuccess,
testName=test.id())
def addError(self, test, error):
"""
Send an error over.
"""
super(WorkerReporter, self).addError(test, error)
failure = self._getFailure(error)
frames = self._getFrames(failure)
self.ampProtocol.callRemote(managercommands.AddError,
testName=test.id(),
error=failure.getErrorMessage(),
errorClass=qual(failure.type),
frames=frames)
def addFailure(self, test, fail):
"""
Send a Failure over.
"""
super(WorkerReporter, self).addFailure(test, fail)
failure = self._getFailure(fail)
frames = self._getFrames(failure)
self.ampProtocol.callRemote(managercommands.AddFailure,
testName=test.id(),
fail=failure.getErrorMessage(),
failClass=qual(failure.type),
frames=frames)
def addSkip(self, test, reason):
"""
Send a skip over.
"""
super(WorkerReporter, self).addSkip(test, reason)
self.ampProtocol.callRemote(managercommands.AddSkip,
testName=test.id(), reason=str(reason))
def addExpectedFailure(self, test, error, todo):
"""
Send an expected failure over.
"""
super(WorkerReporter, self).addExpectedFailure(test, error, todo)
self.ampProtocol.callRemote(managercommands.AddExpectedFailure,
testName=test.id(),
error=error.getErrorMessage(),
todo=todo.reason)
def addUnexpectedSuccess(self, test, todo):
"""
Send an unexpected success over.
"""
super(WorkerReporter, self).addUnexpectedSuccess(test, todo)
self.ampProtocol.callRemote(managercommands.AddUnexpectedSuccess,
testName=test.id(), todo=todo.reason)
def printSummary(self):
"""
I{Don't} print a summary
"""
| gpl-2.0 |
arowla/commando | commando/tests/test_conf.py | 5 | 1988 | from commando.conf import AutoProp, ConfigDict
class TestClass(AutoProp):
@AutoProp.default
def source(self):
return 'source'
def test_auto():
t = TestClass()
assert t.source == 'source'
def test_override():
t = TestClass()
t.source = 'source1'
assert t.source == 'source1'
t.source = 'source2'
assert t.source == 'source2'
t.source = None
assert t.source == 'source'
def test_init():
c = ConfigDict({"a": 1})
assert c.a == 1
assert c["a"] == 1
def test_change():
c = ConfigDict({"a": 1})
assert c.a == 1
c.a = 2
assert c["a"] == 2
def test_two_levels():
c = ConfigDict({"a": 1, "b": {"c": 3}})
assert c.b.c == 3
def test_two_levels_assignment():
c = ConfigDict({"a": 1, "b": {"c": 3}})
d = {"d": 5}
c.b = d
assert c.b.d == 5
assert c.b == d
def test_two_levels_patch():
c = ConfigDict({"a": 1, "b": {"c": 3}})
d = {"d": 5}
c.b.d = d
assert c.b.c == 3
assert c.b.d == d
def test_copy():
c = ConfigDict({"a": 1, "b": {"c": 3}})
d = c.copy()
assert c == d
c.b.c = 4
assert c != d
def test_list():
c = ConfigDict({"a": 1, "b": {"c": 3}})
c.d = [dict(e=1), dict(f=2)]
assert c.d[0].e == 1
assert c.d[1].f == 2
def test_operator():
c = ConfigDict({"a": 1, "b": {"c": 3}})
from operator import attrgetter
assert attrgetter('b.c')(c) == 3
def test_patch_simple():
c = ConfigDict({"a": 1, "b": {"c": 3, "e": 4}})
d = {"b": {"e": 5}}
c.patch(d)
assert c.b.c == 3
assert c.b.e == 5
def test_patch_complex():
c = ConfigDict({
"a": 1,
"b": {"x": 3, "y": 4},
"c": {"x": 5, "y": 6},
"d": {"x": 7, "y": 8}
})
d = {"a": 2, "b": {"z": 5}, "c": [1, 2], "d": {"y": 9}}
c.patch(d)
assert c.a == 2
assert c.b.x == 3
assert c.b.y == 4
assert c.b.z == 5
assert c.c == [1, 2]
assert c.d.x == 7
assert c.d.y == 9
| mit |
jss-emr/openerp-7-src | openerp/addons/l10n_in_hr_payroll/wizard/hr_salary_employee_bymonth.py | 51 | 2829 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class hr_salary_employee_bymonth(osv.osv_memory):
_name = 'hr.salary.employee.month'
_description = 'Hr Salary Employee By Month Report'
_columns = {
'start_date': fields.date('Start Date', required=True),
'end_date': fields.date('End Date', required=True),
'employee_ids': fields.many2many('hr.employee', 'payroll_year_rel', 'payroll_year_id', 'employee_id', 'Employees', required=True),
'category_id': fields.many2one('hr.salary.rule.category', 'Category', required=True),
}
def _get_default_category(self, cr, uid, context=None):
category_ids = self.pool.get('hr.salary.rule.category').search(cr, uid, [('code', '=', 'NET')], context=context)
return category_ids and category_ids[0] or False
_defaults = {
'start_date': lambda *a: time.strftime('%Y-01-01'),
'end_date': lambda *a: time.strftime('%Y-%m-%d'),
'category_id': _get_default_category
}
def print_report(self, cr, uid, ids, context=None):
"""
To get the date and print the report
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
@return: return report
"""
if context is None:
context = {}
datas = {'ids': context.get('active_ids', [])}
res = self.read(cr, uid, ids, context=context)
res = res and res[0] or {}
datas.update({'form': res})
return {
'type': 'ir.actions.report.xml',
'report_name': 'salary.employee.bymonth',
'datas': datas,
}
hr_salary_employee_bymonth()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 |
VaibhavAgarwalVA/sympy | sympy/physics/optics/medium.py | 93 | 4875 | """
**Contains**
* Medium
"""
from __future__ import division
__all__ = ['Medium']
from sympy import Symbol, sympify, sqrt
from sympy.physics.units import c, u0, e0
class Medium(Symbol):
"""
This class represents an optical medium. The prime reason to implement this is
    to facilitate refraction, Fermat's principle, etc.
An optical medium is a material through which electromagnetic waves propagate.
The permittivity and permeability of the medium define how electromagnetic
waves propagate in it.
Parameters
==========
name: string
The display name of the Medium.
permittivity: Sympifyable
Electric permittivity of the space.
permeability: Sympifyable
Magnetic permeability of the space.
n: Sympifyable
Index of refraction of the medium.
Examples
========
>>> from sympy.abc import epsilon, mu
>>> from sympy.physics.optics import Medium
>>> m1 = Medium('m1')
>>> m2 = Medium('m2', epsilon, mu)
>>> m1.intrinsic_impedance
149896229*pi*kg*m**2/(1250000*A**2*s**3)
>>> m2.refractive_index
299792458*m*sqrt(epsilon*mu)/s
References
==========
.. [1] http://en.wikipedia.org/wiki/Optical_medium
"""
def __new__(cls, name, permittivity=None, permeability=None, n=None):
obj = super(Medium, cls).__new__(cls, name)
obj._permittivity = sympify(permittivity)
obj._permeability = sympify(permeability)
obj._n = sympify(n)
if n is not None:
if permittivity != None and permeability == None:
obj._permeability = n**2/(c**2*obj._permittivity)
if permeability != None and permittivity == None:
obj._permittivity = n**2/(c**2*obj._permeability)
            if permittivity != None and permeability != None:
if abs(n - c*sqrt(obj._permittivity*obj._permeability)) > 1e-6:
raise ValueError("Values are not consistent.")
elif permittivity is not None and permeability is not None:
obj._n = c*sqrt(permittivity*permeability)
elif permittivity is None and permeability is None:
obj._permittivity = e0
obj._permeability = u0
return obj
@property
def intrinsic_impedance(self):
"""
Returns intrinsic impedance of the medium.
The intrinsic impedance of a medium is the ratio of the
transverse components of the electric and magnetic fields
of the electromagnetic wave travelling in the medium.
In a region with no electrical conductivity it simplifies
to the square root of ratio of magnetic permeability to
electric permittivity.
Examples
========
>>> from sympy.physics.optics import Medium
>>> m = Medium('m')
>>> m.intrinsic_impedance
149896229*pi*kg*m**2/(1250000*A**2*s**3)
"""
return sqrt(self._permeability/self._permittivity)
@property
def speed(self):
"""
Returns speed of the electromagnetic wave travelling in the medium.
Examples
========
>>> from sympy.physics.optics import Medium
>>> m = Medium('m')
>>> m.speed
299792458*m/s
"""
return 1/sqrt(self._permittivity*self._permeability)
@property
def refractive_index(self):
"""
Returns refractive index of the medium.
Examples
========
>>> from sympy.physics.optics import Medium
>>> m = Medium('m')
>>> m.refractive_index
1
"""
return c/self.speed
@property
def permittivity(self):
"""
Returns electric permittivity of the medium.
Examples
========
>>> from sympy.physics.optics import Medium
>>> m = Medium('m')
>>> m.permittivity
625000*A**2*s**4/(22468879468420441*pi*kg*m**3)
"""
return self._permittivity
@property
def permeability(self):
"""
Returns magnetic permeability of the medium.
Examples
========
>>> from sympy.physics.optics import Medium
>>> m = Medium('m')
>>> m.permeability
pi*kg*m/(2500000*A**2*s**2)
"""
return self._permeability
def __str__(self):
from sympy.printing import sstr
return type(self).__name__ + sstr(self.args)
def __lt__(self, other):
"""
Compares based on refractive index of the medium.
"""
return self.refractive_index < other.refractive_index
def __gt__(self, other):
return not self.__lt__(other)
def __eq__(self, other):
return self.refractive_index == other.refractive_index
def __ne__(self, other):
return not self.__eq__(other)
| bsd-3-clause |
zzzirk/boto | boto/ses/exceptions.py | 151 | 1830 | """
Various exceptions that are specific to the SES module.
"""
from boto.exception import BotoServerError
class SESError(BotoServerError):
"""
Sub-class all SES-related errors from here. Don't raise this error
directly from anywhere. The only thing this gets us is the ability to
catch SESErrors separately from the more generic, top-level
BotoServerError exception.
"""
pass
class SESAddressNotVerifiedError(SESError):
"""
Raised when a "Reply-To" address has not been validated in SES yet.
"""
pass
class SESIdentityNotVerifiedError(SESError):
"""
Raised when an identity (domain or address) has not been verified in SES yet.
"""
pass
class SESDomainNotConfirmedError(SESError):
"""
"""
pass
class SESAddressBlacklistedError(SESError):
"""
After you attempt to send mail to an address, and delivery repeatedly
fails, said address is blacklisted for at least 24 hours. The blacklisting
eventually expires, and you are able to attempt delivery again. If you
attempt to send mail to a blacklisted email, this is raised.
"""
pass
class SESDailyQuotaExceededError(SESError):
"""
Your account's daily (rolling 24 hour total) allotment of outbound emails
has been exceeded.
"""
pass
class SESMaxSendingRateExceededError(SESError):
"""
Your account's requests/second limit has been exceeded.
"""
pass
class SESDomainEndsWithDotError(SESError):
"""
Recipient's email address' domain ends with a period/dot.
"""
pass
class SESLocalAddressCharacterError(SESError):
"""
An address contained a control or whitespace character.
"""
pass
class SESIllegalAddressError(SESError):
"""
Raised when an illegal address is encountered.
"""
pass
| mit |
robinro/ansible | lib/ansible/playbook/loop_control.py | 66 | 1328 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
class LoopControl(Base):
_loop_var = FieldAttribute(isa='str')
_label = FieldAttribute(isa='str')
_pause = FieldAttribute(isa='int')
def __init__(self):
super(LoopControl, self).__init__()
@staticmethod
def load(data, variable_manager=None, loader=None):
t = LoopControl()
return t.load_data(data, variable_manager=variable_manager, loader=loader)
| gpl-3.0 |
soldag/home-assistant | homeassistant/components/nexia/sensor.py | 10 | 7063 | """Support for Nexia / Trane XL Thermostats."""
from nexia.const import UNIT_CELSIUS
from homeassistant.const import (
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE,
PERCENTAGE,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from .const import DOMAIN, NEXIA_DEVICE, UPDATE_COORDINATOR
from .entity import NexiaThermostatEntity, NexiaThermostatZoneEntity
from .util import percent_conv
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up sensors for a Nexia device."""
nexia_data = hass.data[DOMAIN][config_entry.entry_id]
nexia_home = nexia_data[NEXIA_DEVICE]
coordinator = nexia_data[UPDATE_COORDINATOR]
entities = []
# Thermostat / System Sensors
for thermostat_id in nexia_home.get_thermostat_ids():
thermostat = nexia_home.get_thermostat_by_id(thermostat_id)
entities.append(
NexiaThermostatSensor(
coordinator,
thermostat,
"get_system_status",
"System Status",
None,
None,
)
)
# Air cleaner
entities.append(
NexiaThermostatSensor(
coordinator,
thermostat,
"get_air_cleaner_mode",
"Air Cleaner Mode",
None,
None,
)
)
# Compressor Speed
if thermostat.has_variable_speed_compressor():
entities.append(
NexiaThermostatSensor(
coordinator,
thermostat,
"get_current_compressor_speed",
"Current Compressor Speed",
None,
PERCENTAGE,
percent_conv,
)
)
entities.append(
NexiaThermostatSensor(
coordinator,
thermostat,
"get_requested_compressor_speed",
"Requested Compressor Speed",
None,
PERCENTAGE,
percent_conv,
)
)
# Outdoor Temperature
if thermostat.has_outdoor_temperature():
unit = (
TEMP_CELSIUS
if thermostat.get_unit() == UNIT_CELSIUS
else TEMP_FAHRENHEIT
)
entities.append(
NexiaThermostatSensor(
coordinator,
thermostat,
"get_outdoor_temperature",
"Outdoor Temperature",
DEVICE_CLASS_TEMPERATURE,
unit,
)
)
# Relative Humidity
if thermostat.has_relative_humidity():
entities.append(
NexiaThermostatSensor(
coordinator,
thermostat,
"get_relative_humidity",
"Relative Humidity",
DEVICE_CLASS_HUMIDITY,
PERCENTAGE,
percent_conv,
)
)
# Zone Sensors
for zone_id in thermostat.get_zone_ids():
zone = thermostat.get_zone_by_id(zone_id)
unit = (
TEMP_CELSIUS
if thermostat.get_unit() == UNIT_CELSIUS
else TEMP_FAHRENHEIT
)
# Temperature
entities.append(
NexiaThermostatZoneSensor(
coordinator,
zone,
"get_temperature",
"Temperature",
DEVICE_CLASS_TEMPERATURE,
unit,
None,
)
)
# Zone Status
entities.append(
NexiaThermostatZoneSensor(
coordinator,
zone,
"get_status",
"Zone Status",
None,
None,
)
)
# Setpoint Status
entities.append(
NexiaThermostatZoneSensor(
coordinator,
zone,
"get_setpoint_status",
"Zone Setpoint Status",
None,
None,
)
)
async_add_entities(entities, True)
class NexiaThermostatSensor(NexiaThermostatEntity):
"""Provides Nexia thermostat sensor support."""
def __init__(
self,
coordinator,
thermostat,
sensor_call,
sensor_name,
sensor_class,
sensor_unit,
modifier=None,
):
"""Initialize the sensor."""
super().__init__(
coordinator,
thermostat,
name=f"{thermostat.get_name()} {sensor_name}",
unique_id=f"{thermostat.thermostat_id}_{sensor_call}",
)
self._call = sensor_call
self._class = sensor_class
self._state = None
self._unit_of_measurement = sensor_unit
self._modifier = modifier
@property
def device_class(self):
"""Return the device class of the sensor."""
return self._class
@property
def state(self):
"""Return the state of the sensor."""
val = getattr(self._thermostat, self._call)()
if self._modifier:
val = self._modifier(val)
if isinstance(val, float):
val = round(val, 1)
return val
@property
def unit_of_measurement(self):
"""Return the unit of measurement this sensor expresses itself in."""
return self._unit_of_measurement
class NexiaThermostatZoneSensor(NexiaThermostatZoneEntity):
"""Nexia Zone Sensor Support."""
def __init__(
self,
coordinator,
zone,
sensor_call,
sensor_name,
sensor_class,
sensor_unit,
modifier=None,
):
"""Create a zone sensor."""
super().__init__(
coordinator,
zone,
name=f"{zone.get_name()} {sensor_name}",
unique_id=f"{zone.zone_id}_{sensor_call}",
)
self._call = sensor_call
self._class = sensor_class
self._state = None
self._unit_of_measurement = sensor_unit
self._modifier = modifier
@property
def device_class(self):
"""Return the device class of the sensor."""
return self._class
@property
def state(self):
"""Return the state of the sensor."""
val = getattr(self._zone, self._call)()
if self._modifier:
val = self._modifier(val)
if isinstance(val, float):
val = round(val, 1)
return val
@property
def unit_of_measurement(self):
"""Return the unit of measurement this sensor expresses itself in."""
return self._unit_of_measurement
| apache-2.0 |
samsu/neutron | tests/unit/services/vpn/test_vpnaas_driver_plugin.py | 12 | 7430 | # Copyright 2013, Nachi Ueno, NTT I3, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
from neutron.common import constants
from neutron import context
from neutron.db.vpn import vpn_validator
from neutron import manager
from neutron.plugins.common import constants as p_constants
from neutron.services.vpn.service_drivers import ipsec as ipsec_driver
from neutron.tests.unit.db.vpn import test_db_vpnaas
from neutron.tests.unit.openvswitch import test_agent_scheduler
from neutron.tests.unit import test_agent_ext_plugin
FAKE_HOST = test_agent_ext_plugin.L3_HOSTA
VPN_DRIVER_CLASS = 'neutron.services.vpn.plugin.VPNDriverPlugin'
class TestVPNDriverPlugin(test_db_vpnaas.TestVpnaas,
test_agent_scheduler.AgentSchedulerTestMixIn,
test_agent_ext_plugin.AgentDBTestMixIn):
def setUp(self):
self.adminContext = context.get_admin_context()
driver_cls_p = mock.patch(
'neutron.services.vpn.'
'service_drivers.ipsec.IPsecVPNDriver')
driver_cls = driver_cls_p.start()
self.driver = mock.Mock()
self.driver.service_type = ipsec_driver.IPSEC
self.driver.validator = vpn_validator.VpnReferenceValidator()
driver_cls.return_value = self.driver
super(TestVPNDriverPlugin, self).setUp(
vpnaas_plugin=VPN_DRIVER_CLASS)
def test_create_ipsec_site_connection(self, **extras):
super(TestVPNDriverPlugin, self).test_create_ipsec_site_connection()
self.driver.create_ipsec_site_connection.assert_called_once_with(
mock.ANY, mock.ANY)
self.driver.delete_ipsec_site_connection.assert_called_once_with(
mock.ANY, mock.ANY)
def test_delete_vpnservice(self, **extras):
super(TestVPNDriverPlugin, self).test_delete_vpnservice()
self.driver.delete_vpnservice.assert_called_once_with(
mock.ANY, mock.ANY)
def test_update_vpnservice(self, **extras):
super(TestVPNDriverPlugin, self).test_update_vpnservice()
self.driver.update_vpnservice.assert_called_once_with(
mock.ANY, mock.ANY, mock.ANY)
@contextlib.contextmanager
def vpnservice_set(self):
"""Test case to create a ipsec_site_connection."""
vpnservice_name = "vpn1"
ipsec_site_connection_name = "ipsec_site_connection"
ikename = "ikepolicy1"
ipsecname = "ipsecpolicy1"
description = "my-vpn-connection"
keys = {'name': vpnservice_name,
'description': "my-vpn-connection",
'peer_address': '192.168.1.10',
'peer_id': '192.168.1.10',
'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'],
'initiator': 'bi-directional',
'mtu': 1500,
'dpd_action': 'hold',
'dpd_interval': 40,
'dpd_timeout': 120,
'tenant_id': self._tenant_id,
'psk': 'abcd',
'status': 'PENDING_CREATE',
'admin_state_up': True}
with self.ikepolicy(name=ikename) as ikepolicy:
with self.ipsecpolicy(name=ipsecname) as ipsecpolicy:
with self.subnet() as subnet:
with self.router() as router:
plugin = manager.NeutronManager.get_plugin()
agent = {'host': FAKE_HOST,
'agent_type': constants.AGENT_TYPE_L3,
'binary': 'fake-binary',
'topic': 'fake-topic'}
plugin.create_or_update_agent(self.adminContext, agent)
plugin.schedule_router(
self.adminContext, router['router']['id'])
with self.vpnservice(name=vpnservice_name,
subnet=subnet,
router=router) as vpnservice1:
keys['ikepolicy_id'] = ikepolicy['ikepolicy']['id']
keys['ipsecpolicy_id'] = (
ipsecpolicy['ipsecpolicy']['id']
)
keys['vpnservice_id'] = (
vpnservice1['vpnservice']['id']
)
with self.ipsec_site_connection(
self.fmt,
ipsec_site_connection_name,
keys['peer_address'],
keys['peer_id'],
keys['peer_cidrs'],
keys['mtu'],
keys['psk'],
keys['initiator'],
keys['dpd_action'],
keys['dpd_interval'],
keys['dpd_timeout'],
vpnservice1,
ikepolicy,
ipsecpolicy,
keys['admin_state_up'],
description=description,
):
yield vpnservice1['vpnservice']
def test_get_agent_hosting_vpn_services(self):
with self.vpnservice_set():
service_plugin = manager.NeutronManager.get_service_plugins()[
p_constants.VPN]
vpnservices = service_plugin._get_agent_hosting_vpn_services(
self.adminContext, FAKE_HOST)
vpnservices = vpnservices.all()
self.assertEqual(1, len(vpnservices))
vpnservice_db = vpnservices[0]
self.assertEqual(1, len(vpnservice_db.ipsec_site_connections))
ipsec_site_connection = vpnservice_db.ipsec_site_connections[0]
self.assertIsNotNone(
ipsec_site_connection['ikepolicy'])
self.assertIsNotNone(
ipsec_site_connection['ipsecpolicy'])
def test_update_status(self):
with self.vpnservice_set() as vpnservice:
self._register_agent_states()
service_plugin = manager.NeutronManager.get_service_plugins()[
p_constants.VPN]
service_plugin.update_status_by_agent(
self.adminContext,
[{'status': 'ACTIVE',
'ipsec_site_connections': {},
'updated_pending_status': True,
'id': vpnservice['id']}])
vpnservices = service_plugin._get_agent_hosting_vpn_services(
self.adminContext, FAKE_HOST)
vpnservice_db = vpnservices[0]
self.assertEqual(p_constants.ACTIVE, vpnservice_db['status'])
| apache-2.0 |
Openlights/firemix | plugins/fixture_step.py | 1 | 2195 | # This file is part of Firemix.
#
# Copyright 2013-2016 Jonathan Evans <[email protected]>
#
# Firemix is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Firemix is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Firemix. If not, see <http://www.gnu.org/licenses/>.
from builtins import range
import numpy as np
from lib.transition import Transition
from lib.buffer_utils import BufferUtils, struct_flat
class FixtureStep(Transition):
"""
"""
def __init__(self, app):
Transition.__init__(self, app)
def __str__(self):
return "Fixture Step"
def reset(self):
self.fixtures = self._app.scene.fixtures()
buffer_size = BufferUtils.get_buffer_size()
self.mask = np.tile(False, buffer_size)
np.random.seed()
self.rand_index = np.arange(len(self.fixtures))
np.random.shuffle(self.rand_index)
self.last_idx = 0
def render(self, start, end, progress, out):
start[self.mask] = (0.0, 0.0, 0.0)
end[np.invert(self.mask)] = (0.0, 0.0, 0.0)
idx = int(progress * len(self.rand_index))
if idx >= self.last_idx:
for i in range(self.last_idx, idx):
fix = self.fixtures[self.rand_index[i]]
pix_start, pix_end = BufferUtils.get_fixture_extents(fix.strand, fix.address)
self.mask[pix_start:pix_end] = True
else:
for i in range(idx, self.last_idx):
fix = self.fixtures[self.rand_index[i]]
pix_start, pix_end = BufferUtils.get_fixture_extents(fix.strand, fix.address)
                self.mask[pix_start:pix_end] = False
self.last_idx = idx
np.add(struct_flat(start), struct_flat(end), struct_flat(out))
| gpl-3.0 |
sgallagher/anaconda | pyanaconda/modules/storage/storage.py | 1 | 14418 | #
# Kickstart module for the storage.
#
# Copyright (C) 2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from pyanaconda.core.signal import Signal
from pyanaconda.core.dbus import DBus
from pyanaconda.core.storage import blivet_version
from pyanaconda.modules.common.base import KickstartService
from pyanaconda.modules.common.constants.services import STORAGE
from pyanaconda.modules.common.containers import TaskContainer
from pyanaconda.modules.common.errors.storage import InvalidStorageError
from pyanaconda.modules.common.structures.requirement import Requirement
from pyanaconda.modules.storage.bootloader import BootloaderModule
from pyanaconda.modules.storage.checker import StorageCheckerModule
from pyanaconda.modules.storage.dasd import DASDModule
from pyanaconda.modules.storage.devicetree import DeviceTreeModule, create_storage
from pyanaconda.modules.storage.disk_initialization import DiskInitializationModule
from pyanaconda.modules.storage.disk_selection import DiskSelectionModule
from pyanaconda.modules.storage.fcoe import FCOEModule
from pyanaconda.modules.storage.installation import MountFilesystemsTask, CreateStorageLayoutTask, \
WriteConfigurationTask
from pyanaconda.modules.storage.iscsi import ISCSIModule
from pyanaconda.modules.storage.kickstart import StorageKickstartSpecification
from pyanaconda.modules.storage.nvdimm import NVDIMMModule
from pyanaconda.modules.storage.partitioning.constants import PartitioningMethod
from pyanaconda.modules.storage.partitioning.factory import PartitioningFactory
from pyanaconda.modules.storage.partitioning.validate import StorageValidateTask
from pyanaconda.modules.storage.reset import ScanDevicesTask
from pyanaconda.modules.storage.snapshot import SnapshotModule
from pyanaconda.modules.storage.storage_interface import StorageInterface
from pyanaconda.modules.storage.teardown import UnmountFilesystemsTask, TeardownDiskImagesTask
from pyanaconda.modules.storage.zfcp import ZFCPModule
from pyanaconda.anaconda_loggers import get_module_logger
log = get_module_logger(__name__)
class StorageService(KickstartService):
"""The Storage service."""
def __init__(self):
super().__init__()
# The storage model.
self._current_storage = None
self._storage_playground = None
self.storage_changed = Signal()
# The created partitioning modules.
self._created_partitioning = []
self.created_partitioning_changed = Signal()
# The applied partitioning module.
self._applied_partitioning = None
self.applied_partitioning_changed = Signal()
self.partitioning_reset = Signal()
# Initialize modules.
self._modules = []
self._storage_checker_module = StorageCheckerModule()
self._add_module(self._storage_checker_module)
self._device_tree_module = DeviceTreeModule()
self._add_module(self._device_tree_module)
self._disk_init_module = DiskInitializationModule()
self._add_module(self._disk_init_module)
self._disk_selection_module = DiskSelectionModule()
self._add_module(self._disk_selection_module)
self._snapshot_module = SnapshotModule()
self._add_module(self._snapshot_module)
self._bootloader_module = BootloaderModule()
self._add_module(self._bootloader_module)
self._fcoe_module = FCOEModule()
self._add_module(self._fcoe_module)
self._iscsi_module = ISCSIModule()
self._add_module(self._iscsi_module)
self._nvdimm_module = NVDIMMModule()
self._add_module(self._nvdimm_module)
self._dasd_module = DASDModule()
self._add_module(self._dasd_module)
self._zfcp_module = ZFCPModule()
self._add_module(self._zfcp_module)
# Connect modules to signals.
self.storage_changed.connect(
self._device_tree_module.on_storage_changed
)
self.storage_changed.connect(
self._disk_init_module.on_storage_changed
)
self.storage_changed.connect(
self._disk_selection_module.on_storage_changed
)
self.storage_changed.connect(
self._snapshot_module.on_storage_changed
)
self.storage_changed.connect(
self._bootloader_module.on_storage_changed
)
self.storage_changed.connect(
self._dasd_module.on_storage_changed
)
self._disk_init_module.format_unrecognized_enabled_changed.connect(
self._dasd_module.on_format_unrecognized_enabled_changed
)
self._disk_init_module.format_ldl_enabled_changed.connect(
self._dasd_module.on_format_ldl_enabled_changed
)
self._disk_selection_module.protected_devices_changed.connect(
self.on_protected_devices_changed
)
# After connecting modules to signals, create the initial
# storage model. It will be propagated to all modules.
self._set_storage(create_storage())
def _add_module(self, storage_module):
"""Add a base kickstart module."""
self._modules.append(storage_module)
def publish(self):
"""Publish the module."""
TaskContainer.set_namespace(STORAGE.namespace)
for kickstart_module in self._modules:
kickstart_module.publish()
DBus.publish_object(STORAGE.object_path, StorageInterface(self))
DBus.register_service(STORAGE.service_name)
@property
def kickstart_specification(self):
"""Return the kickstart specification."""
return StorageKickstartSpecification
def process_kickstart(self, data):
"""Process the kickstart data."""
# Process the kickstart data in modules.
for kickstart_module in self._modules:
kickstart_module.process_kickstart(data)
# Set the default filesystem type.
if data.autopart.autopart and data.autopart.fstype:
self.storage.set_default_fstype(data.autopart.fstype)
# Create a new partitioning module.
partitioning_method = PartitioningFactory.get_method_for_kickstart(data)
if partitioning_method:
partitioning_module = self.create_partitioning(partitioning_method)
partitioning_module.process_kickstart(data)
def setup_kickstart(self, data):
"""Set up the kickstart data."""
for kickstart_module in self._modules:
kickstart_module.setup_kickstart(data)
if self.applied_partitioning:
self.applied_partitioning.setup_kickstart(data)
def generate_kickstart(self):
"""Generate kickstart string representation of this module's data
Adds Blivet version to the output because most of the strings come from Blivet anyway.
"""
return "# Generated using Blivet version {}\n{}".format(
blivet_version,
super().generate_kickstart()
)
@property
def storage(self):
"""The storage model.
:return: an instance of Blivet
"""
if self._storage_playground:
return self._storage_playground
if not self._current_storage:
self._set_storage(create_storage())
return self._current_storage
def _set_storage(self, storage):
"""Set the current storage model.
The current storage is the latest model of
the system’s storage configuration created
by scanning all devices.
:param storage: a storage
"""
self._current_storage = storage
if self._storage_playground:
return
self.storage_changed.emit(storage)
log.debug("The storage model has changed.")
def _set_storage_playground(self, storage):
"""Set the storage playground.
The storage playground is a model of a valid
        partitioned storage configuration that can be
used for an installation.
:param storage: a storage or None
"""
self._storage_playground = storage
if storage is None:
storage = self.storage
self.storage_changed.emit(storage)
log.debug("The storage model has changed.")
def on_protected_devices_changed(self, protected_devices):
"""Update the protected devices in the storage model."""
if not self._current_storage:
return
self.storage.protect_devices(protected_devices)
def scan_devices_with_task(self):
"""Scan all devices with a task.
We will reset a copy of the current storage model
and switch the models if the reset is successful.
:return: a task
"""
# Copy the storage.
storage = self.storage.copy()
# Set up the storage.
storage.ignored_disks = self._disk_selection_module.ignored_disks
storage.exclusive_disks = self._disk_selection_module.exclusive_disks
storage.protected_devices = self._disk_selection_module.protected_devices
storage.disk_images = self._disk_selection_module.disk_images
# Create the task.
task = ScanDevicesTask(storage)
task.succeeded_signal.connect(lambda: self._set_storage(storage))
return task
def create_partitioning(self, method: PartitioningMethod):
"""Create a new partitioning.
Allowed values:
AUTOMATIC
CUSTOM
MANUAL
INTERACTIVE
BLIVET
:param PartitioningMethod method: a partitioning method
:return: a partitioning module
"""
module = PartitioningFactory.create_partitioning(method)
# Update the module.
module.on_storage_changed(
self._current_storage
)
module.on_selected_disks_changed(
self._disk_selection_module.selected_disks
)
# Connect the callbacks to signals.
self.storage_changed.connect(
module.on_storage_changed
)
self.partitioning_reset.connect(
module.on_partitioning_reset
)
self._disk_selection_module.selected_disks_changed.connect(
module.on_selected_disks_changed
)
# Update the list of modules.
self._add_created_partitioning(module)
return module
@property
def created_partitioning(self):
"""List of all created partitioning modules."""
return self._created_partitioning
def _add_created_partitioning(self, module):
"""Add a created partitioning module."""
self._created_partitioning.append(module)
self.created_partitioning_changed.emit(module)
log.debug("Created the partitioning %s.", module)
def apply_partitioning(self, module):
"""Apply a partitioning.
:param module: a partitioning module
:raise: InvalidStorageError of the partitioning is not valid
"""
# Validate the partitioning.
storage = module.storage.copy()
task = StorageValidateTask(storage)
report = task.run()
if not report.is_valid():
raise InvalidStorageError(" ".join(report.error_messages))
# Apply the partitioning.
self._set_storage_playground(storage)
self._set_applied_partitioning(module)
@property
def applied_partitioning(self):
"""The applied partitioning."""
return self._applied_partitioning
def _set_applied_partitioning(self, module):
"""Set the applied partitioning.
:param module: a partitioning module or None
"""
self._applied_partitioning = module
self.applied_partitioning_changed.emit()
if module is None:
module = "NONE"
log.debug("The partitioning %s is applied.", module)
def reset_partitioning(self):
"""Reset the partitioning."""
self._set_storage_playground(None)
self._set_applied_partitioning(None)
self.partitioning_reset.emit()
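    # Illustrative flow sketch (an assumption about how callers drive this
    # service, not part of the original module):
    #   module = service.create_partitioning(PartitioningMethod.AUTOMATIC)
    #   service.apply_partitioning(module)   # raises InvalidStorageError when invalid
    #   service.reset_partitioning()         # drop the playground and applied module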
def collect_requirements(self):
"""Return installation requirements for this module.
:return: a list of requirements
"""
requirements = []
# Add the storage requirements.
for name in self.storage.packages:
requirements.append(Requirement.for_package(
name, reason="Required to manage storage devices."
))
# Add other requirements, for example for bootloader.
for kickstart_module in self._modules:
requirements.extend(kickstart_module.collect_requirements())
return requirements
def install_with_tasks(self):
"""Returns installation tasks of this module.
:returns: list of installation tasks
"""
storage = self.storage
return [
CreateStorageLayoutTask(storage),
MountFilesystemsTask(storage)
]
def write_configuration_with_task(self):
"""Write the storage configuration with a task.
FIXME: This is a temporary workaround.
:return: an installation task
"""
return WriteConfigurationTask(self.storage)
def teardown_with_tasks(self):
"""Returns teardown tasks for this module.
        :return: a list of installation tasks
"""
storage = self.storage
return [
UnmountFilesystemsTask(storage),
TeardownDiskImagesTask(storage)
]
| gpl-2.0 |
kustodian/ansible | lib/ansible/modules/network/aci/aci_rest.py | 13 | 14126 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Dag Wieers (@dagwieers) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_rest
short_description: Direct access to the Cisco APIC REST API
description:
- Enables the management of the Cisco ACI fabric through direct access to the Cisco APIC REST API.
- Thanks to the idempotent nature of the APIC, this module is idempotent and reports changes.
version_added: '2.4'
requirements:
- lxml (when using XML payload)
- xmljson >= 0.1.8 (when using XML payload)
- python 2.7+ (when using xmljson)
options:
method:
description:
- The HTTP method of the request.
- Using C(delete) is typically used for deleting objects.
- Using C(get) is typically used for querying objects.
- Using C(post) is typically used for modifying objects.
type: str
choices: [ delete, get, post ]
default: get
aliases: [ action ]
path:
description:
- URI being used to execute API calls.
- Must end in C(.xml) or C(.json).
type: str
required: yes
aliases: [ uri ]
content:
description:
- When used instead of C(src), sets the payload of the API request directly.
- This may be convenient to template simple requests.
- For anything complex use the C(template) lookup plugin (see examples)
or the M(template) module with parameter C(src).
type: raw
src:
description:
- Name of the absolute path of the filename that includes the body
of the HTTP request being sent to the ACI fabric.
- If you require a templated payload, use the C(content) parameter
together with the C(template) lookup plugin, or use M(template).
type: path
aliases: [ config_file ]
extends_documentation_fragment: aci
notes:
- Certain payloads are known not to be idempotent, so be careful when constructing payloads,
e.g. using C(status="created") will cause idempotency issues, use C(status="modified") instead.
More information in :ref:`the ACI documentation <aci_guide_known_issues>`.
- Certain payloads (and used paths) are known to report no changes happened when changes did happen.
This is a known APIC problem and has been reported to the vendor. A workaround for this issue exists.
More information in :ref:`the ACI documentation <aci_guide_known_issues>`.
- XML payloads require the C(lxml) and C(xmljson) python libraries. For JSON payloads nothing special is needed.
seealso:
- module: aci_tenant
- name: Cisco APIC REST API Configuration Guide
description: More information about the APIC REST API.
link: http://www.cisco.com/c/en/us/td/docs/switches/datacenter/aci/apic/sw/2-x/rest_cfg/2_1_x/b_Cisco_APIC_REST_API_Configuration_Guide.html
author:
- Dag Wieers (@dagwieers)
'''
EXAMPLES = r'''
- name: Add a tenant using certificate authentication
aci_rest:
host: apic
username: admin
private_key: pki/admin.key
method: post
path: /api/mo/uni.xml
src: /home/cisco/ansible/aci/configs/aci_config.xml
delegate_to: localhost
- name: Add a tenant from a templated payload file from templates/
aci_rest:
host: apic
username: admin
private_key: pki/admin.key
method: post
path: /api/mo/uni.xml
content: "{{ lookup('template', 'aci/tenant.xml.j2') }}"
delegate_to: localhost
- name: Add a tenant using inline YAML
aci_rest:
host: apic
username: admin
private_key: pki/admin.key
validate_certs: no
path: /api/mo/uni.json
method: post
content:
fvTenant:
attributes:
name: Sales
descr: Sales department
delegate_to: localhost
- name: Add a tenant using a JSON string
aci_rest:
host: apic
username: admin
private_key: pki/admin.key
validate_certs: no
path: /api/mo/uni.json
method: post
content:
{
"fvTenant": {
"attributes": {
"name": "Sales",
"descr": "Sales department"
}
}
}
delegate_to: localhost
- name: Add a tenant using an XML string
aci_rest:
host: apic
username: admin
private_key: pki/{{ aci_username }}.key
validate_certs: no
path: /api/mo/uni.xml
method: post
    content: '<fvTenant name="Sales" descr="Sales department"/>'
delegate_to: localhost
- name: Get tenants using password authentication
aci_rest:
host: apic
username: admin
password: SomeSecretPassword
method: get
path: /api/node/class/fvTenant.json
delegate_to: localhost
register: query_result
- name: Configure contracts
aci_rest:
host: apic
username: admin
private_key: pki/admin.key
method: post
path: /api/mo/uni.xml
src: /home/cisco/ansible/aci/configs/contract_config.xml
delegate_to: localhost
- name: Register leaves and spines
aci_rest:
host: apic
username: admin
private_key: pki/admin.key
validate_certs: no
method: post
path: /api/mo/uni/controller/nodeidentpol.xml
content: |
<fabricNodeIdentPol>
<fabricNodeIdentP name="{{ item.name }}" nodeId="{{ item.nodeid }}" status="{{ item.status }}" serial="{{ item.serial }}"/>
</fabricNodeIdentPol>
with_items:
- '{{ apic_leavesspines }}'
delegate_to: localhost
- name: Wait for all controllers to become ready
aci_rest:
host: apic
username: admin
private_key: pki/admin.key
validate_certs: no
path: /api/node/class/topSystem.json?query-target-filter=eq(topSystem.role,"controller")
register: apics
until: "'totalCount' in apics and apics.totalCount|int >= groups['apic']|count"
retries: 120
delay: 30
delegate_to: localhost
run_once: yes
'''
RETURN = r'''
error_code:
description: The REST ACI return code, useful for troubleshooting on failure
returned: always
type: int
sample: 122
error_text:
description: The REST ACI descriptive text, useful for troubleshooting on failure
returned: always
type: str
sample: unknown managed object class foo
imdata:
description: Converted output returned by the APIC REST (register this for post-processing)
returned: always
type: str
sample: [{"error": {"attributes": {"code": "122", "text": "unknown managed object class foo"}}}]
payload:
description: The (templated) payload send to the APIC REST API (xml or json)
returned: always
type: str
sample: '<foo bar="boo"/>'
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: str
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
response:
description: HTTP response string
returned: always
type: str
sample: 'HTTP Error 400: Bad Request'
status:
description: HTTP status code
returned: always
type: int
sample: 400
totalCount:
description: Number of items in the imdata array
returned: always
type: str
sample: '0'
url:
description: URL used for APIC REST call
returned: success
type: str
sample: https://1.2.3.4/api/mo/uni/tn-[Dag].json?rsp-subtree=modified
'''
import json
import os
try:
from ansible.module_utils.six.moves.urllib.parse import parse_qsl, urlencode, urlparse, urlunparse
HAS_URLPARSE = True
except Exception:
HAS_URLPARSE = False
# Optional, only used for XML payload
try:
import lxml.etree # noqa
HAS_LXML_ETREE = True
except ImportError:
HAS_LXML_ETREE = False
# Optional, only used for XML payload
try:
from xmljson import cobra # noqa
HAS_XMLJSON_COBRA = True
except ImportError:
HAS_XMLJSON_COBRA = False
# Optional, only used for YAML validation
try:
import yaml
HAS_YAML = True
except Exception:
HAS_YAML = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_text
def update_qsl(url, params):
''' Add or update a URL query string '''
if HAS_URLPARSE:
url_parts = list(urlparse(url))
query = dict(parse_qsl(url_parts[4]))
query.update(params)
url_parts[4] = urlencode(query)
return urlunparse(url_parts)
elif '?' in url:
return url + '&' + '&'.join(['%s=%s' % (k, v) for k, v in params.items()])
else:
return url + '?' + '&'.join(['%s=%s' % (k, v) for k, v in params.items()])
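# Illustrative expectations for update_qsl (assumed URLs, not part of the
# original module; the first form assumes urlparse is available):
#   update_qsl('https://apic/api/mo/uni.json', {'rsp-subtree': 'modified'})
#       -> 'https://apic/api/mo/uni.json?rsp-subtree=modified'
#   update_qsl('https://apic/api/mo/uni.json?a=1', {'b': '2'})
#       -> 'https://apic/api/mo/uni.json?a=1&b=2'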
class ACIRESTModule(ACIModule):
def changed(self, d):
''' Check ACI response for changes '''
if isinstance(d, dict):
for k, v in d.items():
if k == 'status' and v in ('created', 'modified', 'deleted'):
return True
elif self.changed(v) is True:
return True
elif isinstance(d, list):
for i in d:
if self.changed(i) is True:
return True
return False
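# Illustrative note (not from the original source): for an imdata fragment such as
# [{'fvTenant': {'attributes': {'name': 'Example', 'status': 'created'}}}]
# self.changed() recurses through the nested lists/dicts and returns True because it
# finds a 'status' attribute set to 'created', 'modified' or 'deleted'.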
def response_type(self, rawoutput, rest_type='xml'):
''' Handle APIC response output '''
if rest_type == 'json':
self.response_json(rawoutput)
else:
self.response_xml(rawoutput)
# Use APICs built-in idempotency
if HAS_URLPARSE:
self.result['changed'] = self.changed(self.imdata)
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
path=dict(type='str', required=True, aliases=['uri']),
method=dict(type='str', default='get', choices=['delete', 'get', 'post'], aliases=['action']),
src=dict(type='path', aliases=['config_file']),
content=dict(type='raw'),
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[['content', 'src']],
)
content = module.params.get('content')
path = module.params.get('path')
src = module.params.get('src')
# Report missing file
file_exists = False
if src:
if os.path.isfile(src):
file_exists = True
else:
module.fail_json(msg="Cannot find/access src '%s'" % src)
# Find request type
if path.find('.xml') != -1:
rest_type = 'xml'
if not HAS_LXML_ETREE:
module.fail_json(msg='The lxml python library is missing, or lacks etree support.')
if not HAS_XMLJSON_COBRA:
module.fail_json(msg='The xmljson python library is missing, or lacks cobra support.')
elif path.find('.json') != -1:
rest_type = 'json'
else:
module.fail_json(msg='Failed to find REST API payload type (neither .xml nor .json).')
aci = ACIRESTModule(module)
aci.result['status'] = -1 # Ensure we always return a status
# We include the payload as it may be templated
payload = content
if file_exists:
with open(src, 'r') as config_object:
# TODO: Would be nice to template this, requires action-plugin
payload = config_object.read()
# Validate payload
if rest_type == 'json':
if content and isinstance(content, dict):
# Validate inline YAML/JSON
payload = json.dumps(payload)
elif payload and isinstance(payload, str) and HAS_YAML:
try:
# Validate YAML/JSON string
payload = json.dumps(yaml.safe_load(payload))
except Exception as e:
module.fail_json(msg='Failed to parse provided JSON/YAML payload: %s' % to_text(e), exception=to_text(e), payload=payload)
elif rest_type == 'xml' and HAS_LXML_ETREE:
if content and isinstance(content, dict) and HAS_XMLJSON_COBRA:
# Validate inline YAML/JSON
# FIXME: Converting from a dictionary to XML is unsupported at this time
# payload = etree.tostring(payload)
pass
elif payload and isinstance(payload, str):
try:
# Validate XML string
payload = lxml.etree.tostring(lxml.etree.fromstring(payload))
except Exception as e:
module.fail_json(msg='Failed to parse provided XML payload: %s' % to_text(e), payload=payload)
# Perform actual request using auth cookie (Same as aci.request(), but also supports XML)
if 'port' in aci.params and aci.params.get('port') is not None:
aci.url = '%(protocol)s://%(host)s:%(port)s/' % aci.params + path.lstrip('/')
else:
aci.url = '%(protocol)s://%(host)s/' % aci.params + path.lstrip('/')
if aci.params.get('method') != 'get':
path += '?rsp-subtree=modified'
aci.url = update_qsl(aci.url, {'rsp-subtree': 'modified'})
# Sign and encode the request according to APIC's wishes
if aci.params.get('private_key') is not None:
aci.cert_auth(path=path, payload=payload)
aci.method = aci.params.get('method').upper()
# Perform request
resp, info = fetch_url(module, aci.url,
data=payload,
headers=aci.headers,
method=aci.method,
timeout=aci.params.get('timeout'),
use_proxy=aci.params.get('use_proxy'))
aci.response = info.get('msg')
aci.status = info.get('status')
# Report failure
if info.get('status') != 200:
try:
# APIC error
aci.response_type(info.get('body'), rest_type)
aci.fail_json(msg='APIC Error %(code)s: %(text)s' % aci.error)
except KeyError:
# Connection error
aci.fail_json(msg='Connection failed for %(url)s. %(msg)s' % info)
aci.response_type(resp.read(), rest_type)
aci.result['imdata'] = aci.imdata
aci.result['totalCount'] = aci.totalCount
# Report success
aci.exit_json(**aci.result)
if __name__ == '__main__':
main()
| gpl-3.0 |
LLNL/spack | var/spack/repos/builtin/packages/muparser/package.py | 5 | 1625 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Muparser(Package):
"""C++ math expression parser library."""
homepage = "http://muparser.beltoforion.de/"
url = "https://github.com/beltoforion/muparser/archive/v2.2.5.tar.gz"
version('2.2.6.1', sha256='d2562853d972b6ddb07af47ce8a1cdeeb8bb3fa9e8da308746de391db67897b3')
version('2.2.5', sha256='0666ef55da72c3e356ca85b6a0084d56b05dd740c3c21d26d372085aa2c6e708')
# Replace std::auto_ptr by std::unique_ptr
# https://github.com/beltoforion/muparser/pull/46
patch('auto_ptr.patch',
when='@2.2.5')
depends_on('[email protected]:', when='@2.2.6:', type='build')
# CMake build since version 2.2.6
@when('@2.2.6:')
def install(self, spec, prefix):
cmake_args = [
'-DENABLE_SAMPLES=OFF',
'-DENABLE_OPENMP=OFF',
'-DBUILD_SHARED_LIBS=ON'
]
cmake_args.extend(std_cmake_args)
with working_dir('spack-build', create=True):
cmake('..', *cmake_args)
make()
make('install')
@when('@2.2.5')
def install(self, spec, prefix):
options = ['--disable-debug',
'--disable-samples',
'--disable-dependency-tracking',
'CXXFLAGS={0}'.format(self.compiler.cxx11_flag),
'--prefix=%s' % prefix]
configure(*options)
make(parallel=False)
make("install")
| lgpl-2.1 |
pkuyym/Paddle | paddle/contrib/float16/float16_transpiler.py | 3 | 11063 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle.fluid.core as core
from paddle.fluid.framework import Program
from paddle.fluid.executor import global_scope
class Float16Transpiler:
def transpile(self, program, place, scope=None):
'''
Transpile the program desc and cast the weights to float16 data type to
enable float16 inference.
Since each operator in a program desc automatically chooses the right
compute kernel to run based on the data type of its input tensors, we don't
actually need to change the program desc itself to run in float16 mode.
However, users who are used to feeding and fetching float32 tensors when
running typical inference may then find float16 mode confusing and hard to
use, because they would need to convert the input data to float16 dtype and
convert the results back to float32 dtype to match the rest of the code.
So this function appends cast ops to the program desc where necessary so
that users are able to run inference in float16 mode while providing input
tensor (feed_holder) of float data type and obtaining output tensor
(fetch_holder) of float data type.
Moreover, given the scope and program desc used to run inference in float32
mode, it is desirable to have a single API that makes the necessary
modifications so that float16 inference can then be run on the fly. To make
this happen, this function also creates new parameters in the scope holding
the converted float16 weights, and changes the operators in the program desc
to use these new parameters.
:param program: program to transpile
:type program: Program
:param place: inference place
:type place: Place
:param scope: inference scope
:type scope: Scope
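Example (illustrative sketch only, not from the original sources; assumes a
program/scope pair obtained from fluid.io.load_inference_model):
    place = core.CUDAPlace(0)
    Float16Transpiler().transpile(inference_program, place, scope)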
'''
if not isinstance(program, Program):
raise TypeError("program should be as Program type")
if not isinstance(place, core.CPUPlace) and not isinstance(
place, core.CUDAPlace):
raise TypeError("place should be as CPUPlace/CUDAPlace type")
if scope is None:
scope = global_scope()
if not isinstance(scope, core.Scope):
raise TypeError("scope should be as Scope type or None")
self.scope = scope
self.place = place
self.block = program.block(0)
self.input_map = {} # store the input names should be adjusted
self._modify_feed_fetch()
self._convert_param_to_float16()
self._adjust_input(skip=True)
self._remove_unused_var()
# TODO(luotao): use clone() method to flush the program.desc in force,
# since some large program.desc will not be flushed immediately.
# And a better solution will be considered later.
program = program.clone()
# ====================== private transpiler functions =====================
def _adjust_input(self, skip=False):
'''
Change the input variable name in operators.
When we are in the process of modifying a program desc, we usually
replace some variables with some other variables, where we create
a dictionary input_map to record the one-to-one correspondence
between each old variable and the new one.
After that, this function will search all the operators that use the
old variables and change the info in op to use the new variables. There
may be some exceptions to this rule when we are using the float16 transpiler
and insert cast ops to cast float32 variable to float16 one. After we
insert the cast op to cast var_1 to var_1_fp16, we don't want to change
the input of cast op to var_1_fp16 after using this function.
'''
skip_ops = {"cast"}
for i in range(len(self.block.ops)):
current_op = self.block.ops[i]
if skip and current_op.type in skip_ops:
continue
for input_arg in current_op.input_arg_names:
if input_arg in self.input_map:
current_op.rename_input(input_arg,
self.input_map[input_arg])
def _remove_unused_var(self):
'''
Remove unused variables in the program.
'''
args = []
for i in range(len(self.block.ops)):
current_op = self.block.ops[i]
args += current_op.input_arg_names
args += current_op.output_arg_names
args = list(set(args)) # unique the input and output arguments
for var in self.block.vars.keys():
if var not in args:
self.block.remove_var(var)
def _modify_feed_fetch(self):
'''
Modify feed fetch op/vars for float16 inference.
For each feed op:
feed_op->feed_target_var
Change it to:
feed_op->feed_target_var->cast_op(from other dtype to float16)->tmp_var
For each fetch op:
fetch_target_var->fetch_op
Change it to:
tmp_var->cast_op(from float16 to other dtype)->fetch_target_var->fetch_op
:return: None
'''
def find_op(var):
# It is possible that var.op is not up to date after some
# modifications to program desc. Here we force to make it up to date.
var.op = None
for op in self.block.ops:
if var.name in op.output_arg_names:
var.op = op
break
if var.op is None:
raise ValueError("The target variable must have an "
"associated operator that generates it.")
i = 0
while i < len(self.block.ops):
cur_op = self.block.ops[i]
if cur_op.type == "feed":
var_name = cur_op.output("Out")[0]
tmp_var_name = var_name + ".fp16"
var = self.block.vars[var_name]
tmp_var = self.block.create_var(
name=tmp_var_name.encode('ascii'),
type=var.type,
dtype=core.VarDesc.VarType.FP16,
shape=var.shape,
persistable=var.persistable)
self.block.insert_op(
i + 1,
type="cast",
inputs={"X": var},
outputs={"Out": tmp_var},
attrs={
'in_dtype': int(var.dtype),
'out_dtype': int(tmp_var.dtype)
})
self.input_map[var_name] = tmp_var_name
i = i + 1
elif cur_op.type == "fetch":
var_name = cur_op.input("X")[0]
tmp_var_name = var_name + ".fp16"
var = self.block.vars[var_name]
tmp_var = self.block.create_var(
name=tmp_var_name.encode('ascii'),
type=var.type,
dtype=core.VarDesc.VarType.FP16,
shape=var.shape,
persistable=var.persistable)
find_op(var)
var.op.rename_output(var_name, tmp_var_name)
self.block.insert_op(
i,
type="cast",
inputs={"X": tmp_var},
outputs={"Out": var},
attrs={
'in_dtype': int(tmp_var.dtype),
'out_dtype': int(var.dtype)
})
i = i + 1
i = i + 1
def _convert_param_to_float16(self):
def _get_no_fp16_conversion_var_names():
'''
Get the set of input variable names that shouldn't be converted to float16.
When we want to run inference in float16 mode, most parameters need to be
firstly converted to float16. However, there are some parameters that
shouldn't be converted to float16 because the corresponding operator
requires float32 parameters even in float16 mode (when the input data is
of float16 data type). Currently, the only operator that has this exclusion
is the batch norm op.
:return: set of input variable names
:type var_names: set
'''
op_names = {'batch_norm'}
var_names = []
for op in self.block.ops:
if op.type in op_names:
var_names += op.input_arg_names
return set(var_names)
def _should_be_converted(var):
return var.persistable and \
var.name not in self.no_conversion_vars and \
var.type != core.VarDesc.VarType.FEED_MINIBATCH and \
var.type != core.VarDesc.VarType.FETCH_LIST
self.no_conversion_vars = _get_no_fp16_conversion_var_names()
conversion_var_list = filter(_should_be_converted,
self.block.vars.values())
for var in conversion_var_list:
fp16_var_name = var.name + ".fp16"
fp16_var = self.block.create_parameter(
name=fp16_var_name.encode('ascii'),
type=var.type,
dtype=core.VarDesc.VarType.FP16,
shape=var.shape)
# cast the data in the tensor of the original var to float16
# data type and store it in the tensor of the new float16 var
self.scope.var(fp16_var_name)
fp16_tensor = self.scope.find_var(fp16_var_name).get_tensor()
tensor = np.array(self.scope.find_var(var.name).get_tensor())
# After the old tensor data is converted to np.float16, view(np.uint16)
# is used so that the internal memory of the numpy array will be
# reinterpreted as np.uint16 data, which is bound to the fluid
# float16 data type with the help of pybind in tensor_py.h.
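# For example (illustrative): np.float32(1.0) cast to np.float16 and viewed as
# np.uint16 gives 15360 (0x3C00) -- the same bits, merely relabeled so they can
# be copied into the fluid float16 tensor unchanged.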
fp16_tensor.set(
tensor.astype(np.float16).view(np.uint16), self.place)
# old var will be replaced by the fp16 var in program desc
self.input_map[var.name] = fp16_var_name
self.block.remove_var(var.name)
| apache-2.0 |
lshain-android-source/external-chromium_org | media/tools/constrained_network_server/traffic_control_test.py | 187 | 5943 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""End-to-end tests for traffic control library."""
import os
import re
import sys
import unittest
import traffic_control
class TrafficControlTests(unittest.TestCase):
"""System tests for traffic_control functions.
These tests require root access.
"""
# A dummy interface name to use instead of a real interface.
_INTERFACE = 'myeth'
def setUp(self):
"""Setup a dummy interface."""
# If we update to python version 2.7 or newer we can use setUpClass() or
# unittest.skipIf().
if os.getuid() != 0:
sys.exit('You need root access to run these tests.')
command = ['ip', 'link', 'add', 'name', self._INTERFACE, 'type', 'dummy']
traffic_control._Exec(command, 'Error creating dummy interface %s.' %
self._INTERFACE)
def tearDown(self):
"""Teardown the dummy interface and any network constraints on it."""
# Deleting the dummy interface deletes all associated constraints.
command = ['ip', 'link', 'del', self._INTERFACE]
traffic_control._Exec(command)
def testExecOutput(self):
output = traffic_control._Exec(['echo', ' Test '])
self.assertEqual(output, 'Test')
def testExecException(self):
self.assertRaises(traffic_control.TrafficControlError,
traffic_control._Exec, command=['ls', '!doesntExist!'])
def testExecErrorCustomMsg(self):
try:
traffic_control._Exec(['ls', '!doesntExist!'], msg='test_msg')
self.fail('No exception raised for invalid command.')
except traffic_control.TrafficControlError as e:
self.assertEqual(e.msg, 'test_msg')
def testAddRootQdisc(self):
"""Checks adding a root qdisc is successful."""
config = {'interface': self._INTERFACE}
root_detail = 'qdisc htb 1: root'
# Assert no htb root at startup.
command = ['tc', 'qdisc', 'ls', 'dev', config['interface']]
output = traffic_control._Exec(command)
self.assertFalse(root_detail in output)
traffic_control._AddRootQdisc(config['interface'])
output = traffic_control._Exec(command)
# Assert htb root is added.
self.assertTrue(root_detail in output)
def testConfigureClassAdd(self):
"""Checks adding and deleting a class to the root qdisc."""
config = {
'interface': self._INTERFACE,
'port': 12345,
'server_port': 33333,
'bandwidth': 2000
}
class_detail = ('class htb 1:%x root prio 0 rate %dKbit ceil %dKbit' %
(config['port'], config['bandwidth'], config['bandwidth']))
# Add root qdisc.
traffic_control._AddRootQdisc(config['interface'])
# Assert class does not exist prior to adding it.
command = ['tc', 'class', 'ls', 'dev', config['interface']]
output = traffic_control._Exec(command)
self.assertFalse(class_detail in output)
# Add class to root.
traffic_control._ConfigureClass('add', config)
# Assert class is added.
command = ['tc', 'class', 'ls', 'dev', config['interface']]
output = traffic_control._Exec(command)
self.assertTrue(class_detail in output)
# Delete class.
traffic_control._ConfigureClass('del', config)
# Assert class is deleted.
command = ['tc', 'class', 'ls', 'dev', config['interface']]
output = traffic_control._Exec(command)
self.assertFalse(class_detail in output)
def testAddSubQdisc(self):
"""Checks adding a sub qdisc to existing class."""
config = {
'interface': self._INTERFACE,
'port': 12345,
'server_port': 33333,
'bandwidth': 2000,
'latency': 250,
'loss': 5
}
qdisc_re_detail = ('qdisc netem %x: parent 1:%x .* delay %d.0ms loss %d%%' %
(config['port'], config['port'], config['latency'],
config['loss']))
# Add root qdisc.
traffic_control._AddRootQdisc(config['interface'])
# Add class to root.
traffic_control._ConfigureClass('add', config)
# Assert qdisc does not exist prior to adding it.
command = ['tc', 'qdisc', 'ls', 'dev', config['interface']]
output = traffic_control._Exec(command)
handle_id_re = re.search(qdisc_re_detail, output)
self.assertEqual(handle_id_re, None)
# Add qdisc to class.
traffic_control._AddSubQdisc(config)
# Assert qdisc is added.
command = ['tc', 'qdisc', 'ls', 'dev', config['interface']]
output = traffic_control._Exec(command)
handle_id_re = re.search(qdisc_re_detail, output)
self.assertNotEqual(handle_id_re, None)
def testAddDeleteFilter(self):
config = {
'interface': self._INTERFACE,
'port': 12345,
'bandwidth': 2000
}
# Assert no filter exists.
command = ['tc', 'filter', 'list', 'dev', config['interface'], 'parent',
'1:0']
output = traffic_control._Exec(command)
self.assertEqual(output, '')
# Create the root and class to which the filter will be attached.
# Add root qdisc.
traffic_control._AddRootQdisc(config['interface'])
# Add class to root.
traffic_control._ConfigureClass('add', config)
# Add the filter.
traffic_control._AddFilter(config['interface'], config['port'])
handle_id = traffic_control._GetFilterHandleId(config['interface'],
config['port'])
self.assertNotEqual(handle_id, None)
# Delete the filter.
# The output of tc filter list is not None because tc adds default filters.
traffic_control._DeleteFilter(config['interface'], config['port'])
self.assertRaises(traffic_control.TrafficControlError,
traffic_control._GetFilterHandleId, config['interface'],
config['port'])
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
nishad-jobsglobal/odoo-marriot | addons/account/project/project.py | 273 | 2423 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_analytic_journal(osv.osv):
_name = 'account.analytic.journal'
_description = 'Analytic Journal'
_columns = {
'name': fields.char('Journal Name', required=True),
'code': fields.char('Journal Code', size=8),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the analytic journal without removing it."),
'type': fields.selection([('sale','Sale'), ('purchase','Purchase'), ('cash','Cash'), ('general','General'), ('situation','Situation')], 'Type', required=True, help="Gives the type of the analytic journal. When a document (e.g. an invoice) needs to create analytic entries, Odoo will look for a matching journal of the same type."),
'line_ids': fields.one2many('account.analytic.line', 'journal_id', 'Lines', copy=False),
'company_id': fields.many2one('res.company', 'Company', required=True),
}
_defaults = {
'active': True,
'type': 'general',
'company_id': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id,
}
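# Illustrative note (not part of the module): with the legacy osv API used here,
# an analytic journal could be created roughly as
#   self.pool.get('account.analytic.journal').create(
#       cr, uid, {'name': 'Timesheets', 'code': 'TS', 'type': 'general'})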
class account_journal(osv.osv):
_inherit="account.journal"
_columns = {
'analytic_journal_id':fields.many2one('account.analytic.journal','Analytic Journal', help="Journal for analytic entries"),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
blaze/dask | dask/array/reductions.py | 1 | 52217 | import builtins
from collections.abc import Iterable
import operator
from functools import partial
from itertools import product, repeat
from math import factorial, log, ceil, log2
import numpy as np
from numbers import Integral, Number
from tlz import compose, partition_all, get, accumulate, pluck, drop
from . import chunk
from .core import _concatenate2, Array, handle_out, implements
from .blockwise import blockwise
from ..blockwise import lol_tuples
from .creation import arange, diagonal
from .utils import full_like_safe, validate_axis, compute_meta, is_arraylike
from .wrap import zeros, ones
from .numpy_compat import ma_divide, divide as np_divide
from ..base import tokenize
from ..highlevelgraph import HighLevelGraph
from ..utils import (
ignoring,
funcname,
Dispatch,
deepmap,
getargspec,
derived_from,
is_series_like,
)
from .. import config
# Generic functions to support chunks of different types
empty_lookup = Dispatch("empty")
empty_lookup.register((object, np.ndarray), np.empty)
empty_lookup.register(np.ma.masked_array, np.ma.empty)
divide_lookup = Dispatch("divide")
divide_lookup.register((object, np.ndarray), np_divide)
divide_lookup.register(np.ma.masked_array, ma_divide)
def divide(a, b, dtype=None):
key = lambda x: getattr(x, "__array_priority__", float("-inf"))
f = divide_lookup.dispatch(type(builtins.max(a, b, key=key)))
return f(a, b, dtype=dtype)
def reduction(
x,
chunk,
aggregate,
axis=None,
keepdims=False,
dtype=None,
split_every=None,
combine=None,
name=None,
out=None,
concatenate=True,
output_size=1,
meta=None,
):
"""General version of reductions
Parameters
----------
x: Array
Data being reduced along one or more axes
chunk: callable(x_chunk, axis, keepdims)
First function to be executed when resolving the dask graph.
This function is applied in parallel to all original chunks of x.
See below for function parameters.
combine: callable(x_chunk, axis, keepdims), optional
Function used for intermediate recursive aggregation (see
split_every below). If omitted, it defaults to aggregate.
If the reduction can be performed in less than 3 steps, it will not
be invoked at all.
aggregate: callable(x_chunk, axis, keepdims)
Last function to be executed when resolving the dask graph,
producing the final output. It is always invoked, even when the reduced
Array counts a single chunk along the reduced axes.
axis: int or sequence of ints, optional
Axis or axes to aggregate upon. If omitted, aggregate along all axes.
keepdims: boolean, optional
Whether the reduction function should preserve the reduced axes,
leaving them at size ``output_size``, or remove them.
dtype: np.dtype
data type of output. This argument was previously optional, but
leaving as ``None`` will now raise an exception.
split_every: int >= 2 or dict(axis: int), optional
Determines the depth of the recursive aggregation. If set to or more
than the number of input chunks, the aggregation will be performed in
two steps, one ``chunk`` function per input chunk and a single
``aggregate`` function at the end. If set to less than that, an
intermediate ``combine`` function will be used, so that any one
``combine`` or ``aggregate`` function has no more than ``split_every``
inputs. The depth of the aggregation graph will be
:math:`log_{split_every}(input chunks along reduced axes)`. Setting to
a low value can reduce cache size and network transfers, at the cost of
more CPU and a larger dask graph.
Omit to let dask heuristically decide a good default. A default can
also be set globally with the ``split_every`` key in
:mod:`dask.config`.
name: str, optional
Prefix of the keys of the intermediate and output nodes. If omitted it
defaults to the function names.
out: Array, optional
Another dask array whose contents will be replaced. Omit to create a
new one. Note that, unlike in numpy, this setting gives no performance
benefits whatsoever, but can still be useful if one needs to preserve
the references to a previously existing Array.
concatenate: bool, optional
If True (the default), the outputs of the ``chunk``/``combine``
functions are concatenated into a single np.array before being passed
to the ``combine``/``aggregate`` functions. If False, the input of
``combine`` and ``aggregate`` will be either a list of the raw outputs
of the previous step or a single output, and the function will have to
concatenate it itself. It can be useful to set this to False if the
chunk and/or combine steps do not produce np.arrays.
output_size: int >= 1, optional
Size of the output of the ``aggregate`` function along the reduced
axes. Ignored if keepdims is False.
Returns
-------
dask array
**Function Parameters**
x_chunk: numpy.ndarray
Individual input chunk. For ``chunk`` functions, it is one of the
original chunks of x. For ``combine`` and ``aggregate`` functions, it's
the concatenation of the outputs produced by the previous ``chunk`` or
``combine`` functions. If concatenate=False, it's a list of the raw
outputs from the previous functions.
axis: tuple
Normalized list of axes to reduce upon, e.g. ``(0, )``
Scalar, negative, and None axes have been normalized away.
Note that some numpy reduction functions cannot reduce along multiple
axes at once and strictly require an int in input. Such functions have
to be wrapped to cope.
keepdims: bool
Whether the reduction function should preserve the reduced axes or
remove them.
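Examples
--------
A minimal sketch of a custom reduction (illustrative only; ``sum`` below is
built on this machinery in essentially the same way):

>>> import dask.array as da  # doctest: +SKIP
>>> x = da.ones((100, 100), chunks=(10, 10))  # doctest: +SKIP
>>> total = reduction(x, chunk=np.sum, aggregate=np.sum,
...                   axis=0, dtype='f8')  # doctest: +SKIP
>>> total.shape  # doctest: +SKIP
(100,)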
"""
if axis is None:
axis = tuple(range(x.ndim))
if isinstance(axis, Integral):
axis = (axis,)
axis = validate_axis(axis, x.ndim)
if dtype is None:
raise ValueError("Must specify dtype")
if "dtype" in getargspec(chunk).args:
chunk = partial(chunk, dtype=dtype)
if "dtype" in getargspec(aggregate).args:
aggregate = partial(aggregate, dtype=dtype)
if is_series_like(x):
x = x.values
# Map chunk across all blocks
inds = tuple(range(x.ndim))
# The dtype of `tmp` doesn't actually matter, and may be incorrect.
tmp = blockwise(
chunk, inds, x, inds, axis=axis, keepdims=True, token=name, dtype=dtype or float
)
tmp._chunks = tuple(
(output_size,) * len(c) if i in axis else c for i, c in enumerate(tmp.chunks)
)
if meta is None and hasattr(x, "_meta"):
try:
reduced_meta = compute_meta(
chunk, x.dtype, x._meta, axis=axis, keepdims=True, computing_meta=True
)
except TypeError:
reduced_meta = compute_meta(
chunk, x.dtype, x._meta, axis=axis, keepdims=True
)
except ValueError:
pass
else:
reduced_meta = None
result = _tree_reduce(
tmp,
aggregate,
axis,
keepdims,
dtype,
split_every,
combine,
name=name,
concatenate=concatenate,
reduced_meta=reduced_meta,
)
if keepdims and output_size != 1:
result._chunks = tuple(
(output_size,) if i in axis else c for i, c in enumerate(tmp.chunks)
)
if meta is not None:
result._meta = meta
return handle_out(out, result)
def _tree_reduce(
x,
aggregate,
axis,
keepdims,
dtype,
split_every=None,
combine=None,
name=None,
concatenate=True,
reduced_meta=None,
):
"""Perform the tree reduction step of a reduction.
Lower level, users should use ``reduction`` or ``arg_reduction`` directly.
"""
# Normalize split_every
split_every = split_every or config.get("split_every", 4)
if isinstance(split_every, dict):
split_every = dict((k, split_every.get(k, 2)) for k in axis)
elif isinstance(split_every, Integral):
n = builtins.max(int(split_every ** (1 / (len(axis) or 1))), 2)
split_every = dict.fromkeys(axis, n)
else:
raise ValueError("split_every must be a int or a dict")
# Reduce across intermediates
depth = 1
for i, n in enumerate(x.numblocks):
if i in split_every and split_every[i] != 1:
depth = int(builtins.max(depth, ceil(log(n, split_every[i]))))
func = partial(combine or aggregate, axis=axis, keepdims=True)
if concatenate:
func = compose(func, partial(_concatenate2, axes=axis))
for i in range(depth - 1):
x = partial_reduce(
func,
x,
split_every,
True,
dtype=dtype,
name=(name or funcname(combine or aggregate)) + "-partial",
reduced_meta=reduced_meta,
)
func = partial(aggregate, axis=axis, keepdims=keepdims)
if concatenate:
func = compose(func, partial(_concatenate2, axes=axis))
return partial_reduce(
func,
x,
split_every,
keepdims=keepdims,
dtype=dtype,
name=(name or funcname(aggregate)) + "-aggregate",
reduced_meta=reduced_meta,
)
def partial_reduce(
func, x, split_every, keepdims=False, dtype=None, name=None, reduced_meta=None
):
"""Partial reduction across multiple axes.
Parameters
----------
func : function
x : Array
split_every : dict
Maximum reduction block sizes in each dimension.
Examples
--------
Reduce across axis 0 and 2, merging a maximum of 1 block in the 0th
dimension, and 3 blocks in the 2nd dimension:
>>> partial_reduce(np.min, x, {0: 1, 2: 3}) # doctest: +SKIP
"""
name = (
(name or funcname(func)) + "-" + tokenize(func, x, split_every, keepdims, dtype)
)
parts = [
list(partition_all(split_every.get(i, 1), range(n)))
for (i, n) in enumerate(x.numblocks)
]
keys = product(*map(range, map(len, parts)))
out_chunks = [
tuple(1 for p in partition_all(split_every[i], c)) if i in split_every else c
for (i, c) in enumerate(x.chunks)
]
if not keepdims:
out_axis = [i for i in range(x.ndim) if i not in split_every]
getter = lambda k: get(out_axis, k)
keys = map(getter, keys)
out_chunks = list(getter(out_chunks))
dsk = {}
for k, p in zip(keys, product(*parts)):
decided = dict((i, j[0]) for (i, j) in enumerate(p) if len(j) == 1)
dummy = dict(i for i in enumerate(p) if i[0] not in decided)
g = lol_tuples((x.name,), range(x.ndim), decided, dummy)
dsk[(name,) + k] = (func, g)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])
meta = x._meta
if reduced_meta is not None:
try:
meta = func(reduced_meta, computing_meta=True)
# no meta keyword argument exists for func, and it isn't required
except TypeError:
try:
meta = func(reduced_meta)
except ValueError as e:
# min/max functions have no identity, don't apply function to meta
if "zero-size array to reduction operation" in str(e):
meta = reduced_meta
# when no work can be computed on the empty array (e.g., func is a ufunc)
except ValueError:
pass
# some functions can't compute empty arrays (those for which reduced_meta
# fall into the ValueError exception) and we have to rely on reshaping
# the array according to len(out_chunks)
if is_arraylike(meta) and meta.ndim != len(out_chunks):
if len(out_chunks) == 0:
meta = meta.sum()
else:
meta = meta.reshape((0,) * len(out_chunks))
if np.isscalar(meta):
return Array(graph, name, out_chunks, dtype=dtype)
else:
with ignoring(AttributeError):
meta = meta.astype(dtype)
return Array(graph, name, out_chunks, meta=meta)
@derived_from(np)
def sum(a, axis=None, dtype=None, keepdims=False, split_every=None, out=None):
if dtype is None:
dtype = getattr(np.zeros(1, dtype=a.dtype).sum(), "dtype", object)
result = reduction(
a,
chunk.sum,
chunk.sum,
axis=axis,
keepdims=keepdims,
dtype=dtype,
split_every=split_every,
out=out,
)
return result
@derived_from(np)
def prod(a, axis=None, dtype=None, keepdims=False, split_every=None, out=None):
if dtype is not None:
dt = dtype
else:
dt = getattr(np.empty((1,), dtype=a.dtype).prod(), "dtype", object)
return reduction(
a,
chunk.prod,
chunk.prod,
axis=axis,
keepdims=keepdims,
dtype=dt,
split_every=split_every,
out=out,
)
@implements(np.min, np.amin)
@derived_from(np)
def min(a, axis=None, keepdims=False, split_every=None, out=None):
return reduction(
a,
chunk.min,
chunk.min,
axis=axis,
keepdims=keepdims,
dtype=a.dtype,
split_every=split_every,
out=out,
)
@implements(np.max, np.amax)
@derived_from(np)
def max(a, axis=None, keepdims=False, split_every=None, out=None):
return reduction(
a,
chunk.max,
chunk.max,
axis=axis,
keepdims=keepdims,
dtype=a.dtype,
split_every=split_every,
out=out,
)
@derived_from(np)
def any(a, axis=None, keepdims=False, split_every=None, out=None):
return reduction(
a,
chunk.any,
chunk.any,
axis=axis,
keepdims=keepdims,
dtype="bool",
split_every=split_every,
out=out,
)
@derived_from(np)
def all(a, axis=None, keepdims=False, split_every=None, out=None):
return reduction(
a,
chunk.all,
chunk.all,
axis=axis,
keepdims=keepdims,
dtype="bool",
split_every=split_every,
out=out,
)
@derived_from(np)
def nansum(a, axis=None, dtype=None, keepdims=False, split_every=None, out=None):
if dtype is not None:
dt = dtype
else:
dt = getattr(chunk.nansum(np.empty((1,), dtype=a.dtype)), "dtype", object)
return reduction(
a,
chunk.nansum,
chunk.sum,
axis=axis,
keepdims=keepdims,
dtype=dt,
split_every=split_every,
out=out,
)
with ignoring(AttributeError):
@derived_from(np)
def nanprod(a, axis=None, dtype=None, keepdims=False, split_every=None, out=None):
if dtype is not None:
dt = dtype
else:
dt = getattr(chunk.nansum(np.empty((1,), dtype=a.dtype)), "dtype", object)
return reduction(
a,
chunk.nanprod,
chunk.prod,
axis=axis,
keepdims=keepdims,
dtype=dt,
split_every=split_every,
out=out,
)
@derived_from(np)
def nancumsum(x, axis, dtype=None, out=None, *, method="sequential"):
"""Dask added an additional keyword-only argument ``method``.
method : {'sequential', 'blelloch'}, optional
Choose which method to use to perform the cumsum. Default is 'sequential'.
* 'sequential' performs the cumsum of each prior block before the current block.
* 'blelloch' is a work-efficient parallel cumsum. It exposes parallelism by
first taking the sum of each block and combines the sums via a binary tree.
This method may be faster or more memory efficient depending on workload,
scheduler, and hardware. More benchmarking is necessary.
"""
return cumreduction(
chunk.nancumsum,
operator.add,
0,
x,
axis,
dtype,
out=out,
method=method,
preop=np.nansum,
)
@derived_from(np)
def nancumprod(x, axis, dtype=None, out=None, *, method="sequential"):
"""Dask added an additional keyword-only argument ``method``.
method : {'sequential', 'blelloch'}, optional
Choose which method to use to perform the cumprod. Default is 'sequential'.
* 'sequential' performs the cumprod of each prior block before the current block.
* 'blelloch' is a work-efficient parallel cumprod. It exposes parallelism by first
taking the product of each block and combines the products via a binary tree.
This method may be faster or more memory efficient depending on workload,
scheduler, and hardware. More benchmarking is necessary.
"""
return cumreduction(
chunk.nancumprod,
operator.mul,
1,
x,
axis,
dtype,
out=out,
method=method,
preop=np.nanprod,
)
@derived_from(np)
def nanmin(a, axis=None, keepdims=False, split_every=None, out=None):
return reduction(
a,
chunk.nanmin,
chunk.nanmin,
axis=axis,
keepdims=keepdims,
dtype=a.dtype,
split_every=split_every,
out=out,
)
@derived_from(np)
def nanmax(a, axis=None, keepdims=False, split_every=None, out=None):
return reduction(
a,
chunk.nanmax,
chunk.nanmax,
axis=axis,
keepdims=keepdims,
dtype=a.dtype,
split_every=split_every,
out=out,
)
def numel(x, **kwargs):
""" A reduction to count the number of elements """
if hasattr(x, "mask"):
return chunk.sum(np.ones_like(x), **kwargs)
shape = x.shape
keepdims = kwargs.get("keepdims", False)
axis = kwargs.get("axis", None)
dtype = kwargs.get("dtype", np.float64)
if axis is None:
prod = np.prod(shape, dtype=dtype)
return (
full_like_safe(x, prod, shape=(1,) * len(shape), dtype=dtype)
if keepdims is True
else prod
)
if not isinstance(axis, (tuple, list)):
axis = [axis]
prod = np.prod([shape[dim] for dim in axis])
if keepdims is True:
new_shape = tuple(
shape[dim] if dim not in axis else 1 for dim in range(len(shape))
)
else:
new_shape = tuple(shape[dim] for dim in range(len(shape)) if dim not in axis)
return full_like_safe(x, prod, shape=new_shape, dtype=dtype)
def nannumel(x, **kwargs):
""" A reduction to count the number of elements """
return chunk.sum(~(np.isnan(x)), **kwargs)
def mean_chunk(
x, sum=chunk.sum, numel=numel, dtype="f8", computing_meta=False, **kwargs
):
if computing_meta:
return x
n = numel(x, dtype=dtype, **kwargs)
total = sum(x, dtype=dtype, **kwargs)
return {"n": n, "total": total}
def mean_combine(
pairs,
sum=chunk.sum,
numel=numel,
dtype="f8",
axis=None,
computing_meta=False,
**kwargs,
):
if not isinstance(pairs, list):
pairs = [pairs]
ns = deepmap(lambda pair: pair["n"], pairs) if not computing_meta else pairs
n = _concatenate2(ns, axes=axis).sum(axis=axis, **kwargs)
if computing_meta:
return n
totals = deepmap(lambda pair: pair["total"], pairs)
total = _concatenate2(totals, axes=axis).sum(axis=axis, **kwargs)
return {"n": n, "total": total}
def mean_agg(pairs, dtype="f8", axis=None, computing_meta=False, **kwargs):
ns = deepmap(lambda pair: pair["n"], pairs) if not computing_meta else pairs
n = _concatenate2(ns, axes=axis)
n = np.sum(n, axis=axis, dtype=dtype, **kwargs)
if computing_meta:
return n
totals = deepmap(lambda pair: pair["total"], pairs)
total = _concatenate2(totals, axes=axis).sum(axis=axis, dtype=dtype, **kwargs)
return divide(total, n, dtype=dtype)
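# Illustrative note (not from the original source): for a chunk containing
# [1.0, 2.0, 3.0], mean_chunk reduces it to a dict holding the element count and
# the sum (roughly {'n': 3.0, 'total': 6.0}); mean_combine and mean_agg then sum
# the per-chunk 'n' and 'total' entries across the reduced axes, and mean_agg
# finally divides total by n to produce the mean.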
@derived_from(np)
def mean(a, axis=None, dtype=None, keepdims=False, split_every=None, out=None):
if dtype is not None:
dt = dtype
elif a.dtype == object:
dt = object
else:
dt = getattr(np.mean(np.zeros(shape=(1,), dtype=a.dtype)), "dtype", object)
return reduction(
a,
mean_chunk,
mean_agg,
axis=axis,
keepdims=keepdims,
dtype=dt,
split_every=split_every,
combine=mean_combine,
out=out,
concatenate=False,
)
@derived_from(np)
def nanmean(a, axis=None, dtype=None, keepdims=False, split_every=None, out=None):
if dtype is not None:
dt = dtype
else:
dt = getattr(np.mean(np.empty(shape=(1,), dtype=a.dtype)), "dtype", object)
return reduction(
a,
partial(mean_chunk, sum=chunk.nansum, numel=nannumel),
mean_agg,
axis=axis,
keepdims=keepdims,
dtype=dt,
split_every=split_every,
out=out,
concatenate=False,
combine=partial(mean_combine, sum=chunk.nansum, numel=nannumel),
)
with ignoring(AttributeError):
nanmean = derived_from(np)(nanmean)
def moment_chunk(
A, order=2, sum=chunk.sum, numel=numel, dtype="f8", computing_meta=False, **kwargs
):
if computing_meta:
return A
n = numel(A, **kwargs)
n = n.astype(np.int64)
total = sum(A, dtype=dtype, **kwargs)
with np.errstate(divide="ignore", invalid="ignore"):
u = total / n
xs = [sum((A - u) ** i, dtype=dtype, **kwargs) for i in range(2, order + 1)]
M = np.stack(xs, axis=-1)
return {"total": total, "n": n, "M": M}
def _moment_helper(Ms, ns, inner_term, order, sum, axis, kwargs):
M = Ms[..., order - 2].sum(axis=axis, **kwargs) + sum(
ns * inner_term ** order, axis=axis, **kwargs
)
for k in range(1, order - 1):
coeff = factorial(order) / (factorial(k) * factorial(order - k))
M += coeff * sum(Ms[..., order - k - 2] * inner_term ** k, axis=axis, **kwargs)
return M
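# Illustrative note (not from the original source): _moment_helper recombines the
# per-block central sums about each block mean into a central sum about the global
# mean via the binomial expansion
#   sum((x - mu)**p) = sum_k C(p, k) * sum((x - mu_b)**(p - k)) * (mu_b - mu)**k
# where mu_b is the block mean. The k = p - 1 term vanishes because
# sum(x - mu_b) == 0, which is why the loop above only covers k = 1 .. p - 2 and
# the k = 0 and k = p terms are added separately.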
def moment_combine(
pairs,
order=2,
ddof=0,
dtype="f8",
sum=np.sum,
axis=None,
computing_meta=False,
**kwargs,
):
if not isinstance(pairs, list):
pairs = [pairs]
kwargs["dtype"] = dtype
kwargs["keepdims"] = True
ns = deepmap(lambda pair: pair["n"], pairs) if not computing_meta else pairs
ns = _concatenate2(ns, axes=axis)
n = ns.sum(axis=axis, **kwargs)
if computing_meta:
return n
totals = _concatenate2(deepmap(lambda pair: pair["total"], pairs), axes=axis)
Ms = _concatenate2(deepmap(lambda pair: pair["M"], pairs), axes=axis)
total = totals.sum(axis=axis, **kwargs)
with np.errstate(divide="ignore", invalid="ignore"):
mu = divide(total, n, dtype=dtype)
inner_term = divide(totals, ns, dtype=dtype) - mu
xs = [
_moment_helper(Ms, ns, inner_term, o, sum, axis, kwargs)
for o in range(2, order + 1)
]
M = np.stack(xs, axis=-1)
return {"total": total, "n": n, "M": M}
def moment_agg(
pairs,
order=2,
ddof=0,
dtype="f8",
sum=np.sum,
axis=None,
computing_meta=False,
**kwargs,
):
if not isinstance(pairs, list):
pairs = [pairs]
kwargs["dtype"] = dtype
# To properly handle ndarrays, the original dimensions need to be kept for
# part of the calculation.
keepdim_kw = kwargs.copy()
keepdim_kw["keepdims"] = True
ns = deepmap(lambda pair: pair["n"], pairs) if not computing_meta else pairs
ns = _concatenate2(ns, axes=axis)
n = ns.sum(axis=axis, **keepdim_kw)
if computing_meta:
return n
totals = _concatenate2(deepmap(lambda pair: pair["total"], pairs), axes=axis)
Ms = _concatenate2(deepmap(lambda pair: pair["M"], pairs), axes=axis)
mu = divide(totals.sum(axis=axis, **keepdim_kw), n, dtype=dtype)
with np.errstate(divide="ignore", invalid="ignore"):
inner_term = divide(totals, ns, dtype=dtype) - mu
M = _moment_helper(Ms, ns, inner_term, order, sum, axis, kwargs)
denominator = n.sum(axis=axis, **kwargs) - ddof
# taking care of the edge case with empty or all-nans array with ddof > 0
if isinstance(denominator, Number):
if denominator < 0:
denominator = np.nan
elif denominator is not np.ma.masked:
denominator[denominator < 0] = np.nan
return divide(M, denominator, dtype=dtype)
def moment(
a, order, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None
):
if not isinstance(order, Integral) or order < 0:
raise ValueError("Order must be an integer >= 0")
if order < 2:
reduced = a.sum(axis=axis) # get reduced shape and chunks
if order == 0:
# When order equals 0, the result is 1, by definition.
return ones(
reduced.shape, chunks=reduced.chunks, dtype="f8", meta=reduced._meta
)
# By definition the first order about the mean is 0.
return zeros(
reduced.shape, chunks=reduced.chunks, dtype="f8", meta=reduced._meta
)
if dtype is not None:
dt = dtype
else:
dt = getattr(np.var(np.ones(shape=(1,), dtype=a.dtype)), "dtype", object)
return reduction(
a,
partial(moment_chunk, order=order),
partial(moment_agg, order=order, ddof=ddof),
axis=axis,
keepdims=keepdims,
dtype=dt,
split_every=split_every,
out=out,
concatenate=False,
combine=partial(moment_combine, order=order),
)
@derived_from(np)
def var(a, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None):
if dtype is not None:
dt = dtype
else:
dt = getattr(np.var(np.ones(shape=(1,), dtype=a.dtype)), "dtype", object)
return reduction(
a,
moment_chunk,
partial(moment_agg, ddof=ddof),
axis=axis,
keepdims=keepdims,
dtype=dt,
split_every=split_every,
combine=moment_combine,
name="var",
out=out,
concatenate=False,
)
@derived_from(np)
def nanvar(
a, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None
):
if dtype is not None:
dt = dtype
else:
dt = getattr(np.var(np.ones(shape=(1,), dtype=a.dtype)), "dtype", object)
return reduction(
a,
partial(moment_chunk, sum=chunk.nansum, numel=nannumel),
partial(moment_agg, sum=np.nansum, ddof=ddof),
axis=axis,
keepdims=keepdims,
dtype=dt,
split_every=split_every,
combine=partial(moment_combine, sum=np.nansum),
out=out,
concatenate=False,
)
with ignoring(AttributeError):
nanvar = derived_from(np)(nanvar)
def _sqrt(a):
o = np.sqrt(a)
if isinstance(o, np.ma.masked_array) and not o.shape and o.mask.all():
return np.ma.masked
return o
def safe_sqrt(a):
"""A version of sqrt that properly handles scalar masked arrays.
To mimic ``np.ma`` reductions, we need to convert scalar masked arrays that
have an active mask to the ``np.ma.masked`` singleton. This is properly
handled automatically for reduction code, but not for ufuncs. We implement
a simple version here, since calling `np.ma.sqrt` everywhere is
significantly more expensive.
"""
if hasattr(a, "_elemwise"):
return a._elemwise(_sqrt, a)
return _sqrt(a)
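# Illustrative note (not from the original source): for a zero-dimensional,
# fully-masked input such as np.ma.masked_array(1.0, mask=True), plain np.sqrt
# returns another 0-d masked array, whereas safe_sqrt returns the np.ma.masked
# singleton, matching what np.ma reductions would produce.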
@derived_from(np)
def std(a, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None):
result = safe_sqrt(
var(
a,
axis=axis,
dtype=dtype,
keepdims=keepdims,
ddof=ddof,
split_every=split_every,
out=out,
)
)
if dtype and dtype != result.dtype:
result = result.astype(dtype)
return result
@derived_from(np)
def nanstd(
a, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None
):
result = safe_sqrt(
nanvar(
a,
axis=axis,
dtype=dtype,
keepdims=keepdims,
ddof=ddof,
split_every=split_every,
out=out,
)
)
if dtype and dtype != result.dtype:
result = result.astype(dtype)
return result
with ignoring(AttributeError):
nanstd = derived_from(np)(nanstd)
def _arg_combine(data, axis, argfunc, keepdims=False):
""" Merge intermediate results from ``arg_*`` functions"""
axis = None if len(axis) == data.ndim or data.ndim == 1 else axis[0]
vals = data["vals"]
arg = data["arg"]
if axis is None:
local_args = argfunc(vals, axis=axis, keepdims=keepdims)
vals = vals.ravel()[local_args]
arg = arg.ravel()[local_args]
else:
local_args = argfunc(vals, axis=axis)
inds = np.ogrid[tuple(map(slice, local_args.shape))]
inds.insert(axis, local_args)
inds = tuple(inds)
vals = vals[inds]
arg = arg[inds]
if keepdims:
vals = np.expand_dims(vals, axis)
arg = np.expand_dims(arg, axis)
return arg, vals
def arg_chunk(func, argfunc, x, axis, offset_info):
arg_axis = None if len(axis) == x.ndim or x.ndim == 1 else axis[0]
vals = func(x, axis=arg_axis, keepdims=True)
arg = argfunc(x, axis=arg_axis, keepdims=True)
if arg_axis is None:
offset, total_shape = offset_info
ind = np.unravel_index(arg.ravel()[0], x.shape)
total_ind = tuple(o + i for (o, i) in zip(offset, ind))
arg[:] = np.ravel_multi_index(total_ind, total_shape)
else:
arg += offset_info
if isinstance(vals, np.ma.masked_array):
if "min" in argfunc.__name__:
fill_value = np.ma.minimum_fill_value(vals)
else:
fill_value = np.ma.maximum_fill_value(vals)
vals = np.ma.filled(vals, fill_value)
result = np.empty(
shape=vals.shape, dtype=[("vals", vals.dtype), ("arg", arg.dtype)]
)
result["vals"] = vals
result["arg"] = arg
return result
def arg_combine(func, argfunc, data, axis=None, **kwargs):
arg, vals = _arg_combine(data, axis, argfunc, keepdims=True)
result = np.empty(
shape=vals.shape, dtype=[("vals", vals.dtype), ("arg", arg.dtype)]
)
result["vals"] = vals
result["arg"] = arg
return result
def arg_agg(func, argfunc, data, axis=None, **kwargs):
return _arg_combine(data, axis, argfunc, keepdims=False)[0]
def nanarg_agg(func, argfunc, data, axis=None, **kwargs):
arg, vals = _arg_combine(data, axis, argfunc, keepdims=False)
if np.any(np.isnan(vals)):
raise ValueError("All NaN slice encountered")
return arg
def arg_reduction(x, chunk, combine, agg, axis=None, split_every=None, out=None):
"""Generic function for argreduction.
Parameters
----------
x : Array
chunk : callable
Partialed ``arg_chunk``.
combine : callable
Partialed ``arg_combine``.
agg : callable
Partialed ``arg_agg``.
axis : int, optional
split_every : int or dict, optional
"""
if axis is None:
axis = tuple(range(x.ndim))
ravel = True
elif isinstance(axis, Integral):
axis = validate_axis(axis, x.ndim)
axis = (axis,)
ravel = x.ndim == 1
else:
raise TypeError("axis must be either `None` or int, got '{0}'".format(axis))
for ax in axis:
chunks = x.chunks[ax]
if len(chunks) > 1 and np.isnan(chunks).any():
raise ValueError(
"Arg-reductions do not work with arrays that have "
"unknown chunksizes. At some point in your computation "
"this array lost chunking information.\n\n"
"A possible solution is with \n"
" x.compute_chunk_sizes()"
)
# Map chunk across all blocks
name = "arg-reduce-{0}".format(tokenize(axis, x, chunk, combine, split_every))
old = x.name
keys = list(product(*map(range, x.numblocks)))
offsets = list(product(*(accumulate(operator.add, bd[:-1], 0) for bd in x.chunks)))
if ravel:
offset_info = zip(offsets, repeat(x.shape))
else:
offset_info = pluck(axis[0], offsets)
chunks = tuple((1,) * len(c) if i in axis else c for (i, c) in enumerate(x.chunks))
dsk = dict(
((name,) + k, (chunk, (old,) + k, axis, off))
for (k, off) in zip(keys, offset_info)
)
# The dtype of `tmp` doesn't actually matter, just need to provide something
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])
tmp = Array(graph, name, chunks, dtype=x.dtype)
dtype = np.argmin([1]).dtype
result = _tree_reduce(tmp, agg, axis, False, dtype, split_every, combine)
return handle_out(out, result)
def make_arg_reduction(func, argfunc, is_nan_func=False):
"""Create an argreduction callable
Parameters
----------
func : callable
The reduction (e.g. ``min``)
argfunc : callable
The argreduction (e.g. ``argmin``)
"""
chunk = partial(arg_chunk, func, argfunc)
combine = partial(arg_combine, func, argfunc)
if is_nan_func:
agg = partial(nanarg_agg, func, argfunc)
else:
agg = partial(arg_agg, func, argfunc)
def wrapped(x, axis=None, split_every=None, out=None):
return arg_reduction(
x, chunk, combine, agg, axis, split_every=split_every, out=out
)
wrapped.__name__ = func.__name__
return derived_from(np)(wrapped)
def _nanargmin(x, axis, **kwargs):
try:
return chunk.nanargmin(x, axis, **kwargs)
except ValueError:
return chunk.nanargmin(np.where(np.isnan(x), np.inf, x), axis, **kwargs)
def _nanargmax(x, axis, **kwargs):
try:
return chunk.nanargmax(x, axis, **kwargs)
except ValueError:
return chunk.nanargmax(np.where(np.isnan(x), -np.inf, x), axis, **kwargs)
argmin = make_arg_reduction(chunk.min, chunk.argmin)
argmax = make_arg_reduction(chunk.max, chunk.argmax)
nanargmin = make_arg_reduction(chunk.nanmin, _nanargmin, True)
nanargmax = make_arg_reduction(chunk.nanmax, _nanargmax, True)
def _prefixscan_combine(func, binop, pre, x, axis, dtype):
"""Combine results of a parallel prefix scan such as cumsum
Parameters
----------
func : callable
Cumulative function (e.g. ``np.cumsum``)
binop : callable
Associative function (e.g. ``add``)
pre : np.array
The value calculated in parallel from ``preop``.
For example, the sum of all the previous blocks.
x : np.array
Current block
axis : int
dtype : dtype
Returns
-------
np.array
"""
# We could compute this in two tasks.
# This would allow us to do useful work (i.e., func), while waiting on `pre`.
# Using one task may guide the scheduler to do better and reduce scheduling overhead.
return binop(pre, func(x, axis=axis, dtype=dtype))
def _prefixscan_first(func, x, axis, dtype):
"""Compute the prefix scan (e.g., cumsum) on the first block
Parameters
----------
func : callable
Cumulative function (e.g. ``np.cumsum``)
x : np.array
Current block
axis : int
dtype : dtype
Returns
-------
np.array
"""
return func(x, axis=axis, dtype=dtype)
def prefixscan_blelloch(func, preop, binop, x, axis=None, dtype=None, out=None):
"""Generic function to perform parallel cumulative scan (a.k.a prefix scan)
The Blelloch prefix scan is work-efficient and exposes parallelism.
A parallel cumsum works by first taking the sum of each block, then doing a binary tree
merge followed by a fan-out (i.e., the Brent-Kung pattern). We then take the cumsum
of each block and add the sum of the previous blocks.
When performing a cumsum across N chunks, this method has 2 * lg(N) levels of dependencies.
In contrast, the sequential method has N levels of dependencies.
Floating point operations should be more accurate with this method compared to sequential.
Parameters
----------
func : callable
Cumulative function (e.g. ``np.cumsum``)
preop : callable
Function to get the final value of a cumulative function (e.g., ``np.sum``)
binop : callable
Associative function (e.g. ``add``)
x : dask array
axis : int
dtype : dtype
Returns
-------
dask array
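Examples
--------
Illustrative only; this function is normally reached through the public
cumulative reductions:

>>> import dask.array as da  # doctest: +SKIP
>>> x = da.arange(8, chunks=2)  # doctest: +SKIP
>>> da.cumsum(x, method='blelloch').compute()  # doctest: +SKIP
array([ 0,  1,  3,  6, 10, 15, 21, 28])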
"""
if axis is None:
x = x.flatten()
axis = 0
if dtype is None:
dtype = getattr(func(np.empty((0,), dtype=x.dtype)), "dtype", object)
assert isinstance(axis, Integral)
axis = validate_axis(axis, x.ndim)
name = "{0}-{1}".format(func.__name__, tokenize(func, axis, preop, binop, x, dtype))
base_key = (name,)
# Right now, the metadata for batches is incorrect, but this should be okay
batches = x.map_blocks(preop, axis=axis, keepdims=True, dtype=dtype)
# We don't need the last index until the end
*indices, last_index = full_indices = [
list(
product(
*[range(nb) if j != axis else [i] for j, nb in enumerate(x.numblocks)]
)
)
for i in range(x.numblocks[axis])
]
prefix_vals = [[(batches.name,) + index for index in vals] for vals in indices]
dsk = {}
n_vals = len(prefix_vals)
level = 0
if n_vals >= 2:
# Upsweep
stride = 1
stride2 = 2
while stride2 <= n_vals:
for i in range(stride2 - 1, n_vals, stride2):
new_vals = []
for index, left_val, right_val in zip(
indices[i], prefix_vals[i - stride], prefix_vals[i]
):
key = base_key + index + (level, i)
dsk[key] = (binop, left_val, right_val)
new_vals.append(key)
prefix_vals[i] = new_vals
stride = stride2
stride2 *= 2
level += 1
# Downsweep
# With `n_vals == 3`, we would have `stride = 1` and `stride = 0`, but we need
# to do a downsweep iteration, so make sure stride2 is at least 2.
stride2 = builtins.max(2, 2 ** ceil(log2(n_vals // 2)))
stride = stride2 // 2
while stride > 0:
for i in range(stride2 + stride - 1, n_vals, stride2):
new_vals = []
for index, left_val, right_val in zip(
indices[i], prefix_vals[i - stride], prefix_vals[i]
):
key = base_key + index + (level, i)
dsk[key] = (binop, left_val, right_val)
new_vals.append(key)
prefix_vals[i] = new_vals
stride2 = stride
stride //= 2
level += 1
if full_indices:
for index in full_indices[0]:
dsk[base_key + index] = (
_prefixscan_first,
func,
(x.name,) + index,
axis,
dtype,
)
for indexes, vals in zip(drop(1, full_indices), prefix_vals):
for index, val in zip(indexes, vals):
dsk[base_key + index] = (
_prefixscan_combine,
func,
binop,
val,
(x.name,) + index,
axis,
dtype,
)
if len(full_indices) < 2:
deps = [x]
else:
deps = [x, batches]
graph = HighLevelGraph.from_collections(name, dsk, dependencies=deps)
result = Array(graph, name, x.chunks, batches.dtype)
return handle_out(out, result)
def cumreduction(
func,
binop,
ident,
x,
axis=None,
dtype=None,
out=None,
method="sequential",
preop=None,
):
"""Generic function for cumulative reduction
Parameters
----------
func: callable
Cumulative function like np.cumsum or np.cumprod
binop: callable
Associated binary operator like ``np.cumsum->add`` or ``np.cumprod->mul``
ident: Number
Associated identity like ``np.cumsum->0`` or ``np.cumprod->1``
x: dask Array
axis: int
dtype: dtype
method : {'sequential', 'blelloch'}, optional
Choose which method to use to perform the cumsum. Default is 'sequential'.
* 'sequential' performs the scan of each prior block before the current block.
* 'blelloch' is a work-efficient parallel scan. It exposes parallelism by first
calling ``preop`` on each block and combines the values via a binary tree.
This method may be faster or more memory efficient depending on workload,
scheduler, and hardware. More benchmarking is necessary.
preop: callable, optional
Function used by 'blelloch' method like `np.cumsum->np.sum`` or ``np.cumprod->np.prod``
Returns
-------
dask array
See also
--------
cumsum
cumprod
"""
if method == "blelloch":
if preop is None:
raise TypeError(
'cumreduction with "blelloch" method required `preop=` argument'
)
return prefixscan_blelloch(func, preop, binop, x, axis, dtype, out=out)
elif method != "sequential":
raise ValueError(
f'Invalid method for cumreduction. Expected "sequential" or "blelloch". Got: {method!r}'
)
if axis is None:
x = x.flatten()
axis = 0
if dtype is None:
dtype = getattr(func(np.empty((0,), dtype=x.dtype)), "dtype", object)
assert isinstance(axis, Integral)
axis = validate_axis(axis, x.ndim)
m = x.map_blocks(func, axis=axis, dtype=dtype)
name = "{0}-{1}".format(func.__name__, tokenize(func, axis, binop, ident, x, dtype))
n = x.numblocks[axis]
full = slice(None, None, None)
slc = (full,) * axis + (slice(-1, None),) + (full,) * (x.ndim - axis - 1)
indices = list(
product(*[range(nb) if i != axis else [0] for i, nb in enumerate(x.numblocks)])
)
dsk = dict()
for ind in indices:
shape = tuple(x.chunks[i][ii] if i != axis else 1 for i, ii in enumerate(ind))
dsk[(name, "extra") + ind] = (np.full, shape, ident, m.dtype)
dsk[(name,) + ind] = (m.name,) + ind
for i in range(1, n):
last_indices = indices
indices = list(
product(
*[range(nb) if ii != axis else [i] for ii, nb in enumerate(x.numblocks)]
)
)
for old, ind in zip(last_indices, indices):
this_slice = (name, "extra") + ind
dsk[this_slice] = (
binop,
(name, "extra") + old,
(operator.getitem, (m.name,) + old, slc),
)
dsk[(name,) + ind] = (binop, this_slice, (m.name,) + ind)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[m])
result = Array(graph, name, x.chunks, m.dtype)
return handle_out(out, result)
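# Illustrative sketch (not part of the original module): building a cumulative
# sum directly through ``cumreduction``; ``_cumsum_merge`` is defined just
# below, and the expected output is an assumption based on the docstring.
#
#   import dask.array as da
#   x = da.arange(8, chunks=4)
#   y = cumreduction(np.cumsum, _cumsum_merge, 0, x, axis=0,
#                    method="blelloch", preop=np.sum)
#   y.compute()   # -> array([ 0,  1,  3,  6, 10, 15, 21, 28])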
def _cumsum_merge(a, b):
if isinstance(a, np.ma.masked_array) or isinstance(b, np.ma.masked_array):
values = np.ma.getdata(a) + np.ma.getdata(b)
return np.ma.masked_array(values, mask=np.ma.getmaskarray(b))
return a + b
def _cumprod_merge(a, b):
if isinstance(a, np.ma.masked_array) or isinstance(b, np.ma.masked_array):
values = np.ma.getdata(a) * np.ma.getdata(b)
return np.ma.masked_array(values, mask=np.ma.getmaskarray(b))
return a * b
@derived_from(np)
def cumsum(x, axis=None, dtype=None, out=None, method="sequential"):
"""Dask added an additional keyword-only argument ``method``.
method : {'sequential', 'blelloch'}, optional
Choose which method to use to perform the cumsum. Default is 'sequential'.
* 'sequential' performs the cumsum of each prior block before the current block.
* 'blelloch' is a work-efficient parallel cumsum. It exposes parallelism by
first taking the sum of each block and combines the sums via a binary tree.
This method may be faster or more memory efficient depending on workload,
scheduler, and hardware. More benchmarking is necessary.
"""
return cumreduction(
np.cumsum,
_cumsum_merge,
0,
x,
axis,
dtype,
out=out,
method=method,
preop=np.sum,
)
@derived_from(np)
def cumprod(x, axis=None, dtype=None, out=None, method="sequential"):
"""Dask added an additional keyword-only argument ``method``.
method : {'sequential', 'blelloch'}, optional
Choose which method to use to perform the cumprod. Default is 'sequential'.
* 'sequential' performs the cumprod of each prior block before the current block.
* 'blelloch' is a work-efficient parallel cumprod. It exposes parallelism by first
taking the product of each block and combines the products via a binary tree.
This method may be faster or more memory efficient depending on workload,
scheduler, and hardware. More benchmarking is necessary.
"""
return cumreduction(
np.cumprod,
_cumprod_merge,
1,
x,
axis,
dtype,
out=out,
method=method,
preop=np.prod,
)
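# Illustrative sketch (not in the original module): the ``method`` keyword on
# the wrappers defined above; ``da`` is assumed to be ``dask.array``.
#
#   import dask.array as da
#   x = da.ones((8,), chunks=2)
#   cumsum(x, method="blelloch").compute()   # -> array([1., 2., ..., 8.])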
def topk(a, k, axis=-1, split_every=None):
"""Extract the k largest elements from a on the given axis,
and return them sorted from largest to smallest.
If k is negative, extract the -k smallest elements instead,
and return them sorted from smallest to largest.
This performs best when ``k`` is much smaller than the chunk size. All
results will be returned in a single chunk along the given axis.
Parameters
----------
    a: Array
Data being sorted
k: int
axis: int, optional
split_every: int >=2, optional
        See :func:`reduce`. This parameter becomes very important when k is
        of the same order of magnitude as the chunk size or larger, as it
        prevents the whole input array, or a significant portion of it, from
        being held in memory at once, which would also hurt network transfer
        when running on the distributed scheduler.
Returns
-------
Selection of x with size abs(k) along the given axis.
Examples
--------
>>> import dask.array as da
>>> x = np.array([5, 1, 3, 6])
>>> d = da.from_array(x, chunks=2)
>>> d.topk(2).compute()
array([6, 5])
>>> d.topk(-2).compute()
array([1, 3])
"""
axis = validate_axis(axis, a.ndim)
# chunk and combine steps of the reduction, which recursively invoke
# np.partition to pick the top/bottom k elements from the previous step.
# The selection is not sorted internally.
chunk_combine = partial(chunk.topk, k=k)
# aggregate step of the reduction. Internally invokes the chunk/combine
# function, then sorts the results internally.
aggregate = partial(chunk.topk_aggregate, k=k)
return reduction(
a,
chunk=chunk_combine,
combine=chunk_combine,
aggregate=aggregate,
axis=axis,
keepdims=True,
dtype=a.dtype,
split_every=split_every,
output_size=abs(k),
)
def argtopk(a, k, axis=-1, split_every=None):
"""Extract the indices of the k largest elements from a on the given axis,
and return them sorted from largest to smallest. If k is negative, extract
the indices of the -k smallest elements instead, and return them sorted
from smallest to largest.
This performs best when ``k`` is much smaller than the chunk size. All
results will be returned in a single chunk along the given axis.
Parameters
----------
    a: Array
Data being sorted
k: int
axis: int, optional
split_every: int >=2, optional
See :func:`topk`. The performance considerations for topk also apply
here.
Returns
-------
Selection of np.intp indices of x with size abs(k) along the given axis.
Examples
--------
>>> import dask.array as da
>>> x = np.array([5, 1, 3, 6])
>>> d = da.from_array(x, chunks=2)
>>> d.argtopk(2).compute()
array([3, 0])
>>> d.argtopk(-2).compute()
array([1, 2])
"""
axis = validate_axis(axis, a.ndim)
# Generate nodes where every chunk is a tuple of (a, original index of a)
idx = arange(a.shape[axis], chunks=(a.chunks[axis],), dtype=np.intp)
idx = idx[tuple(slice(None) if i == axis else np.newaxis for i in range(a.ndim))]
a_plus_idx = a.map_blocks(chunk.argtopk_preprocess, idx, dtype=object)
# chunk and combine steps of the reduction. They acquire in input a tuple
# of (a, original indices of a) and return another tuple containing the top
# k elements of a and the matching original indices. The selection is not
# sorted internally, as in np.argpartition.
chunk_combine = partial(chunk.argtopk, k=k)
# aggregate step of the reduction. Internally invokes the chunk/combine
# function, then sorts the results internally, drops a and returns the
# index only.
aggregate = partial(chunk.argtopk_aggregate, k=k)
if isinstance(axis, Number):
naxis = 1
else:
naxis = len(axis)
meta = a._meta.astype(np.intp).reshape((0,) * (a.ndim - naxis + 1))
return reduction(
a_plus_idx,
chunk=chunk_combine,
combine=chunk_combine,
aggregate=aggregate,
axis=axis,
keepdims=True,
dtype=np.intp,
split_every=split_every,
concatenate=False,
output_size=abs(k),
meta=meta,
)
@derived_from(np)
def trace(a, offset=0, axis1=0, axis2=1, dtype=None):
return diagonal(a, offset=offset, axis1=axis1, axis2=axis2).sum(-1, dtype=dtype)
@derived_from(np)
def median(a, axis=None, keepdims=False, out=None):
"""
This works by automatically chunking the reduced axes to a single chunk
and then calling ``numpy.median`` function across the remaining dimensions
"""
if axis is None:
raise NotImplementedError(
"The da.median function only works along an axis. "
"The full algorithm is difficult to do in parallel"
)
if not isinstance(axis, Iterable):
axis = (axis,)
axis = [ax + a.ndim if ax < 0 else ax for ax in axis]
a = a.rechunk({ax: -1 if ax in axis else "auto" for ax in range(a.ndim)})
result = a.map_blocks(
np.median,
axis=axis,
keepdims=keepdims,
drop_axis=axis if not keepdims else None,
chunks=[1 if ax in axis else c for ax, c in enumerate(a.chunks)]
if keepdims
else None,
)
result = handle_out(out, result)
return result
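# Illustrative sketch (not in the original module): median along one axis of a
# chunked array; the chunk sizes here are arbitrary.
#
#   import dask.array as da
#   x = da.random.random((100, 100), chunks=(10, 100))
#   m = median(x, axis=0)    # axis 0 is rechunked to a single chunk internally
#   m.compute().shape        # -> (100,)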
@derived_from(np)
def nanmedian(a, axis=None, keepdims=False, out=None):
"""
This works by automatically chunking the reduced axes to a single chunk
and then calling ``numpy.nanmedian`` function across the remaining dimensions
"""
if axis is None:
raise NotImplementedError(
"The da.nanmedian function only works along an axis or a subset of axes. "
"The full algorithm is difficult to do in parallel"
)
if not isinstance(axis, Iterable):
axis = (axis,)
axis = [ax + a.ndim if ax < 0 else ax for ax in axis]
a = a.rechunk({ax: -1 if ax in axis else "auto" for ax in range(a.ndim)})
result = a.map_blocks(
np.nanmedian,
axis=axis,
keepdims=keepdims,
drop_axis=axis if not keepdims else None,
chunks=[1 if ax in axis else c for ax, c in enumerate(a.chunks)]
if keepdims
else None,
)
result = handle_out(out, result)
return result
| bsd-3-clause |
1suming/readthedocs.org | readthedocs/projects/migrations/0035_make_null.py | 13 | 12994 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Project.num_point'
db.alter_column('projects_project', 'num_point', self.gf('django.db.models.fields.IntegerField')(max_length=3, null=True))
# Changing field 'Project.num_minor'
db.alter_column('projects_project', 'num_minor', self.gf('django.db.models.fields.IntegerField')(max_length=3, null=True))
# Changing field 'Project.num_major'
db.alter_column('projects_project', 'num_major', self.gf('django.db.models.fields.IntegerField')(max_length=3, null=True))
def backwards(self, orm):
# Changing field 'Project.num_point'
db.alter_column('projects_project', 'num_point', self.gf('django.db.models.fields.IntegerField')(max_length=3))
# Changing field 'Project.num_minor'
db.alter_column('projects_project', 'num_minor', self.gf('django.db.models.fields.IntegerField')(max_length=3))
# Changing field 'Project.num_major'
db.alter_column('projects_project', 'num_major', self.gf('django.db.models.fields.IntegerField')(max_length=3))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'builds.version': {
'Meta': {'ordering': "['-verbose_name']", 'unique_together': "[('project', 'slug')]", 'object_name': 'Version'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'built': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'privacy_level': ('django.db.models.fields.CharField', [], {'default': "'public'", 'max_length': '20'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'versions'", 'to': "orm['projects.Project']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uploaded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'verbose_name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'projects.emailhook': {
'Meta': {'object_name': 'EmailHook'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'emailhook_notifications'", 'to': "orm['projects.Project']"})
},
'projects.importedfile': {
'Meta': {'object_name': 'ImportedFile'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'md5': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'imported_files'", 'to': "orm['projects.Project']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'version': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'imported_filed'", 'null': 'True', 'to': "orm['builds.Version']"})
},
'projects.project': {
'Meta': {'ordering': "('slug',)", 'object_name': 'Project'},
'analytics_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'conf_py_file': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'copyright': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'crate_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'default_branch': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_version': ('django.db.models.fields.CharField', [], {'default': "'latest'", 'max_length': '255'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'django_packages_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'documentation_type': ('django.db.models.fields.CharField', [], {'default': "'sphinx'", 'max_length': '20'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '20'}),
'main_language_project': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'translations'", 'null': 'True', 'to': "orm['projects.Project']"}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'num_major': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'max_length': '3', 'null': 'True', 'blank': 'True'}),
'num_minor': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'max_length': '3', 'null': 'True', 'blank': 'True'}),
'num_point': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'max_length': '3', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'privacy_level': ('django.db.models.fields.CharField', [], {'default': "'public'", 'max_length': '20'}),
'project_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'python_interpreter': ('django.db.models.fields.CharField', [], {'default': "'python'", 'max_length': '20'}),
'related_projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['projects.Project']", 'null': 'True', 'through': "orm['projects.ProjectRelationship']", 'blank': 'True'}),
'repo': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'repo_type': ('django.db.models.fields.CharField', [], {'default': "'git'", 'max_length': '10'}),
'requirements_file': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'skip': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'suffix': ('django.db.models.fields.CharField', [], {'default': "'.rst'", 'max_length': '10'}),
'theme': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '20'}),
'use_system_packages': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'use_virtualenv': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'projects'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'version_privacy_level': ('django.db.models.fields.CharField', [], {'default': "'public'", 'max_length': '20'})
},
'projects.projectrelationship': {
'Meta': {'object_name': 'ProjectRelationship'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'superprojects'", 'to': "orm['projects.Project']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subprojects'", 'to': "orm['projects.Project']"})
},
'projects.webhook': {
'Meta': {'object_name': 'WebHook'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'webhook_notifications'", 'to': "orm['projects.Project']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
}
}
complete_apps = ['projects'] | mit |
wilx/autoconf-archive | macro.py | 9 | 4436 | #! /usr/bin/env python
from contextlib import closing
import os, sys, subprocess, re, textwrap
def loadFile(path):
with closing( open(path) ) as fd:
return fd.read()
def writeFile(path, buffer):
with closing( open(path, "w") ) as fd:
fd.write(buffer)
def splitSections(buffer):
while buffer:
assert len(buffer) >= 3
name = buffer.pop(0).lower()
assert buffer.pop(0) == ''
body = []
while buffer:
line = buffer.pop(0)
if line == '' or line[0].isspace():
body.append(line[2:])
else:
buffer.insert(0, line)
yield (name, body)
body = []
break
if body:
yield (name, body)
def collapseText(lines, width = 72):
wrapper = textwrap.TextWrapper( width = width
, expand_tabs = False
, break_on_hyphens = False
, break_long_words = False
)
body = []
prev = None
for line in lines:
if line == '':
prev = None
elif line[0].isspace():
if prev == "quote":
body[-1].append(line)
else:
body.append([line])
prev = "quote"
else:
if prev == "text":
newtext = ' '.join(body[-1]) + ' ' + line
body[-1] = wrapper.wrap(newtext)
else:
body.append(wrapper.wrap(line))
prev = "text"
return body
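# Illustrative sketch (not part of the original script): collapseText re-wraps
# plain paragraphs but keeps indented quote blocks verbatim.
#
#   collapseText(["first line of a paragraph", "second line", "", "  quoted"])
#   # -> [["first line of a paragraph second line"], ["  quoted"]]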
class Macro:
def __init__(self, filePath, computeSerialNumber=False):
self.name = os.path.splitext(os.path.basename(filePath))[0]
# header and body are separated by an empty line.
(header,body) = loadFile(filePath).split("\n\n", 1)
self.body = body.split('\n')
# headers may not contain tab characters
assert not ('\t' in header)
# drop initial header (if present)
header = re.sub(r"^\n*# =+\n#[^\n]*\n# =+\n(#\n)+", '', header, 1)
# split buffer into lines and drop initial "# " prefix in the process
header = [l[2:] for l in header.split('\n')]
# set defaults
self.authors = []
# parse each section in the remaining list
for (key, body) in splitSections(header):
# drop empty lines at beginning and end of body
while body[0] == '': body.pop(0)
while body[-1] == '': body.pop(-1)
# each section has its own parser
if key == "synopsis":
if '' in body:
raise Exception("%s: malformed synopsis section" % filePath)
elif key == "description":
body = collapseText(body)
elif key == "license":
while True:
match = re.match(r"Copyright \([cC]\) ([0-9.,-]+) (.*)", body[0])
if not match: break
(year,name) = (match.group(1), match.group(2))
match = re.match(r"(.*) <(.*)>", name)
if match:
(name,email) = (match.group(1), match.group(2))
self.authors.append(dict(year = year, name = name, email = email))
else:
self.authors.append(dict(year = year, name = name))
body.pop(0)
assert self.authors
if body.pop(0) != '':
raise Exception("%s: malformed license section" % filePath)
body = collapseText(body)
elif key == "obsolete macro":
key = "obsolete"
body = collapseText(body)
elif key == "description":
body = collapseText(body)
else:
raise Exception("%s: unknown section %r in macro" % (filePath, key))
self.__dict__[key] = body
# determine the macro's serial number
if computeSerialNumber: # compute the number from git
logMessages = subprocess.check_output(["git", "log", "--oneline", "054e8ad8c766afa7059d8cd4a81bbfa99133ef5e..HEAD", "--", filePath], bufsize=1)
logLines = logMessages.rstrip(b'\n').split(b"\n")
self.serial = len(logLines)
modified = subprocess.call(["git", "diff", "--quiet", "--exit-code", "HEAD", "--", filePath])
if modified:
self.serial += 1
else: # trust the m4 file
assert self.body[0].startswith("#serial")
self.serial = int(self.body[0].split()[1])
# drop the original serial number from the body
self.body = [ l for l in self.body if not l.startswith("#serial") ]
        # drop whitespace from beginning and end of body
while self.body[0] == "":
self.body.pop(0)
while self.body[-1] == "":
self.body.pop(-1)
def __repr__(self):
return repr(self.__dict__)
| gpl-3.0 |
PhonologicalCorpusTools/PyAnnotationGraph | polyglotdb/query/base/query.py | 1 | 15416 | from .results import BaseQueryResults
from .func import Count
from ..base.helper import key_for_cypher, value_for_cypher
class BaseQuery(object):
query_template = '''{match}
{where}
{optional_match}
{with}
{return}'''
delete_template = '''DETACH DELETE {alias}'''
aggregate_template = '''RETURN {aggregates}{order_by}'''
distinct_template = '''RETURN {columns}{order_by}{offset}{limit}'''
set_label_template = '''{alias} {value}'''
remove_label_template = '''{alias}{value}'''
set_property_template = '''{alias}.{attribute} = {value}'''
def __init__(self, corpus, to_find):
self.corpus = corpus
self.to_find = to_find
self._criterion = []
self._columns = []
self._hidden_columns = []
self._order_by = []
self._group_by = []
self._aggregate = []
self._preload = []
self._cache = []
self._delete = False
self._set_labels = []
self._remove_labels = []
self._set_properties = {}
self._limit = None
self._offset = None
self.call_back = None
self.stop_check = None
def cache(self):
raise NotImplementedError
def required_nodes(self):
ns = {self.to_find}
tf_type = type(self.to_find)
for c in self._criterion:
ns.update(x for x in c.nodes if type(x) is not tf_type)
for c in self._columns + self._hidden_columns + self._aggregate + self._preload + self._cache:
ns.update(x for x in c.nodes if type(x) is not tf_type and x.non_optional)
for c, _ in self._order_by:
ns.update(x for x in c.nodes if type(x) is not tf_type and x.non_optional)
return ns
def optional_nodes(self):
required_nodes = self.required_nodes()
ns = set()
tf_type = type(self.to_find)
for c in self._columns + self._aggregate + self._preload + self._cache:
ns.update(x for x in c.nodes if type(x) is not tf_type and x not in required_nodes)
for c, _ in self._order_by:
ns.update(x for x in c.nodes if type(x) is not tf_type and x not in required_nodes)
return sorted(ns)
def clear_columns(self):
"""
Remove any columns specified. The default columns for any query
are the id of the token and the label of the type.
"""
self._columns = []
return self
def offset(self, number):
self._offset = number
return self
def filter(self, *args):
"""
Apply one or more filters to a query.
"""
from .elements import EqualClauseElement
for a in args:
for c in self._criterion:
if isinstance(c, EqualClauseElement) and isinstance(a, EqualClauseElement) and \
c.attribute.node == a.attribute.node and c.attribute.label == a.attribute.label:
c.value = a.value
break
else:
self._criterion.append(a)
return self
def columns(self, *args):
"""
Add one or more additional columns to the results.
Columns should be :class:`~polyglotdb.query.base.Attribute` objects.
"""
column_set = set(self._columns)
for c in args:
if c in column_set:
continue
else:
self._columns.append(c)
# column_set.add(c) # FIXME failing tests
return self
def group_by(self, *args):
"""
Specify one or more fields for how aggregates should be grouped.
"""
self._group_by.extend(args)
return self
def order_by(self, field, descending=False):
"""
Specify how the results of the query should be ordered.
Parameters
----------
field : Attribute
Determines what the ordering should be based on
descending : bool, defaults to False
Whether the order should be descending
"""
self._order_by.append((field, descending))
return self
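    # Illustrative sketch (not part of the original class): typical chained use
    # of the fluent interface above; ``corpus``, ``phone`` and its attributes
    # are hypothetical objects supplied by the calling code.
    #
    #   q = BaseQuery(corpus, phone)
    #   q = q.filter(phone.label == 'aa').columns(phone.label, phone.duration)
    #   q = q.order_by(phone.duration, descending=True).limit(10)
    #   results = q.all()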
def to_csv(self, path):
"""
Same as ``all``, but the results of the query are output to the
specified path as a CSV file.
"""
results = self.all()
if self.stop_check is not None and self.stop_check():
return
results.to_csv(path)
def count(self):
"""
Returns the number of rows in the query.
"""
self._aggregate = [Count()]
cypher = self.cypher()
value = self.corpus.execute_cypher(cypher, **self.cypher_params())
self._aggregate = []
return value.single().values()[0]
def aggregate(self, *args):
"""
Aggregate the results of the query by a grouping factor or overall.
Not specifying a ``group_by`` in the query will result in a single
result for the aggregate from the whole query.
"""
self._aggregate.extend(args)
cypher = self.cypher()
value = self.corpus.execute_cypher(cypher, **self.cypher_params())
if self._group_by or any(not x.collapsing for x in self._aggregate):
return list(value)
elif len(self._aggregate) > 1:
return list(value)[0]
else:
return value.single().values()[0]
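    # Illustrative sketch (not part of the original class): aggregating without
    # a ``group_by`` yields a single value; ``Count`` is imported at the top of
    # this module and ``q`` is a query built as sketched above.
    #
    #   total = q.aggregate(Count())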
def preload(self, *args):
self._preload.extend(args)
return self
def limit(self, limit):
""" sets object limit to parameter limit """
self._limit = limit
return self
def to_json(self):
data = {'corpus_name': self.corpus.corpus_name,
'filters': [x.for_json() for x in self._criterion],
'columns': [x.for_json() for x in self._columns]}
return data
def cypher(self):
"""
Generates a Cypher statement based on the query.
"""
kwargs = {'match': '',
'optional_match': '',
'where': '',
'with': '',
'return': ''}
# generate initial match strings
match_strings = set()
withs = set()
nodes = self.required_nodes()
for node in nodes:
if node.has_subquery:
continue
match_strings.add(node.for_match())
withs.update(node.withs)
kwargs['match'] = 'MATCH ' + ',\n'.join(match_strings)
# generate main filters
properties = []
for c in self._criterion:
if c.in_subquery:
continue
properties.append(c.for_cypher())
if properties:
kwargs['where'] += 'WHERE ' + '\nAND '.join(properties)
optional_nodes = self.optional_nodes()
optional_match_strings = []
for node in optional_nodes:
if node.has_subquery:
continue
optional_match_strings.append(node.for_match())
withs.update(node.withs)
if optional_match_strings:
s = ''
for i, o in enumerate(optional_match_strings):
s += 'OPTIONAL MATCH ' + o + '\n'
kwargs['optional_match'] = s
# generate subqueries
with_statements = ['WITH ' + ', '.join(withs)]
for node in nodes:
if not node.has_subquery:
continue
statement = node.subquery(withs, self._criterion)
with_statements.append(statement)
withs.update(node.withs)
for node in optional_nodes:
if not node.has_subquery:
continue
statement = node.subquery(withs, self._criterion, optional=True)
with_statements.append(statement)
withs.update(node.withs)
kwargs['with'] = '\n'.join(with_statements)
kwargs['return'] = self.generate_return()
cypher = self.query_template.format(**kwargs)
return cypher
def create_subset(self, label):
self._set_labels.append(label)
self.corpus.execute_cypher(self.cypher(), **self.cypher_params())
self._set_labels = []
def remove_subset(self, label):
self._remove_labels.append(label)
self.corpus.execute_cypher(self.cypher(), **self.cypher_params())
self._remove_labels = []
def delete(self):
"""
Remove the results of a query from the graph. CAUTION: this is
irreversible.
"""
self._delete = True
self.corpus.execute_cypher(self.cypher(), **self.cypher_params())
def set_properties(self, **kwargs):
self._set_properties = {k: v for k,v in kwargs.items()}
print(self.cypher(), self.cypher_params())
self.corpus.execute_cypher(self.cypher(), **self.cypher_params())
self._set_properties = {}
def all(self):
return BaseQueryResults(self)
def get(self):
r = BaseQueryResults(self)
if len(r) > 1:
raise Exception("Can't use get on query with more than one result.")
return r[0]
def cypher_params(self):
from ..base.complex import ComplexClause
from ..base.elements import SubsetClauseElement, NotSubsetClauseElement
from ..base.attributes import NodeAttribute
params = {}
for c in self._criterion:
if isinstance(c, ComplexClause):
params.update(c.generate_params())
elif isinstance(c, (SubsetClauseElement, NotSubsetClauseElement)):
pass
else:
try:
if not isinstance(c.value, NodeAttribute):
params[c.cypher_value_string()[1:-1].replace('`', '')] = c.value
except AttributeError:
pass
return params
def generate_return(self):
"""
        Generates the final Cypher statement for this query, dispatching to
        whichever of the specialized generate methods matches the query's
        current state (delete, cache, set properties/labels, aggregate, or
        distinct).
Returns
-------
str
cypher formatted string
"""
if self._delete:
statement = self._generate_delete_return()
elif self._cache:
statement = self._generate_cache_return()
elif self._set_properties:
statement = self._generate_set_properties_return()
elif self._set_labels:
statement = self._generate_set_labels_return()
elif self._remove_labels:
statement = self._generate_remove_labels_return()
elif self._aggregate:
statement = self._generate_aggregate_return()
else:
statement = self._generate_distinct_return()
return statement
def _generate_delete_return(self):
kwargs = {}
kwargs['alias'] = self.to_find.alias
return_statement = self.delete_template.format(**kwargs)
return return_statement
def _generate_cache_return(self):
properties = []
for c in self._cache:
kwargs = {'alias': c.node.cache_alias,
'attribute': c.output_alias,
'value': c.for_cypher()
}
if c.label == 'position':
kwargs['alias'] = self.to_find.alias
set_string = self.set_property_template.format(**kwargs)
properties.append(set_string)
return 'SET {}'.format(', '.join(properties))
def _generate_remove_labels_return(self):
remove_label_strings = []
kwargs = {}
kwargs['alias'] = self.to_find.alias
kwargs['value'] = ':' + ':'.join(map(key_for_cypher, self._remove_labels))
remove_label_strings.append(self.remove_label_template.format(**kwargs))
return_statement = ''
if remove_label_strings:
if return_statement:
return_statement += '\nWITH {alias}\n'.format(alias=self.to_find.alias)
return_statement += '\nREMOVE ' + ', '.join(remove_label_strings)
return return_statement
def _generate_set_properties_return(self):
set_strings = []
for k, v in self._set_properties.items():
if v is None:
v = 'NULL'
else:
v = value_for_cypher(v)
s = self.set_property_template.format(alias=self.to_find.alias, attribute=k, value=v)
set_strings.append(s)
return 'SET ' + ', '.join(set_strings)
def _generate_set_labels_return(self):
set_label_strings = []
kwargs = {}
kwargs['alias'] = self.to_find.alias
kwargs['value'] = ':' + ':'.join(map(key_for_cypher, self._set_labels))
set_label_strings.append(self.set_label_template.format(**kwargs))
return 'SET ' + ', '.join(set_label_strings)
def _generate_aggregate_return(self):
kwargs = {'order_by': self._generate_order_by(),
'limit': self._generate_limit()}
properties = []
for g in self._group_by:
properties.append(g.aliased_for_output())
if any(not x.collapsing for x in self._aggregate):
for c in self._columns:
properties.append(c.aliased_for_output())
if len(self._order_by) == 0 and len(self._group_by) > 0:
self._order_by.append((self._group_by[0], False))
for a in self._aggregate:
properties.append(a.aliased_for_output())
kwargs['aggregates'] = ', '.join(properties)
return self.aggregate_template.format(**kwargs)
def _generate_distinct_return(self):
kwargs = {'order_by': self._generate_order_by(),
'limit': self._generate_limit(),
'offset': self._generate_offset()}
properties = []
for c in self._columns + self._hidden_columns:
properties.append(c.aliased_for_output())
if not properties:
properties = self.to_find.withs
for a in self._preload:
properties.extend(a.withs)
kwargs['columns'] = ', '.join(properties)
return self.distinct_template.format(**kwargs)
def _generate_limit(self):
if self._limit is not None:
return '\nLIMIT {}'.format(self._limit)
return ''
def _generate_offset(self):
if self._offset is not None:
return '\nSKIP {}'.format(self._offset)
return ''
def _generate_order_by(self):
properties = []
for c in self._order_by:
ac_set = set(self._columns)
gb_set = set(self._group_by)
h_c = hash(c[0])
for col in ac_set:
if h_c == hash(col):
element = col.for_cypher()
break
else:
for col in gb_set:
if h_c == hash(col):
element = col.for_cypher()
break
else:
element = c[0].for_cypher()
# query.columns(c[0])
if c[1]:
element += ' DESC'
properties.append(element)
if properties:
return '\nORDER BY ' + ', '.join(properties)
return ''
| mit |
tigersirvine/occtigerscricket | django/core/management/commands/reset.py | 78 | 2540 | from optparse import make_option
from django.core.management.base import AppCommand, CommandError
from django.core.management.color import no_style
from django.core.management.sql import sql_reset
from django.db import connections, transaction, DEFAULT_DB_ALIAS
class Command(AppCommand):
option_list = AppCommand.option_list + (
make_option('--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.'),
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to reset. '
'Defaults to the "default" database.'),
)
help = "Executes ``sqlreset`` for the given app(s) in the current database."
args = '[appname ...]'
output_transaction = True
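    # Illustrative usage (not part of the original command), assuming an
    # installed app labelled "books" in the project:
    #
    #   ./manage.py reset books --noinput --database=default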
def handle_app(self, app, **options):
# This command breaks a lot and should be deprecated
import warnings
warnings.warn(
'This command has been deprecated. The command ``flush`` can be used to delete everything. You can also use ALTER TABLE or DROP TABLE statements manually.',
DeprecationWarning
)
using = options.get('database')
connection = connections[using]
app_name = app.__name__.split('.')[-2]
self.style = no_style()
sql_list = sql_reset(app, self.style, connection)
if options.get('interactive'):
confirm = raw_input("""
You have requested a database reset.
This will IRREVERSIBLY DESTROY any data for
the "%s" application in the database "%s".
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """ % (app_name, connection.settings_dict['NAME']))
else:
confirm = 'yes'
if confirm == 'yes':
try:
cursor = connection.cursor()
for sql in sql_list:
cursor.execute(sql)
except Exception, e:
transaction.rollback_unless_managed()
raise CommandError("""Error: %s couldn't be reset. Possible reasons:
* The database isn't running or isn't configured correctly.
* At least one of the database tables doesn't exist.
* The SQL was invalid.
Hint: Look at the output of 'django-admin.py sqlreset %s'. That's the SQL this command wasn't able to run.
The full error: %s""" % (app_name, app_name, e))
transaction.commit_unless_managed()
else:
print "Reset cancelled."
| bsd-3-clause |
sheadovas/tools | misc/plotter.py | 1 | 1780 | #!/usr/bin/python
# created by shead
import sys
import numpy as np
import matplotlib.pyplot as plt
import pylab
"""
USAGE
============
./plotter.py [log]
./plotter.py my_log.log
REQUIRED DEPENDENCIES
============
* Python2
* Matplot http://matplotlib.org/users/installing.html
FILE FORMAT
============
[iteration] [amount_of_cmp] [amount_of_swaps]
...
EXAMPLE FILE
============
10 1 2
20 30 121
"""
def load_data_from_file(filename, data_size, data_cmp, data_swp):
with open(filename, 'r') as f:
for line in f:
raw = line.split()
data_size.append(int(raw[0]))
data_cmp.append(int(raw[1]))
data_swp.append(int(raw[2]))
# func from docs
def autolabel(rects, ax):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2.0, 1.05*height,
'%d' % int(height),
ha='center', va='bottom')
def main(argv):
if len(argv) != 2:
print 'USAGE: plotter [path_to_log]'
sys.exit(1)
data_size = []
data_cmp = []
data_swp = []
load_data_from_file(argv[1], data_size, data_cmp, data_swp)
# plot
N = len(data_size)
ind = np.arange(N) # the x locations for the groups
width = 0.35 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(ind, data_cmp, width, color='r')
rects2 = ax.bar(ind + width, data_swp, width, color='y')
# add some text for labels, title and axes ticks
ax.set_ylabel('Values')
title = argv[1].split('.')[0]
ax.set_title(title)
#ax.set_xticks(ind + width)
    #ax.set_xticklabels(data_size)
ax.legend((rects1[0], rects2[0]), ('cmp', 'swp'))
#autolabel(rects1, ax)
#autolabel(rects2, ax)
fname = '%s.png' % (title)
pylab.savefig(fname, dpi=333)
print 'Saved to %s' % fname
if __name__ == "__main__":
main(sys.argv) | mit |
mihaip/NewsBlur | vendor/yaml/tokens.py | 985 | 2573 |
class Token(object):
def __init__(self, start_mark, end_mark):
self.start_mark = start_mark
self.end_mark = end_mark
def __repr__(self):
attributes = [key for key in self.__dict__
if not key.endswith('_mark')]
attributes.sort()
arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
for key in attributes])
return '%s(%s)' % (self.__class__.__name__, arguments)
#class BOMToken(Token):
# id = '<byte order mark>'
class DirectiveToken(Token):
id = '<directive>'
def __init__(self, name, value, start_mark, end_mark):
self.name = name
self.value = value
self.start_mark = start_mark
self.end_mark = end_mark
class DocumentStartToken(Token):
id = '<document start>'
class DocumentEndToken(Token):
id = '<document end>'
class StreamStartToken(Token):
id = '<stream start>'
def __init__(self, start_mark=None, end_mark=None,
encoding=None):
self.start_mark = start_mark
self.end_mark = end_mark
self.encoding = encoding
class StreamEndToken(Token):
id = '<stream end>'
class BlockSequenceStartToken(Token):
id = '<block sequence start>'
class BlockMappingStartToken(Token):
id = '<block mapping start>'
class BlockEndToken(Token):
id = '<block end>'
class FlowSequenceStartToken(Token):
id = '['
class FlowMappingStartToken(Token):
id = '{'
class FlowSequenceEndToken(Token):
id = ']'
class FlowMappingEndToken(Token):
id = '}'
class KeyToken(Token):
id = '?'
class ValueToken(Token):
id = ':'
class BlockEntryToken(Token):
id = '-'
class FlowEntryToken(Token):
id = ','
class AliasToken(Token):
id = '<alias>'
def __init__(self, value, start_mark, end_mark):
self.value = value
self.start_mark = start_mark
self.end_mark = end_mark
class AnchorToken(Token):
id = '<anchor>'
def __init__(self, value, start_mark, end_mark):
self.value = value
self.start_mark = start_mark
self.end_mark = end_mark
class TagToken(Token):
id = '<tag>'
def __init__(self, value, start_mark, end_mark):
self.value = value
self.start_mark = start_mark
self.end_mark = end_mark
class ScalarToken(Token):
id = '<scalar>'
def __init__(self, value, plain, start_mark, end_mark, style=None):
self.value = value
self.plain = plain
self.start_mark = start_mark
self.end_mark = end_mark
self.style = style
| mit |
rdeheele/odoo | openerp/tools/pdf_utils.py | 456 | 3659 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Copyright (c) 2003-2007 LOGILAB S.A. (Paris, FRANCE).
http://www.logilab.fr/ -- mailto:[email protected]
manipulate pdf and fdf files. pdftk recommended.
Notes regarding pdftk, pdf forms and fdf files (form definition file)
fields names can be extracted with:
pdftk orig.pdf generate_fdf output truc.fdf
to merge fdf and pdf:
pdftk orig.pdf fill_form test.fdf output result.pdf [flatten]
without flatten, one could further edit the resulting form.
with flatten, everything is turned into text.
"""
from __future__ import with_statement
import os
import tempfile
HEAD="""%FDF-1.2
%\xE2\xE3\xCF\xD3
1 0 obj
<<
/FDF
<<
/Fields [
"""
TAIL="""]
>>
>>
endobj
trailer
<<
/Root 1 0 R
>>
%%EOF
"""
def output_field(f):
return "\xfe\xff" + "".join( [ "\x00"+c for c in f ] )
def extract_keys(lines):
keys = []
for line in lines:
if line.startswith('/V'):
pass #print 'value',line
elif line.startswith('/T'):
key = line[7:-2]
key = ''.join(key.split('\x00'))
keys.append( key )
return keys
def write_field(out, key, value):
out.write("<<\n")
if value:
out.write("/V (%s)\n" %value)
else:
out.write("/V /\n")
out.write("/T (%s)\n" % output_field(key) )
out.write(">> \n")
def write_fields(out, fields):
out.write(HEAD)
for key in fields:
value = fields[key]
write_field(out, key, value)
# write_field(out, key+"a", value) # pour copie-carbone sur autres pages
out.write(TAIL)
def extract_keys_from_pdf(filename):
# what about using 'pdftk filename dump_data_fields' and parsing the output ?
tmp_file = tempfile.mkstemp(".fdf")[1]
try:
os.system('pdftk %s generate_fdf output \"%s\"' % (filename, tmp_file))
with open(tmp_file, "r") as ofile:
lines = ofile.readlines()
finally:
try:
os.remove(tmp_file)
except Exception:
pass # nothing to do
return extract_keys(lines)
def fill_pdf(infile, outfile, fields):
tmp_file = tempfile.mkstemp(".fdf")[1]
try:
with open(tmp_file, "w") as ofile:
write_fields(ofile, fields)
os.system('pdftk %s fill_form \"%s\" output %s flatten' % (infile, tmp_file, outfile))
finally:
try:
os.remove(tmp_file)
except Exception:
pass # nothing to do
def testfill_pdf(infile, outfile):
keys = extract_keys_from_pdf(infile)
    # write_fields() expects a mapping of field name -> value
    fields = {}
    for key in keys:
        fields[key] = ''
fill_pdf(infile, outfile, fields)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
compmech/meshless | meshless/espim/plate2d_add_k0s.py | 1 | 1946 | from meshless.espim.plate2d_add_k0s_cell_based import add_k0s as add_k0s_cell
from meshless.espim.plate2d_add_k0s_cell_based_no_smoothing import add_k0s as add_k0s_cell_no_smoothing
from meshless.espim.plate2d_add_k0s_edge_based import add_k0s as add_k0s_edge
def add_k0s(k0, mesh, prop_from_node, method='cell-based', alpha=0.08,
maxl_from_area=False):
"""Add the transverse shear stiffness to an existing consitutive stiffness
matrix
The transverse shear stiffness is computed using the Discrete Shear Gap
method, with a correction that uses parameter `alpha`
Parameters
----------
k0 : (N, N) array-like
Existing stiffness matrix. This object is modified in-place
mesh : :class:`pyNastran.bdf.BDF` object
The object must have the proper edge references as those returned by
:func:`.read_mesh` or :func:`.read_delaunay`
prop_from_node : bool
If the constitutive properties are assigned per node. Otherwise they
are considered assigned per element
method : str, optional
The smoothing method for the transverse shear
alpha : float, optional
Positive constant used in the correction applied to the transverse
shear stiffness
maxl_from_area : bool, optional
If maxl, used in Lyly`s formula, should be sqrt(area). It uses the
maximum edge length otherwise.
"""
#alpha between 0. and 0.6, according to studies of Lyly et al.
if method == 'cell-based':
return add_k0s_cell(k0, mesh, prop_from_node, alpha=alpha, maxl_from_area=maxl_from_area)
elif method == 'cell-based-no-smoothing':
return add_k0s_cell_no_smoothing(k0, mesh, prop_from_node, alpha=alpha, maxl_from_area=maxl_from_area)
elif method == 'edge-based':
return add_k0s_edge(k0, mesh, prop_from_node, alpha=alpha, maxl_from_area=maxl_from_area)
else:
raise ValueError('Invalid method')
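# Illustrative sketch (not part of the original module): dispatching on the
# smoothing method; ``k0``, ``mesh`` and ``prop_from_node`` are assumed to be
# prepared by the caller as described in the docstring above, and ``k0`` is
# modified in place.
#
#   add_k0s(k0, mesh, prop_from_node=False, method='edge-based', alpha=0.08)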
| bsd-2-clause |
mrunge/horizon | openstack_dashboard/dashboards/admin/info/tables.py | 11 | 6531 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import template
from django.template import defaultfilters as filters
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from horizon.utils import filters as utils_filters
SERVICE_ENABLED = "enabled"
SERVICE_DISABLED = "disabled"
SERVICE_STATUS_DISPLAY_CHOICES = (
(SERVICE_ENABLED, _("Enabled")),
(SERVICE_DISABLED, _("Disabled")),
)
class ServiceFilterAction(tables.FilterAction):
filter_field = 'type'
def filter(self, table, services, filter_string):
q = filter_string.lower()
def comp(service):
attr = getattr(service, self.filter_field, '')
if attr is not None and q in attr.lower():
return True
return False
return filter(comp, services)
class SubServiceFilterAction(ServiceFilterAction):
filter_field = 'binary'
def get_stats(service):
return template.loader.render_to_string('admin/services/_stats.html',
{'service': service})
def get_status(service):
# if not configured in this region, neither option makes sense
if service.host:
return SERVICE_ENABLED if not service.disabled else SERVICE_DISABLED
return None
class ServicesTable(tables.DataTable):
id = tables.Column('id', hidden=True)
name = tables.Column("name", verbose_name=_('Name'))
service_type = tables.Column('__unicode__', verbose_name=_('Service'))
host = tables.Column('host', verbose_name=_('Host'))
status = tables.Column(get_status,
verbose_name=_('Status'),
status=True,
display_choices=SERVICE_STATUS_DISPLAY_CHOICES)
class Meta:
name = "services"
verbose_name = _("Services")
table_actions = (ServiceFilterAction,)
multi_select = False
status_columns = ["status"]
def get_available(zone):
return zone.zoneState['available']
def get_nova_agent_status(agent):
template_name = 'admin/info/_cell_status.html'
context = {
'status': agent.status,
'disabled_reason': agent.disabled_reason
}
return template.loader.render_to_string(template_name, context)
class NovaServicesTable(tables.DataTable):
binary = tables.Column("binary", verbose_name=_('Name'))
host = tables.Column('host', verbose_name=_('Host'))
zone = tables.Column('zone', verbose_name=_('Zone'))
status = tables.Column(get_nova_agent_status, verbose_name=_('Status'))
state = tables.Column('state', verbose_name=_('State'),
filters=(filters.title,))
updated_at = tables.Column('updated_at',
verbose_name=pgettext_lazy(
'Time since the last update',
u'Last Updated'),
filters=(utils_filters.parse_isotime,
filters.timesince))
def get_object_id(self, obj):
return "%s-%s-%s" % (obj.binary, obj.host, obj.zone)
class Meta:
name = "nova_services"
verbose_name = _("Compute Services")
table_actions = (SubServiceFilterAction,)
multi_select = False
class CinderServicesTable(tables.DataTable):
binary = tables.Column("binary", verbose_name=_('Name'))
host = tables.Column('host', verbose_name=_('Host'))
zone = tables.Column('zone', verbose_name=_('Zone'))
status = tables.Column('status', verbose_name=_('Status'),
filters=(filters.title, ))
state = tables.Column('state', verbose_name=_('State'),
filters=(filters.title, ))
updated_at = tables.Column('updated_at',
verbose_name=pgettext_lazy(
'Time since the last update',
u'Last Updated'),
filters=(utils_filters.parse_isotime,
filters.timesince))
def get_object_id(self, obj):
return "%s-%s-%s" % (obj.binary, obj.host, obj.zone)
class Meta:
name = "cinder_services"
verbose_name = _("Block Storage Services")
table_actions = (SubServiceFilterAction,)
multi_select = False
class NetworkAgentsFilterAction(tables.FilterAction):
def filter(self, table, agents, filter_string):
q = filter_string.lower()
def comp(agent):
if q in agent.agent_type.lower():
return True
return False
return filter(comp, agents)
def get_network_agent_status(agent):
if agent.admin_state_up:
return _('Enabled')
return _('Disabled')
def get_network_agent_state(agent):
if agent.alive:
return _('Up')
return _('Down')
class NetworkAgentsTable(tables.DataTable):
agent_type = tables.Column('agent_type', verbose_name=_('Type'))
binary = tables.Column("binary", verbose_name=_('Name'))
host = tables.Column('host', verbose_name=_('Host'))
status = tables.Column(get_network_agent_status, verbose_name=_('Status'))
state = tables.Column(get_network_agent_state, verbose_name=_('State'))
heartbeat_timestamp = tables.Column('heartbeat_timestamp',
verbose_name=pgettext_lazy(
'Time since the last update',
u'Last Updated'),
filters=(utils_filters.parse_isotime,
filters.timesince))
def get_object_id(self, obj):
return "%s-%s" % (obj.binary, obj.host)
class Meta:
name = "network_agents"
verbose_name = _("Network Agents")
table_actions = (NetworkAgentsFilterAction,)
multi_select = False
| apache-2.0 |
Team-Blackout/BeastMode-Evita | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | 2058 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
| gpl-2.0 |
dcosentino/edx-platform | common/lib/xmodule/xmodule/modulestore/tests/test_modulestore_settings.py | 41 | 8713 | """
Tests for testing the modulestore settings migration code.
"""
import copy
import ddt
from tempfile import mkdtemp
from unittest import TestCase
from xmodule.modulestore.modulestore_settings import (
convert_module_store_setting_if_needed,
update_module_store_settings,
get_mixed_stores,
)
@ddt.ddt
class ModuleStoreSettingsMigration(TestCase):
"""
Tests for the migration code for the module store settings
"""
OLD_CONFIG = {
"default": {
"ENGINE": "xmodule.modulestore.xml.XMLModuleStore",
"OPTIONS": {
"data_dir": "directory",
"default_class": "xmodule.hidden_module.HiddenDescriptor",
},
"DOC_STORE_CONFIG": {},
}
}
OLD_CONFIG_WITH_DIRECT_MONGO = {
"default": {
"ENGINE": "xmodule.modulestore.mongo.MongoModuleStore",
"OPTIONS": {
"collection": "modulestore",
"db": "edxapp",
"default_class": "xmodule.hidden_module.HiddenDescriptor",
"fs_root": mkdtemp(),
"host": "localhost",
"password": "password",
"port": 27017,
"render_template": "edxmako.shortcuts.render_to_string",
"user": "edxapp"
},
"DOC_STORE_CONFIG": {},
}
}
OLD_MIXED_CONFIG_WITH_DICT = {
"default": {
"ENGINE": "xmodule.modulestore.mixed.MixedModuleStore",
"OPTIONS": {
"mappings": {},
"stores": {
"an_old_mongo_store": {
"DOC_STORE_CONFIG": {},
"ENGINE": "xmodule.modulestore.mongo.MongoModuleStore",
"OPTIONS": {
"collection": "modulestore",
"db": "test",
"default_class": "xmodule.hidden_module.HiddenDescriptor",
}
},
"default": {
"ENGINE": "the_default_store",
"OPTIONS": {
"option1": "value1",
"option2": "value2"
},
"DOC_STORE_CONFIG": {}
},
"xml": {
"ENGINE": "xmodule.modulestore.xml.XMLModuleStore",
"OPTIONS": {
"data_dir": "directory",
"default_class": "xmodule.hidden_module.HiddenDescriptor"
},
"DOC_STORE_CONFIG": {}
}
}
}
}
}
ALREADY_UPDATED_MIXED_CONFIG = {
'default': {
'ENGINE': 'xmodule.modulestore.mixed.MixedModuleStore',
'OPTIONS': {
'mappings': {},
'stores': [
{
'NAME': 'split',
'ENGINE': 'xmodule.modulestore.split_mongo.split_draft.DraftVersioningModuleStore',
'DOC_STORE_CONFIG': {},
'OPTIONS': {
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
'fs_root': "fs_root",
'render_template': 'edxmako.shortcuts.render_to_string',
}
},
{
'NAME': 'draft',
'ENGINE': 'xmodule.modulestore.mongo.draft.DraftModuleStore',
'DOC_STORE_CONFIG': {},
'OPTIONS': {
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
'fs_root': "fs_root",
'render_template': 'edxmako.shortcuts.render_to_string',
}
},
]
}
}
}
def assertStoreValuesEqual(self, store_setting1, store_setting2):
"""
Tests whether the fields in the given store_settings are equal.
"""
store_fields = ["OPTIONS", "DOC_STORE_CONFIG"]
for field in store_fields:
self.assertEqual(store_setting1[field], store_setting2[field])
def assertMigrated(self, old_setting):
"""
Migrates the given setting and checks whether it correctly converted
to an ordered list of stores within Mixed.
"""
# pass a copy of the old setting since the migration modifies the given setting
new_mixed_setting = convert_module_store_setting_if_needed(copy.deepcopy(old_setting))
# check whether the configuration is encapsulated within Mixed.
self.assertEqual(new_mixed_setting["default"]["ENGINE"], "xmodule.modulestore.mixed.MixedModuleStore")
# check whether the stores are in an ordered list
new_stores = get_mixed_stores(new_mixed_setting)
self.assertIsInstance(new_stores, list)
return new_mixed_setting, new_stores[0]
def is_split_configured(self, mixed_setting):
"""
Tests whether the split module store is configured in the given setting.
"""
stores = get_mixed_stores(mixed_setting)
split_settings = [store for store in stores if store['ENGINE'].endswith('.DraftVersioningModuleStore')]
if len(split_settings):
# there should only be one setting for split
self.assertEquals(len(split_settings), 1)
# verify name
self.assertEquals(split_settings[0]['NAME'], 'split')
# verify split config settings equal those of mongo
self.assertStoreValuesEqual(
split_settings[0],
next((store for store in stores if 'DraftModuleStore' in store['ENGINE']), None)
)
return len(split_settings) > 0
def test_convert_into_mixed(self):
old_setting = self.OLD_CONFIG
new_mixed_setting, new_default_store_setting = self.assertMigrated(old_setting)
self.assertStoreValuesEqual(new_default_store_setting, old_setting["default"])
self.assertEqual(new_default_store_setting["ENGINE"], old_setting["default"]["ENGINE"])
self.assertFalse(self.is_split_configured(new_mixed_setting))
def test_convert_from_old_mongo_to_draft_store(self):
old_setting = self.OLD_CONFIG_WITH_DIRECT_MONGO
new_mixed_setting, new_default_store_setting = self.assertMigrated(old_setting)
self.assertStoreValuesEqual(new_default_store_setting, old_setting["default"])
self.assertEqual(new_default_store_setting["ENGINE"], "xmodule.modulestore.mongo.draft.DraftModuleStore")
self.assertTrue(self.is_split_configured(new_mixed_setting))
def test_convert_from_dict_to_list(self):
old_mixed_setting = self.OLD_MIXED_CONFIG_WITH_DICT
new_mixed_setting, new_default_store_setting = self.assertMigrated(old_mixed_setting)
self.assertEqual(new_default_store_setting["ENGINE"], "the_default_store")
self.assertTrue(self.is_split_configured(new_mixed_setting))
# exclude split when comparing old and new, since split was added as part of the migration
new_stores = [store for store in get_mixed_stores(new_mixed_setting) if store['NAME'] != 'split']
old_stores = get_mixed_stores(self.OLD_MIXED_CONFIG_WITH_DICT)
# compare each store configured in mixed
self.assertEqual(len(new_stores), len(old_stores))
for new_store in new_stores:
self.assertStoreValuesEqual(new_store, old_stores[new_store['NAME']])
def test_no_conversion(self):
# make sure there is no migration done on an already updated config
old_mixed_setting = self.ALREADY_UPDATED_MIXED_CONFIG
new_mixed_setting, new_default_store_setting = self.assertMigrated(old_mixed_setting)
self.assertTrue(self.is_split_configured(new_mixed_setting))
self.assertEquals(old_mixed_setting, new_mixed_setting)
@ddt.data('draft', 'split')
def test_update_settings(self, default_store):
mixed_setting = self.ALREADY_UPDATED_MIXED_CONFIG
update_module_store_settings(mixed_setting, default_store=default_store)
self.assertTrue(get_mixed_stores(mixed_setting)[0]['NAME'] == default_store)
def test_update_settings_error(self):
mixed_setting = self.ALREADY_UPDATED_MIXED_CONFIG
with self.assertRaises(Exception):
update_module_store_settings(mixed_setting, default_store='non-existent store')
| agpl-3.0 |
kunthar/hustle | test/test_pipeline.py | 3 | 6724 | import unittest
from hustle.core.column_fn import ip_ntoa
from hustle.core.pipeline import SelectPipe, _get_sort_range
from hustle.core.marble import Marble
from operator import itemgetter
EMP_FIELDS = ("+@2id", "+$name", "+%2hire_date", "+@4salary", "+@2department_id")
DEPT_FIELDS = ("+@2id", "+%2name", "+%2building", "+@2manager_id")
def first_items(items):
first = itemgetter(0)
return [first(item) for item in items]
def second_items(items):
first = itemgetter(1)
return [first(item) for item in items]
class TestPipeline(unittest.TestCase):
def setUp(self):
self.emp = Marble(name="employee",
fields=EMP_FIELDS)
self.dept = Marble(name="department",
fields=DEPT_FIELDS)
def test_get_key_names(self):
wheres = [(self.emp.salary > 25000), self.dept]
project = [self.emp.name, self.emp.salary, self.dept.building]
pipe = SelectPipe('server', wheres=wheres, project=project)
self.assertTupleEqual(('name', 'salary', None), tuple(first_items(pipe._get_key_names(project, ())[0])))
self.assertTupleEqual((None, None, 'building'), tuple(first_items(pipe._get_key_names(project, ())[1])))
join = [self.dept.id, self.emp.department_id]
pipe = SelectPipe('server', wheres=wheres, project=project, join=join)
self.assertTupleEqual(('department_id', 'name', 'salary', None), tuple(first_items(pipe._get_key_names(project, join)[0])))
self.assertTupleEqual(('id', None, None, 'building'), tuple(first_items(pipe._get_key_names(project, join)[1])))
project = [self.dept.building, self.emp.name, self.emp.salary]
pipe = SelectPipe('server', wheres=wheres, project=project, join=join)
self.assertTupleEqual(('department_id', None, 'name', 'salary'), tuple(first_items(pipe._get_key_names(project, join)[0])))
self.assertTupleEqual(('id', 'building', None, None), tuple(first_items(pipe._get_key_names(project, join)[1])))
def test_get_key_names_with_column_fn(self):
wheres = [(self.emp.salary > 25000), self.dept]
project = [self.emp.name, ip_ntoa(self.emp.salary), self.dept.building]
pipe = SelectPipe('server', wheres=wheres, project=project)
self.assertTupleEqual((None, None, None), tuple(second_items(pipe._get_key_names(project, ())[1])))
join = [self.dept.id, self.emp.department_id]
pipe = SelectPipe('server', wheres=wheres, project=project, join=join)
self.assertTupleEqual((None, None, None, None), tuple(second_items(pipe._get_key_names(project, join)[1])))
def test_get_sort_range(self):
project = [self.emp.name, self.emp.salary, self.dept.building]
order_by = []
# first case is with an empty order_by, it should sort by all columns
sort_range = _get_sort_range(2, project, order_by)
self.assertTupleEqual(tuple(sort_range), (2, 3, 4))
sort_range = _get_sort_range(0, project, order_by)
self.assertTupleEqual(tuple(sort_range), (0, 1, 2))
# test with a specified order_by, note that we should always be sorting all columns - the order_by
# just specifies the order. The unspecified columns are not in a defined order.
order_by = [self.emp.salary]
sort_range = _get_sort_range(2, project, order_by)
self.assertEqual(len(sort_range), 3)
self.assertEqual(sort_range[0], 3)
order_by = [self.dept.building, self.emp.name]
sort_range = _get_sort_range(1, project, order_by)
self.assertEqual(len(sort_range), 3)
self.assertTupleEqual(sort_range[:2], (3, 1))
def test_get_pipeline(self):
wheres = [(self.emp.salary > 25000), self.dept]
project = [self.emp.name, self.emp.salary, self.dept.building]
pipe = SelectPipe('server',
wheres=wheres,
project=project)
#(SPLIT, HustleStage('restrict-project',
# process=partial(process_restrict, jobobj=job),
# input_chain=[partial(hustle_stream, jobobj=job)]))
pipeline = pipe.pipeline
self.assertEqual(len(pipeline), 1)
self.assertEqual('split', pipeline[0][0])
self.assertEqual('restrict-select', pipeline[0][1].name)
order_by = [self.dept.building, self.emp.name]
pipe = SelectPipe('server',
wheres=wheres,
project=project,
order_by=order_by)
#(SPLIT, HustleStage('restrict-project',
# process=partial(process_restrict, jobobj=job),
# input_chain=[partial(hustle_stream, jobobj=job)])),
#(GROUP_LABEL, HustleStage('order',
# process=partial(process_order, jobobj=job, distinct=job.distinct),
# sort=sort_range))]
pipeline = pipe.pipeline
self.assertEqual(len(pipeline), 3)
self.assertEqual('split', pipeline[0][0])
self.assertEqual('group_node_label', pipeline[1][0])
self.assertEqual('order-combine', pipeline[1][1].name)
order_by = [self.dept.building, self.emp.name]
join = [self.dept.id, self.emp.department_id]
pipe = SelectPipe('server',
wheres=wheres,
project=project,
order_by=order_by,
join=join)
pipeline = pipe.pipeline
self.assertEqual(len(pipeline), 4)
self.assertEqual('split', pipeline[0][0])
self.assertEqual('group_label', pipeline[1][0])
self.assertEqual('join', pipeline[1][1].name)
self.assertEqual('group_all', pipeline[3][0])
self.assertEqual('order-reduce', pipeline[3][1].name)
def test_column_aliases_project(self):
wheres = [(self.emp.salary > 25000), self.dept]
project = [self.emp.name, self.emp.salary, self.dept.building, self.dept.name]
order_by = ['name', 'employee.salary', self.dept.building, 3]
join = [self.emp.name, self.dept.name]
pipe = SelectPipe('server',
wheres=wheres,
project=project,
order_by=order_by,
join=join)
self.assertEqual(len(pipe.order_by), 4)
self.assertEqual(pipe.order_by[0], self.emp.name)
self.assertEqual(pipe.order_by[1], self.emp.salary)
self.assertEqual(pipe.order_by[2], self.dept.building)
        self.assertEqual(pipe.order_by[3], self.dept.name)
| mit |
joshbruning/selenium | py/selenium/webdriver/phantomjs/service.py | 53 | 2587 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tempfile
from selenium.webdriver.common import service
class Service(service.Service):
"""
Object that manages the starting and stopping of PhantomJS / Ghostdriver
"""
def __init__(self, executable_path, port=0, service_args=None, log_path=None):
"""
Creates a new instance of the Service
:Args:
- executable_path : Path to PhantomJS binary
- port : Port the service is running on
- service_args : A List of other command line options to pass to PhantomJS
- log_path: Path for PhantomJS service to log to
"""
self.service_args = service_args
if self.service_args is None:
self.service_args = []
else:
self.service_args = service_args[:]
if not log_path:
log_path = "ghostdriver.log"
if not self._args_contain("--cookies-file="):
self._cookie_temp_file_handle, self._cookie_temp_file = tempfile.mkstemp()
self.service_args.append("--cookies-file=" + self._cookie_temp_file)
else:
self._cookie_temp_file = None
service.Service.__init__(self, executable_path, port=port, log_file=open(log_path, 'w'))
def _args_contain(self, arg):
return len(list(filter(lambda x: x.startswith(arg), self.service_args))) > 0
def command_line_args(self):
return self.service_args + ["--webdriver=%d" % self.port]
@property
def service_url(self):
"""
Gets the url of the GhostDriver Service
"""
return "http://localhost:%d/wd/hub" % self.port
def send_remote_shutdown_command(self):
if self._cookie_temp_file:
os.close(self._cookie_temp_file_handle)
os.remove(self._cookie_temp_file)
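# Hedged usage sketch (not part of the original module): a Service instance is
# normally created and managed by selenium.webdriver.PhantomJS, but it can be
# driven directly. The executable path and port below are assumptions chosen
# only for illustration.
#
#     service = Service('/usr/local/bin/phantomjs', port=8910)
#     service.start()               # start()/stop() are inherited from service.Service
#     print(service.service_url)    # -> http://localhost:8910/wd/hub
#     service.stop()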
| apache-2.0 |
fayvlad/ArduinoJson | third-party/gtest-1.7.0/test/gtest_catch_exceptions_test.py | 2139 | 9901 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's exception catching behavior.
This script invokes gtest_catch_exceptions_test_ and
gtest_catch_exceptions_ex_test_ (programs written with
Google Test) and verifies their output.
"""
__author__ = '[email protected] (Vlad Losev)'
import os
import gtest_test_utils
# Constants.
FLAG_PREFIX = '--gtest_'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
NO_CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions=0'
FILTER_FLAG = FLAG_PREFIX + 'filter'
# Path to the gtest_catch_exceptions_ex_test_ binary, compiled with
# exceptions enabled.
EX_EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_ex_test_')
# Path to the gtest_catch_exceptions_test_ binary, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_no_ex_test_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
TEST_LIST = gtest_test_utils.Subprocess(
[EXE_PATH, LIST_TESTS_FLAG], env=environ).output
SUPPORTS_SEH_EXCEPTIONS = 'ThrowsSehException' in TEST_LIST
if SUPPORTS_SEH_EXCEPTIONS:
BINARY_OUTPUT = gtest_test_utils.Subprocess([EXE_PATH], env=environ).output
EX_BINARY_OUTPUT = gtest_test_utils.Subprocess(
[EX_EXE_PATH], env=environ).output
# The tests.
if SUPPORTS_SEH_EXCEPTIONS:
# pylint:disable-msg=C6302
class CatchSehExceptionsTest(gtest_test_utils.TestCase):
"""Tests exception-catching behavior."""
def TestSehExceptions(self, test_output):
self.assert_('SEH exception with code 0x2a thrown '
'in the test fixture\'s constructor'
in test_output)
self.assert_('SEH exception with code 0x2a thrown '
'in the test fixture\'s destructor'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in SetUpTestCase()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in TearDownTestCase()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in SetUp()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in TearDown()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in the test body'
in test_output)
def testCatchesSehExceptionsWithCxxExceptionsEnabled(self):
self.TestSehExceptions(EX_BINARY_OUTPUT)
def testCatchesSehExceptionsWithCxxExceptionsDisabled(self):
self.TestSehExceptions(BINARY_OUTPUT)
class CatchCxxExceptionsTest(gtest_test_utils.TestCase):
"""Tests C++ exception-catching behavior.
Tests in this test case verify that:
* C++ exceptions are caught and logged as C++ (not SEH) exceptions
  * Exceptions thrown affect the remainder of the test work flow in the
expected manner.
"""
def testCatchesCxxExceptionsInFixtureConstructor(self):
self.assert_('C++ exception with description '
'"Standard C++ exception" thrown '
'in the test fixture\'s constructor'
in EX_BINARY_OUTPUT)
self.assert_('unexpected' not in EX_BINARY_OUTPUT,
'This failure belongs in this test only if '
'"CxxExceptionInConstructorTest" (no quotes) '
'appears on the same line as words "called unexpectedly"')
if ('CxxExceptionInDestructorTest.ThrowsExceptionInDestructor' in
EX_BINARY_OUTPUT):
def testCatchesCxxExceptionsInFixtureDestructor(self):
self.assert_('C++ exception with description '
'"Standard C++ exception" thrown '
'in the test fixture\'s destructor'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInDestructorTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInSetUpTestCase(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in SetUpTestCase()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInConstructorTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest constructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest::SetUp() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest test body '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInTearDownTestCase(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in TearDownTestCase()'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInSetUp(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in SetUp()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('unexpected' not in EX_BINARY_OUTPUT,
'This failure belongs in this test only if '
'"CxxExceptionInSetUpTest" (no quotes) '
'appears on the same line as words "called unexpectedly"')
def testCatchesCxxExceptionsInTearDown(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in TearDown()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTearDownTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTearDownTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInTestBody(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in the test body'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesNonStdCxxExceptions(self):
self.assert_('Unknown C++ exception thrown in the test body'
in EX_BINARY_OUTPUT)
def testUnhandledCxxExceptionsAbortTheProgram(self):
# Filters out SEH exception tests on Windows. Unhandled SEH exceptions
# cause tests to show pop-up windows there.
FITLER_OUT_SEH_TESTS_FLAG = FILTER_FLAG + '=-*Seh*'
# By default, Google Test doesn't catch the exceptions.
uncaught_exceptions_ex_binary_output = gtest_test_utils.Subprocess(
[EX_EXE_PATH,
NO_CATCH_EXCEPTIONS_FLAG,
FITLER_OUT_SEH_TESTS_FLAG],
env=environ).output
self.assert_('Unhandled C++ exception terminating the program'
in uncaught_exceptions_ex_binary_output)
self.assert_('unexpected' not in uncaught_exceptions_ex_binary_output)
if __name__ == '__main__':
gtest_test_utils.Main()
| mit |
dushu1203/chromium.src | tools/perf/page_sets/simple_mobile_sites.py | 9 | 1794 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class SimplePage(page_module.Page):
def __init__(self, url, page_set):
super(SimplePage, self).__init__(
url=url,
page_set=page_set,
credentials_path='data/credentials.json')
self.archive_data_file = 'data/simple_mobile_sites.json'
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
# TODO(epenner): Remove this wait (http://crbug.com/366933)
action_runner.Wait(5)
class SimpleScrollPage(SimplePage):
def __init__(self, url, page_set):
super(SimpleScrollPage, self).__init__(url=url, page_set=page_set)
def RunPageInteractions(self, action_runner):
# Make the scroll longer to reduce noise.
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollPage(direction='down', speed_in_pixels_per_second=300)
interaction.End()
class SimpleMobileSitesPageSet(page_set_module.PageSet):
""" Simple mobile sites """
def __init__(self):
super(SimpleMobileSitesPageSet, self).__init__(
user_agent_type='tablet_10_inch',
archive_data_file='data/simple_mobile_sites.json',
bucket=page_set_module.PUBLIC_BUCKET)
scroll_page_list = [
# Why: Scrolls moderately complex pages (up to 60 layers)
'http://www.ebay.co.uk/',
'https://www.flickr.com/',
'http://www.apple.com/mac/',
'http://www.nyc.gov',
'http://m.nytimes.com/'
]
for url in scroll_page_list:
self.AddUserStory(SimpleScrollPage(url, self))
| bsd-3-clause |
maxdeliso/elevatorSim | Lib/test/test_iterlen.py | 59 | 7747 | """ Test Iterator Length Transparency
Some functions or methods which accept general iterable arguments have
optional, more efficient code paths if they know how many items to expect.
For instance, map(func, iterable) will pre-allocate the exact amount of
space required whenever the iterable can report its length.
The desired invariant is: len(it)==len(list(it)).
A complication is that an iterable and iterator can be the same object. To
maintain the invariant, an iterator needs to dynamically update its length.
For instance, an iterable such as range(10) always reports its length as ten,
but it=iter(range(10)) starts at ten, and then goes to nine after next(it).
Having this capability means that map() can ignore the distinction between
map(func, iterable) and map(func, iter(iterable)).
When the iterable is immutable, the implementation can straight-forwardly
report the original length minus the cumulative number of calls to next().
This is the case for tuples, range objects, and itertools.repeat().
Some containers become temporarily immutable during iteration. This includes
dicts, sets, and collections.deque. Their implementation is equally simple
though they need to permanently set their length to zero whenever there is
an attempt to iterate after a length mutation.
The situation is slightly more involved whenever an object allows length mutation
during iteration. Lists and sequence iterators are dynamically updatable.
So, if a list is extended during iteration, the iterator will continue through
the new items. If it shrinks to a point before the most recent iteration,
then no further items are available and the length is reported at zero.
Reversed objects can also be wrapped around mutable objects; however, any
appends after the current position are ignored. Any other approach leads
to confusion and possibly returning the same item more than once.
The iterators not listed above, such as enumerate and the other itertools,
are not length transparent because they have no way to distinguish between
iterables that report static length and iterators whose length changes with
each call (i.e. the difference between enumerate('abc') and
enumerate(iter('abc'))).
"""
import unittest
from test import support
from itertools import repeat
from collections import deque
from builtins import len as _len
n = 10
def len(obj):
try:
return _len(obj)
except TypeError:
try:
# note: this is an internal undocumented API,
# don't rely on it in your own programs
return obj.__length_hint__()
except AttributeError:
raise TypeError
class TestInvariantWithoutMutations(unittest.TestCase):
def test_invariant(self):
it = self.it
for i in reversed(range(1, n+1)):
self.assertEqual(len(it), i)
next(it)
self.assertEqual(len(it), 0)
self.assertRaises(StopIteration, next, it)
self.assertEqual(len(it), 0)
class TestTemporarilyImmutable(TestInvariantWithoutMutations):
def test_immutable_during_iteration(self):
# objects such as deques, sets, and dictionaries enforce
# length immutability during iteration
it = self.it
self.assertEqual(len(it), n)
next(it)
self.assertEqual(len(it), n-1)
self.mutate()
self.assertRaises(RuntimeError, next, it)
self.assertEqual(len(it), 0)
## ------- Concrete Type Tests -------
class TestRepeat(TestInvariantWithoutMutations):
def setUp(self):
self.it = repeat(None, n)
def test_no_len_for_infinite_repeat(self):
# The repeat() object can also be infinite
self.assertRaises(TypeError, len, repeat(None))
class TestXrange(TestInvariantWithoutMutations):
def setUp(self):
self.it = iter(range(n))
class TestXrangeCustomReversed(TestInvariantWithoutMutations):
def setUp(self):
self.it = reversed(range(n))
class TestTuple(TestInvariantWithoutMutations):
def setUp(self):
self.it = iter(tuple(range(n)))
## ------- Types that should not be mutated during iteration -------
class TestDeque(TestTemporarilyImmutable):
def setUp(self):
d = deque(range(n))
self.it = iter(d)
self.mutate = d.pop
class TestDequeReversed(TestTemporarilyImmutable):
def setUp(self):
d = deque(range(n))
self.it = reversed(d)
self.mutate = d.pop
class TestDictKeys(TestTemporarilyImmutable):
def setUp(self):
d = dict.fromkeys(range(n))
self.it = iter(d)
self.mutate = d.popitem
class TestDictItems(TestTemporarilyImmutable):
def setUp(self):
d = dict.fromkeys(range(n))
self.it = iter(d.items())
self.mutate = d.popitem
class TestDictValues(TestTemporarilyImmutable):
def setUp(self):
d = dict.fromkeys(range(n))
self.it = iter(d.values())
self.mutate = d.popitem
class TestSet(TestTemporarilyImmutable):
def setUp(self):
d = set(range(n))
self.it = iter(d)
self.mutate = d.pop
## ------- Types that can mutate during iteration -------
class TestList(TestInvariantWithoutMutations):
def setUp(self):
self.it = iter(range(n))
def test_mutation(self):
d = list(range(n))
it = iter(d)
next(it)
next(it)
self.assertEqual(len(it), n-2)
d.append(n)
self.assertEqual(len(it), n-1) # grow with append
d[1:] = []
self.assertEqual(len(it), 0)
self.assertEqual(list(it), [])
d.extend(range(20))
self.assertEqual(len(it), 0)
class TestListReversed(TestInvariantWithoutMutations):
def setUp(self):
self.it = reversed(range(n))
def test_mutation(self):
d = list(range(n))
it = reversed(d)
next(it)
next(it)
self.assertEqual(len(it), n-2)
d.append(n)
self.assertEqual(len(it), n-2) # ignore append
d[1:] = []
self.assertEqual(len(it), 0)
self.assertEqual(list(it), []) # confirm invariant
d.extend(range(20))
self.assertEqual(len(it), 0)
## -- Check to make sure exceptions are not suppressed by __length_hint__()
class BadLen(object):
def __iter__(self): return iter(range(10))
def __len__(self):
raise RuntimeError('hello')
class BadLengthHint(object):
def __iter__(self): return iter(range(10))
def __length_hint__(self):
raise RuntimeError('hello')
class NoneLengthHint(object):
def __iter__(self): return iter(range(10))
def __length_hint__(self):
return None
class TestLengthHintExceptions(unittest.TestCase):
def test_issue1242657(self):
self.assertRaises(RuntimeError, list, BadLen())
self.assertRaises(RuntimeError, list, BadLengthHint())
self.assertRaises(RuntimeError, [].extend, BadLen())
self.assertRaises(RuntimeError, [].extend, BadLengthHint())
b = bytearray(range(10))
self.assertRaises(RuntimeError, b.extend, BadLen())
self.assertRaises(RuntimeError, b.extend, BadLengthHint())
def test_invalid_hint(self):
# Make sure an invalid result doesn't muck-up the works
self.assertEqual(list(NoneLengthHint()), list(range(10)))
def test_main():
unittests = [
TestRepeat,
TestXrange,
TestXrangeCustomReversed,
TestTuple,
TestDeque,
TestDequeReversed,
TestDictKeys,
TestDictItems,
TestDictValues,
TestSet,
TestList,
TestListReversed,
TestLengthHintExceptions,
]
support.run_unittest(*unittests)
if __name__ == "__main__":
test_main()
| bsd-2-clause |
jbrahy/capstone | bindings/python/test_skipdata.py | 31 | 2425 | #!/usr/bin/env python
# Capstone Python bindings, by Nguyen Anh Quynnh <[email protected]>
from __future__ import print_function
from capstone import *
import binascii
from xprint import to_x, to_hex, to_x_32
X86_CODE32 = b"\x8d\x4c\x32\x08\x01\xd8\x81\xc6\x34\x12\x00\x00\x00\x91\x92"
RANDOM_CODE = b"\xed\x00\x00\x00\x00\x1a\x5a\x0f\x1f\xff\xc2\x09\x80\x00\x00\x00\x07\xf7\xeb\x2a\xff\xff\x7f\x57\xe3\x01\xff\xff\x7f\x57\xeb\x00\xf0\x00\x00\x24\xb2\x4f\x00\x78"
all_tests = (
(CS_ARCH_X86, CS_MODE_32, X86_CODE32, "X86 32 (Intel syntax)", 0),
(CS_ARCH_ARM, CS_MODE_ARM, RANDOM_CODE, "Arm", 0),
)
# ## Test cs_disasm_quick()
def test_cs_disasm_quick():
for (arch, mode, code, comment, syntax) in all_tests:
print('*' * 40)
print("Platform: %s" % comment)
print("Disasm:"),
print(to_hex(code))
for insn in cs_disasm_quick(arch, mode, code, 0x1000):
print("0x%x:\t%s\t%s" % (insn.address, insn.mnemonic, insn.op_str))
print
# Sample callback for SKIPDATA option
def testcb(buffer, size, offset, userdata):
# always skip 2 bytes of data
return 2
# ## Test class Cs
def test_class():
for (arch, mode, code, comment, syntax) in all_tests:
print('*' * 16)
print("Platform: %s" %comment)
print("Code: %s" % to_hex(code))
print("Disasm:")
try:
md = Cs(arch, mode)
if syntax != 0:
md.syntax = syntax
md.skipdata = True
# Default "data" instruction's name is ".byte". To rename it to "db", just uncomment
# the code below.
# md.skipdata_setup = ("db", None, None)
# NOTE: This example ignores SKIPDATA's callback (first None) & user_data (second None)
# To customize the SKIPDATA callback, uncomment the line below.
# md.skipdata_setup = (".db", CS_SKIPDATA_CALLBACK(testcb), None)
for insn in md.disasm(code, 0x1000):
#bytes = binascii.hexlify(insn.bytes)
#print("0x%x:\t%s\t%s\t// hex-code: %s" %(insn.address, insn.mnemonic, insn.op_str, bytes))
print("0x%x:\t%s\t%s" % (insn.address, insn.mnemonic, insn.op_str))
print("0x%x:" % (insn.address + insn.size))
print
except CsError as e:
print("ERROR: %s" % e)
if __name__ == '__main__':
test_class()
| bsd-3-clause |
Stargrazer82301/CAAPR | CAAPR/CAAPR_AstroMagic/PTS/pts/magic/catalog/builder.py | 1 | 7693 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.magic.catalogbuilder Contains the CatalogBuilder class.
# -----------------------------------------------------------------
# Ensure Python 3 functionality
from __future__ import absolute_import, division, print_function
# Import the relevant PTS classes and modules
from ...core.basics.configurable import OldConfigurable
from ...core.tools import tables
# -----------------------------------------------------------------
class CatalogBuilder(OldConfigurable):
"""
This class ...
"""
def __init__(self, config=None):
"""
The constructor ...
:param config:
:return:
"""
# Call the constructor of the base class
super(CatalogBuilder, self).__init__(config, "magic")
# The image frame
self.frame = None
# References to the extractors
self.galaxy_extractor = None
self.star_extractor = None
self.trained_extractor = None
# The output catalogs
self.galactic_catalog = None
self.stellar_catalog = None
# -----------------------------------------------------------------
def run(self, frame, galaxy_extractor, star_extractor, trained_extractor):
"""
This function ...
:param frame:
:param galaxy_extractor:
:param star_extractor:
:param trained_extractor:
:return:
"""
# 1. Call the setup function
self.setup(frame, galaxy_extractor, star_extractor, trained_extractor)
# 2. Build the catalog
self.build()
# 3. Writing
self.write()
# -----------------------------------------------------------------
def clear(self):
"""
This function ...
:return:
"""
# Set attributes to None
self.frame = None
self.galaxy_extractor = None
self.star_extractor = None
self.trained_extractor = None
# -----------------------------------------------------------------
def setup(self, frame, galaxy_extractor, star_extractor, trained_extractor):
"""
This function ...
:param frame:
:param galaxy_extractor:
:param star_extractor:
:param trained_extractor:
:return:
"""
# Call the setup function of the base class
super(CatalogBuilder, self).setup()
# The frame
self.frame = frame
# References to the extractors
self.galaxy_extractor = galaxy_extractor
self.star_extractor = star_extractor
self.trained_extractor = trained_extractor
# -----------------------------------------------------------------
def build(self):
"""
This function ...
:return:
"""
# Build the galactic catalog
self.build_galactic_catalog()
# Build the stellar catalog
self.build_stellar_catalog()
# -----------------------------------------------------------------
def build_galactic_catalog(self):
"""
This function ...
:return:
"""
# Set galactic catalog (no merging with trained extractor (yet) and undetected galaxies are included anyway)
self.galactic_catalog = self.galaxy_extractor.catalog
# -----------------------------------------------------------------
def build_stellar_catalog(self):
"""
This function ...
:return:
"""
# Initialize columns
catalog_column = []
id_column = []
ra_column = []
dec_column = []
ra_error_column = []
dec_error_column = []
confidence_level_column = []
on_galaxy_column = []
original_id_column = []
# Append stars from the star extractor; loop over the stellar statistics
for i in range(len(self.star_extractor.statistics)):
# Get the index of this star in the input catalog used by the star extractor
index = self.star_extractor.statistics["Star index"][i]
# Skip undetected stars
if not self.star_extractor.statistics["Detected"][i]: continue
# Add the appropriate values in the columns
catalog_column.append(self.star_extractor.catalog["Catalog"][index] if not (hasattr(self.star_extractor.catalog["Catalog"], "mask") and self.star_extractor.catalog["Catalog"].mask[index]) else None)
id_column.append(self.star_extractor.catalog["Id"][index] if not (hasattr(self.star_extractor.catalog["Id"], "mask") and self.star_extractor.catalog["Id"].mask[index]) else None)
ra_column.append(self.star_extractor.catalog["Right ascension"][index])
dec_column.append(self.star_extractor.catalog["Declination"][index])
ra_error_column.append(self.star_extractor.catalog["Right ascension error"][index])
dec_error_column.append(self.star_extractor.catalog["Declination error"][index])
confidence_level_column.append(self.star_extractor.catalog["Confidence level"][index])
on_galaxy_column.append(self.star_extractor.catalog["On galaxy"][index])
original_id_column.append(None)
#position_error = 0.5 * self.frame.average_pixelscale.to("mas/pix").value # in mas !!
x_position_error = 0.5 * self.frame.pixelscale.x.to("mas/pix").value
y_position_error = 0.5 * self.frame.pixelscale.y.to("mas/pix").value
# Append stars from the trained extractor; loop over the stars found by the trained extractor
for star in self.trained_extractor.stars:
# Add the appropriate values in the columns
catalog_column.append(None)
id_column.append(None)
ra_column.append(star.position.ra.value)
dec_column.append(star.position.dec.value)
ra_error_column.append(x_position_error)
dec_error_column.append(y_position_error)
confidence_level_column.append(star.confidence_level)
on_galaxy_column.append(False)
original_id_column.append(None)
data = [catalog_column, id_column, ra_column, dec_column, ra_error_column, dec_error_column, confidence_level_column,
on_galaxy_column, original_id_column]
names = ['Catalog', 'Id', 'Right ascension', 'Declination', 'Right ascension error', 'Declination error', 'Confidence level',
'On galaxy', 'Original catalog and id']
# Create the merged stellar catalog
self.stellar_catalog = tables.new(data, names)
# -----------------------------------------------------------------
def write(self):
"""
This function ...
:return:
"""
# Write the galactic catalog
self.write_galactic_catalog()
# Write the stellar catalog
self.write_stellar_catalog()
# -----------------------------------------------------------------
def write_galactic_catalog(self):
"""
This function ...
:return:
"""
pass
# -----------------------------------------------------------------
def write_stellar_catalog(self):
"""
This function ...
:return:
"""
pass
# -----------------------------------------------------------------
| mit |
foobarbazblarg/stayclean | stayclean-2017-march/participant.py | 60 | 1524 | import datetime
class Participant:
def __init__(self):
self.name = ""
self.isStillIn = True
self.hasCheckedIn = False
self.relapseDate = None
@property
def hasRelapsed(self):
return self.relapseDate is not None
def setFromLine(self, lineString):
# format of participants.txt line:
# name hasCheckedIn isStillIn
# e.g.:
# foobarbazblarg True True
words = lineString.split()
self.name = words[0]
self.hasCheckedIn = words[1] == 'True'
self.isStillIn = words[2] == 'True'
if len(words) >= 4:
self.relapseDate = datetime.datetime.strptime(words[3], "%Y.%m.%d").date()
def relapseNowIfNotAlready(self):
if self.isStillIn:
self.isStillIn = False
self.relapseDate = datetime.date.today()
def relapseDayOfWeekIndex(self):
if self.relapseDate:
return self.relapseDate.weekday()
else:
return None
def relapseDayOfWeekName(self):
        # compare against None explicitly so that Monday (index 0) is not treated as falsy
        if self.relapseDayOfWeekIndex() is not None:
return {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday', 4: 'Friday', 5: 'Saturday', 6: 'Sunday'}[self.relapseDayOfWeekIndex()]
else:
return None
def asLine(self):
answer = self.name + " " + str(self.hasCheckedIn) + " " + str(self.isStillIn)
if self.relapseDate:
answer += " "
answer += self.relapseDate.strftime("%Y.%m.%d")
return answer
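if __name__ == '__main__':
    # Illustrative round trip (not part of the original script); the sample line
    # below is hypothetical data in the format documented in setFromLine().
    p = Participant()
    p.setFromLine('foobarbazblarg True True 2017.03.05')
    assert p.asLine() == 'foobarbazblarg True True 2017.03.05'
    assert p.relapseDayOfWeekName() == 'Sunday'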
| mit |
nskinkel/oppy | oppy/tests/integration/cell/test_fixedlen.py | 1 | 12689 | import struct
import unittest
from collections import OrderedDict
from oppy.cell.fixedlen import (
FixedLenCell,
Create2Cell,
Created2Cell,
CreatedFastCell,
CreatedCell,
CreateFastCell,
CreateCell,
DestroyCell,
EncryptedCell,
NetInfoCell,
PaddingCell,
)
from oppy.cell.util import TLVTriple
from oppy.tests.integration.cell.cellbase import FixedLenTestBase
CIRC_ID = 1
# Unit tests and constants for Create2Cell
CREATE2_CMD = 10
CREATE2_NTOR_HTYPE = 2
CREATE2_NTOR_HLEN = 84
CREATE2_NTOR_HDATA_DUMMY = "\x00" * CREATE2_NTOR_HLEN
create2_bytes_good = struct.pack(
"!HBHH{}s".format(CREATE2_NTOR_HLEN),
CIRC_ID, CREATE2_CMD,
CREATE2_NTOR_HTYPE, CREATE2_NTOR_HLEN, CREATE2_NTOR_HDATA_DUMMY,
)
create2_bytes_good_padded = FixedLenCell.padCellBytes(create2_bytes_good)
assert len(create2_bytes_good_padded) == 512
create2_parse_bad_htype = struct.pack(
"!HBHH{}s".format(CREATE2_NTOR_HLEN),
CIRC_ID, CREATE2_CMD,
# ntor should be 2
1, CREATE2_NTOR_HLEN, CREATE2_NTOR_HDATA_DUMMY,
)
create2_parse_bad_htype = FixedLenCell.padCellBytes(create2_parse_bad_htype)
assert len(create2_parse_bad_htype) == 512
create2_parse_bad_hlen = struct.pack(
"!HBHH{}s".format(CREATE2_NTOR_HLEN),
CIRC_ID, CREATE2_CMD,
# hlen should be 84 for ntor
CREATE2_NTOR_HTYPE, 83, CREATE2_NTOR_HDATA_DUMMY,
)
create2_parse_bad_hlen = FixedLenCell.padCellBytes(create2_parse_bad_hlen)
assert len(create2_parse_bad_hlen) == 512
# htype should be 2 for ntor
create2_make_bad_htype = (CIRC_ID, 1, CREATE2_NTOR_HLEN,
CREATE2_NTOR_HDATA_DUMMY)
# htype should be int not str
create2_make_bad_htype_2 = (CIRC_ID, str(CREATE2_NTOR_HTYPE),
CREATE2_NTOR_HLEN,
CREATE2_NTOR_HDATA_DUMMY)
# hlen should be 84 for ntor
create2_make_bad_hlen = (CIRC_ID, CREATE2_NTOR_HTYPE, 83,
CREATE2_NTOR_HDATA_DUMMY)
# len(hdata) == hlen must be true
create2_make_bad_hdata = (CIRC_ID, CREATE2_NTOR_HTYPE, CREATE2_NTOR_HLEN,
"\x00")
class Create2CellTests(FixedLenTestBase, unittest.TestCase):
# NOTE: Twisted unfortunately does not support `setUpClass()`, so we
# do actually need to call this before every test
def setUp(self):
self.cell_constants = {
'cell-bytes-good': create2_bytes_good_padded,
'cell-type': Create2Cell,
'cell-bytes-good-nopadding': create2_bytes_good,
}
self.cell_header = OrderedDict()
self.cell_header['circ_id'] = CIRC_ID
self.cell_header['cmd'] = CREATE2_CMD
self.cell_header['link_version'] = 3
self.cell_attributes = OrderedDict()
self.cell_attributes['htype'] = CREATE2_NTOR_HTYPE
self.cell_attributes['hlen'] = CREATE2_NTOR_HLEN
self.cell_attributes['hdata'] = CREATE2_NTOR_HDATA_DUMMY
self.bad_parse_inputs = (create2_parse_bad_htype,
create2_parse_bad_hlen)
self.bad_make_inputs = (create2_make_bad_htype,
create2_make_bad_htype_2,
create2_make_bad_hlen,
create2_make_bad_hdata)
self.encrypted = False
# Unit tests and constants for Created2Cell
# we can reuse most of the values from Create2Cell for some constants
CREATED2_CMD = 11
created2_bytes_good = struct.pack(
"!HBH{}s".format(CREATE2_NTOR_HLEN),
CIRC_ID, CREATED2_CMD,
CREATE2_NTOR_HLEN, CREATE2_NTOR_HDATA_DUMMY,
)
created2_bytes_good_padded = FixedLenCell.padCellBytes(created2_bytes_good)
assert len(created2_bytes_good_padded) == 512
created2_parse_bad_hlen = struct.pack(
"!HBH{}s".format(CREATE2_NTOR_HLEN),
CIRC_ID, CREATE2_CMD,
# hlen should be 84 for ntor
83, CREATE2_NTOR_HDATA_DUMMY,
)
created2_parse_bad_hlen = FixedLenCell.padCellBytes(created2_parse_bad_hlen)
assert len(created2_parse_bad_hlen) == 512
# hlen should be 84 for ntor
created2_make_bad_hlen = (CIRC_ID, 83, CREATE2_NTOR_HDATA_DUMMY)
# len(hdata) == hlen must be true
created2_make_bad_hdata = (CIRC_ID, CREATE2_NTOR_HLEN, "\x00")
class Created2CellTests(FixedLenTestBase, unittest.TestCase):
def setUp(self):
self.cell_constants = {
'cell-bytes-good': created2_bytes_good_padded,
'cell-type': Created2Cell,
'cell-bytes-good-nopadding': created2_bytes_good,
}
self.cell_header = OrderedDict()
self.cell_header['circ_id'] = CIRC_ID
self.cell_header['cmd'] = CREATED2_CMD
self.cell_header['link_version'] = 3
self.cell_attributes = OrderedDict()
self.cell_attributes['hlen'] = CREATE2_NTOR_HLEN
self.cell_attributes['hdata'] = CREATE2_NTOR_HDATA_DUMMY
self.bad_parse_inputs = (created2_parse_bad_hlen,)
self.bad_make_inputs = (created2_make_bad_hlen,
created2_make_bad_hdata,)
self.encrypted = False
# for unimplemented cells, just verify they fail when we try to create them
class CreatedFastCellTests(unittest.TestCase):
def test_init_fail(self):
self.assertRaises(NotImplementedError, CreatedFastCell, 'dummy')
class CreatedCellTests(unittest.TestCase):
def test_init_fail(self):
self.assertRaises(NotImplementedError, CreatedCell, 'dummy')
class CreateFastCellTests(unittest.TestCase):
def test_init_fail(self):
self.assertRaises(NotImplementedError, CreateFastCell, 'dummy')
class CreateCellTests(unittest.TestCase):
def test_init_fail(self):
self.assertRaises(NotImplementedError, CreateCell, 'dummy')
# Unit tests and constants for DestroyCell
DESTROY_CMD = 4
destroy_bytes_good = struct.pack(
"!HBB",
CIRC_ID, DESTROY_CMD,
0,
)
destroy_bytes_good_padded = FixedLenCell.padCellBytes(destroy_bytes_good)
assert len(destroy_bytes_good_padded) == 512
destroy_parse_bad_reason = struct.pack(
"!HBB",
CIRC_ID, DESTROY_CMD,
# 13 is not a valid reason
13,
)
destroy_parse_bad_reason = FixedLenCell.padCellBytes(destroy_parse_bad_reason)
assert len(destroy_parse_bad_reason) == 512
destroy_make_bad_reason = (CIRC_ID, 13)
encrypted_bytes_good = struct.pack(
"!HBB",
CIRC_ID, DESTROY_CMD,
0,
)
destroy_bytes_good_padded = FixedLenCell.padCellBytes(destroy_bytes_good)
assert len(destroy_bytes_good_padded) == 512
class DestroyCellTests(FixedLenTestBase, unittest.TestCase):
def setUp(self):
self.cell_constants = {
'cell-bytes-good': destroy_bytes_good_padded,
'cell-type': DestroyCell,
'cell-bytes-good-nopadding': destroy_bytes_good,
}
self.cell_header = OrderedDict()
self.cell_header['circ_id'] = CIRC_ID
self.cell_header['cmd'] = DESTROY_CMD
self.cell_header['link_version'] = 3
self.cell_attributes = OrderedDict()
self.cell_attributes['reason'] = 0
self.bad_parse_inputs = (destroy_parse_bad_reason,)
self.bad_make_inputs = (destroy_make_bad_reason,)
self.encrypted = False
# Unit tests and constants for EncryptedCell
# since the payload of an encrypted cell prior to decryption is, from oppy's
# perspective, just a black box, the only type of "bad" payload data is
# a payload passed to "make()" that is too large for a relay cell
RELAY_CMD = 3
encrypted_bytes_good = struct.pack(
"!HB57s",
CIRC_ID, RELAY_CMD,
"\x00" * 509,
)
encrypted_bytes_good_padded = FixedLenCell.padCellBytes(encrypted_bytes_good)
assert len(encrypted_bytes_good_padded) == 512
encrypted_make_bad_payload_len_long = (CIRC_ID, "\x00" * 510)
encrypted_make_bad_payload_len_short = (CIRC_ID, "\x00" * 508)
class EncryptedCellTests(FixedLenTestBase, unittest.TestCase):
def setUp(self):
self.cell_constants = {
'cell-bytes-good': encrypted_bytes_good_padded,
'cell-type': EncryptedCell,
'cell-bytes-good-nopadding': encrypted_bytes_good,
}
self.cell_header = OrderedDict()
self.cell_header['circ_id'] = CIRC_ID
self.cell_header['cmd'] = RELAY_CMD
self.cell_header['link_version'] = 3
self.cell_attributes = {'enc_payload': "\x00" * 509, }
self.bad_parse_inputs = ()
self.bad_make_inputs = (encrypted_make_bad_payload_len_long,
encrypted_make_bad_payload_len_short,)
self.encrypted = True
def test_getBytes_trimmed(self):
# encrypted cells don't know what's in their payload, so
# "trimmed" arg doesn't make sense for them
pass
# NetInfoCell (IPv4 type/length/value) unittests and constant values
NETINFO_CMD = 8
# IPv4 type type/length/value
netinfo_bytes_good = struct.pack(
'!HBIBB4sBBB4s',
CIRC_ID, NETINFO_CMD,
0, 4, 4, "\x7f\x00\x00\x01", # 127.0.0.1
1, 4, 4, "\x7f\x00\x00\x01",
)
netinfo_bytes_good_padded = FixedLenCell.padCellBytes(netinfo_bytes_good)
assert len(netinfo_bytes_good_padded) == 512
netinfo_parse_bad_num_addresses = netinfo_bytes_good_padded[:13]
netinfo_parse_bad_num_addresses += struct.pack('!B', 200)
netinfo_parse_bad_num_addresses += netinfo_bytes_good_padded[14:]
assert len(netinfo_parse_bad_num_addresses) == 512
netinfo_make_bad_num_addresses = (CIRC_ID, TLVTriple(u'127.0.0.1'),
[TLVTriple(u'127.0.0.1') for i in xrange(50)])
class NetInfoCellIPv4Tests(FixedLenTestBase, unittest.TestCase):
def setUp(self):
self.cell_constants = {
'cell-bytes-good': netinfo_bytes_good_padded,
'cell-type': NetInfoCell,
'cell-bytes-good-nopadding': netinfo_bytes_good,
}
self.cell_header = OrderedDict()
self.cell_header['circ_id'] = CIRC_ID
self.cell_header['cmd'] = NETINFO_CMD
self.cell_header['link_version'] = 3
self.cell_attributes = OrderedDict()
self.cell_attributes['other_or_address'] = TLVTriple(u'127.0.0.1')
self.cell_attributes['this_or_addresses'] = [TLVTriple(u'127.0.0.1')]
self.cell_attributes['timestamp'] = struct.pack('!I', 0)
self.bad_parse_inputs = (netinfo_parse_bad_num_addresses,)
self.bad_make_inputs = (netinfo_make_bad_num_addresses,)
self.encrypted = False
# IPv6 type type/length/value
netinfo_bytes_good_ipv6 = struct.pack(
'!HBIBB16sBBB16s',
CIRC_ID, NETINFO_CMD,
0, 6, 16, "\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x02\xb3\xff\xfe\x1e\x83)",
1, 6, 16, "\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x02\xb3\xff\xfe\x1e\x83)",
)
netinfo_bytes_good_padded_ipv6 = FixedLenCell.padCellBytes(netinfo_bytes_good_ipv6)
assert len(netinfo_bytes_good_padded_ipv6) == 512
class NetInfoCellIPv6Tests(FixedLenTestBase, unittest.TestCase):
def setUp(self):
self.cell_constants = {
'cell-bytes-good': netinfo_bytes_good_padded_ipv6,
'cell-type': NetInfoCell,
'cell-bytes-good-nopadding': netinfo_bytes_good_ipv6,
}
self.cell_header = OrderedDict()
self.cell_header['circ_id'] = CIRC_ID
self.cell_header['cmd'] = NETINFO_CMD
self.cell_header['link_version'] = 3
self.cell_attributes = OrderedDict()
self.cell_attributes['other_or_address'] = TLVTriple(u'fe80:0000:0000:0000:0202:b3ff:fe1e:8329')
self.cell_attributes['this_or_addresses'] = [TLVTriple(u'fe80:0000:0000:0000:0202:b3ff:fe1e:8329')]
self.cell_attributes['timestamp'] = struct.pack('!I', 0)
self.bad_parse_inputs = ()
self.bad_make_inputs = ()
self.encrypted = False
# PaddingCell unittests and constant values
PADDING_CMD = 0
padding_bytes_good = struct.pack(
'!HB509s',
CIRC_ID, PADDING_CMD,
"\x00" * 509,
)
padding_bytes_good_padded = padding_bytes_good
assert len(padding_bytes_good_padded) == 512
class PaddingCellTests(FixedLenTestBase, unittest.TestCase):
def setUp(self):
self.cell_constants = {
'cell-bytes-good': padding_bytes_good_padded,
'cell-type': PaddingCell,
'cell-bytes-good-nopadding': padding_bytes_good,
}
self.cell_header = OrderedDict()
self.cell_header['circ_id'] = CIRC_ID
self.cell_header['cmd'] = PADDING_CMD
self.cell_header['link_version'] = 3
# padding cells don't have any attributes, and they don't really
# have 'bad' inputs, as the payload must be ignored
self.cell_attributes = {}
self.bad_parse_inputs = ()
self.bad_make_inputs = ()
self.encrypted = False
| bsd-3-clause |
kevin8909/xjerp | openerp/addons/auth_signup/res_config.py | 445 | 2860 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import osv, fields
from openerp.tools.safe_eval import safe_eval
class base_config_settings(osv.TransientModel):
_inherit = 'base.config.settings'
_columns = {
'auth_signup_reset_password': fields.boolean('Enable password reset from Login page',
help="This allows users to trigger a password reset from the Login page."),
'auth_signup_uninvited': fields.boolean('Allow external users to sign up',
help="If unchecked, only invited users may sign up."),
'auth_signup_template_user_id': fields.many2one('res.users',
string='Template user for new users created through signup'),
}
def get_default_auth_signup_template_user_id(self, cr, uid, fields, context=None):
icp = self.pool.get('ir.config_parameter')
# we use safe_eval on the result, since the value of the parameter is a nonempty string
return {
'auth_signup_reset_password': safe_eval(icp.get_param(cr, uid, 'auth_signup.reset_password', 'False')),
'auth_signup_uninvited': safe_eval(icp.get_param(cr, uid, 'auth_signup.allow_uninvited', 'False')),
'auth_signup_template_user_id': safe_eval(icp.get_param(cr, uid, 'auth_signup.template_user_id', 'False')),
}
def set_auth_signup_template_user_id(self, cr, uid, ids, context=None):
config = self.browse(cr, uid, ids[0], context=context)
icp = self.pool.get('ir.config_parameter')
# we store the repr of the values, since the value of the parameter is a required string
icp.set_param(cr, uid, 'auth_signup.reset_password', repr(config.auth_signup_reset_password))
icp.set_param(cr, uid, 'auth_signup.allow_uninvited', repr(config.auth_signup_uninvited))
icp.set_param(cr, uid, 'auth_signup.template_user_id', repr(config.auth_signup_template_user_id.id))
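# Illustrative note (not part of the original module): the round trip through
# ir.config_parameter works because values are stored as their repr() and read
# back with safe_eval, e.g.
#
#     repr(True) == 'True'
#     safe_eval('True') is True
#     safe_eval('False') is False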
| agpl-3.0 |
lnls-fac/apsuite | apsuite/commisslib/injsi_optimization.py | 1 | 3074 | """."""
import time as _time
import numpy as _np
from epics import PV
from apsuite.optimization import SimulAnneal
from siriuspy.devices import Tune, TuneCorr, CurrInfoSI
from ..utils import MeasBaseClass as _BaseClass, \
ParamsBaseClass as _ParamsBaseClass
class InjSIParams(_ParamsBaseClass):
"""."""
def __init__(self):
"""."""
super().__init__()
self.nr_iter = 10
self.nr_pulses = 5
self.max_delta_tunex = 1e-2
self.max_delta_tuney = 1e-2
self.wait_tunecorr = 1 # [s]
self.pulse_freq = 2 # [Hz]
def __str__(self):
"""."""
ftmp = '{0:15s} = {1:9.6f} {2:s}\n'.format
dtmp = '{0:15s} = {1:9d} {2:s}\n'.format
stg = dtmp('nr_iter', self.nr_iter, '')
stg += dtmp('nr_pulses', self.nr_pulses, '')
stg += ftmp('max_delta_tunex', self.max_delta_tunex, '')
stg += ftmp('max_delta_tuney', self.max_delta_tuney, '')
stg += ftmp('wait_tunecorr', self.wait_tunecorr, '[s]')
stg += ftmp('pulse_freq', self.pulse_freq, '[Hz]')
return stg
class TuneScanInjSI(SimulAnneal, _BaseClass):
"""."""
PV_INJECTION = 'AS-RaMO:TI-EVG:InjectionEvt-Sel'
def __init__(self, save=False):
"""."""
SimulAnneal.__init__(self, save=save)
_BaseClass.__init__(self)
self.devices = dict()
self.params = InjSIParams()
self.devices['tune'] = Tune(Tune.DEVICES.SI)
self.devices['tunecorr'] = TuneCorr(TuneCorr.DEVICES.SI)
self.devices['currinfo'] = CurrInfoSI()
self.devices['injection'] = PV(TuneScanInjSI.PV_INJECTION)
self.devices['tunecorr'].cmd_update_reference()
self.data['measure'] = dict()
self.data['measure']['tunex'] = []
self.data['measure']['tuney'] = []
self.data['measure']['injeff'] = []
def _inject(self):
self.devices['injection'].value = 1
def _apply_variation(self):
tunecorr = self.devices['tunecorr']
dnux, dnuy = self.position[0], self.position[1]
tunecorr.delta_tunex = dnux
tunecorr.delta_tuney = dnuy
tunecorr.cmd_apply_delta()
_time.sleep(self.params.wait_tunecorr)
def calc_obj_fun(self):
"""."""
tune = self.devices['tune']
self.data['measure']['tunex'].append(tune.tunex)
self.data['measure']['tuney'].append(tune.tuney)
self._apply_variation()
injeff = []
for _ in range(self.params.nr_pulses):
self._inject()
injeff.append(self.devices['currinfo'].injeff)
_time.sleep(1/self.params.pulse_freq)
self.data['measure']['injeff'].append(injeff)
return - _np.mean(injeff)
def initialization(self):
"""."""
self.niter = self.params.nr_iter
self.position = _np.array([0, 0])
self.limits_upper = _np.array(
[self.params.max_delta_tunex, self.params.max_delta_tuney])
self.limits_lower = - self.limits_upper
self.deltas = self.limits_upper.copy()
| mit |
alessandro-aglietti/git-repo | subcmds/abandon.py | 48 | 2034 | #
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from command import Command
from git_command import git
from progress import Progress
class Abandon(Command):
common = True
helpSummary = "Permanently abandon a development branch"
helpUsage = """
%prog <branchname> [<project>...]
This subcommand permanently abandons a development branch by
deleting it (and all its history) from your local repository.
It is equivalent to "git branch -D <branchname>".
"""
def Execute(self, opt, args):
if not args:
self.Usage()
nb = args[0]
if not git.check_ref_format('heads/%s' % nb):
print >>sys.stderr, "error: '%s' is not a valid name" % nb
sys.exit(1)
nb = args[0]
err = []
success = []
all_projects = self.GetProjects(args[1:])
pm = Progress('Abandon %s' % nb, len(all_projects))
for project in all_projects:
pm.update()
status = project.AbandonBranch(nb)
if status is not None:
if status:
success.append(project)
else:
err.append(project)
pm.end()
if err:
for p in err:
print >>sys.stderr,\
"error: %s/: cannot abandon %s" \
% (p.relpath, nb)
sys.exit(1)
elif not success:
print >>sys.stderr, 'error: no project has branch %s' % nb
sys.exit(1)
else:
print >>sys.stderr, 'Abandoned in %d project(s):\n %s' % (
len(success), '\n '.join(p.relpath for p in success))
| apache-2.0 |
Maqical/Firestorm | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
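# Illustration with made-up numbers: add_stats(d, "sys_read", 10) followed by
# add_stats(d, "sys_read", 30) leaves d["sys_read"] == (10, 30, 20, 2); note that
# the running "avg" is a pairwise mean of the old average and the new value,
# not a true mean over all samples.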
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
| gpl-2.0 |
apagac/cfme_tests | cfme/test_framework/config.py | 3 | 2560 | """
classes to manage the cfme test framework configuration
"""
import os
import warnings
import attr
import yaycl
class Configuration(object):
"""
holds the current configuration
"""
def __init__(self):
self.yaycl_config = None
def configure(self, config_dir, crypt_key_file=None):
"""
        do the deferred initial loading of the configuration
:param config_dir: path to the folder with configuration files
:param crypt_key_file: optional name of a file holding the key for encrypted
configuration files
:raises: AssertionError if called more than once
if the `utils.conf` api is removed, the loading can be transformed to eager loading
"""
assert self.yaycl_config is None
if crypt_key_file and os.path.exists(crypt_key_file):
self.yaycl_config = yaycl.Config(
config_dir=config_dir,
crypt_key_file=crypt_key_file)
else:
self.yaycl_config = yaycl.Config(config_dir=config_dir)
def get_config(self, name):
"""returns a yaycl config object
:param name: name of the configuration object
"""
if self.yaycl_config is None:
raise RuntimeError('cfme configuration was not initialized')
return getattr(self.yaycl_config, name)
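# A minimal usage sketch (the paths and the 'env' config name are illustrative,
# not defined by this module):
#
#   global_configuration.configure('/path/to/conf', crypt_key_file='/path/to/.key')
#   env = global_configuration.get_config('env')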
@attr.s
class DeprecatedConfigWrapper(object):
"""
a wrapper that provides the old :code:``utils.conf`` api
"""
configuration = attr.ib()
_warn = attr.ib(default=False)
def __getattr__(self, key):
if self._warn:
warnings.warn(
'the configuration module {} will be deprecated'.format(key),
category=DeprecationWarning,
stacklevel=2,
)
return self.configuration.get_config(key)
@property
def runtime(self):
return self.configuration.runtime
def __getitem__(self, key):
if self._warn:
warnings.warn(
'the configuration module {} will be deprecated'.format(key),
category=DeprecationWarning,
stacklevel=2,
)
return self.configuration.get_config(key)
def __delitem__(self, key):
# used in bad logging
if self._warn:
warnings.warn('clearing configuration is bad', stacklevel=2)
del self.configuration.yaycl_config[key]
# for the initial usage we keep a global object
# later on we want to replace it
global_configuration = Configuration()
| gpl-2.0 |
YuriyIlyin/ansible-modules-core | files/find.py | 109 | 11313 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Ruggero Marchei <[email protected]>
# (c) 2015, Brian Coca <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>
import os
import stat
import fnmatch
import time
import re
import shutil
DOCUMENTATION = '''
---
module: find
author: Brian Coca (based on Ruggero Marchei's Tidy)
version_added: "2.0"
short_description: return a list of files based on specific criteria
requirements: []
description:
    - Return a list of files based on specific criteria. Multiple criteria are AND'd together.
options:
age:
required: false
default: null
description:
- Select files whose age is equal to or greater than the specified time.
Use a negative age to find files equal to or less than the specified time.
You can choose seconds, minutes, hours, days, or weeks by specifying the
first letter of any of those words (e.g., "1w").
patterns:
required: false
default: '*'
description:
- One or more (shell type) file glob patterns, which restrict the list of files to be returned to
those whose basenames match at least one of the patterns specified. Multiple patterns can be
specified using a list.
contains:
required: false
default: null
description:
- One or more re patterns which should be matched against the file content
paths:
required: true
aliases: [ "name" ]
description:
- List of paths to the file or directory to search. All paths must be fully qualified.
file_type:
required: false
description:
- Type of file to select
choices: [ "file", "directory" ]
default: "file"
recurse:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- If target is a directory, recursively descend into the directory looking for files.
size:
required: false
default: null
description:
- Select files whose size is equal to or greater than the specified size.
Use a negative size to find files equal to or less than the specified size.
Unqualified values are in bytes, but b, k, m, g, and t can be appended to specify
bytes, kilobytes, megabytes, gigabytes, and terabytes, respectively.
Size is not evaluated for directories.
age_stamp:
required: false
default: "mtime"
choices: [ "atime", "mtime", "ctime" ]
description:
- Choose the file property against which we compare age. Default is mtime.
hidden:
required: false
default: "False"
choices: [ True, False ]
description:
- Set this to true to include hidden files, otherwise they'll be ignored.
follow:
required: false
default: "False"
choices: [ True, False ]
description:
- Set this to true to follow symlinks in path for systems with python 2.6+
get_checksum:
required: false
default: "False"
choices: [ True, False ]
description:
- Set this to true to retrieve a file's sha1 checksum
'''
EXAMPLES = '''
# Recursively find /tmp files older than 2 days
- find: paths="/tmp" age="2d" recurse=yes
# Recursively find /tmp files older than 4 weeks and equal or greater than 1 megabyte
- find: paths="/tmp" age="4w" size="1m" recurse=yes
# Recursively find /var/tmp files with last access time greater than 3600 seconds
- find: paths="/var/tmp" age="3600" age_stamp=atime recurse=yes
# find /var/log files equal or greater than 10 megabytes ending with .log or .log.gz
- find: paths="/var/tmp" patterns="*.log","*.log.gz" size="10m"
'''
RETURN = '''
files:
description: all matches found with the specified criteria (see stat module for full output of each dictionary)
returned: success
type: list of dictionaries
sample: [
{ path="/var/tmp/test1",
mode=0644,
...,
checksum=16fac7be61a6e4591a33ef4b729c5c3302307523
},
{ path="/var/tmp/test2",
...
},
]
matched:
description: number of matches
returned: success
type: string
sample: 14
examined:
description: number of filesystem objects looked at
returned: success
type: string
sample: 34
'''
def pfilter(f, patterns=None):
'''filter using glob patterns'''
if patterns is None:
return True
for p in patterns:
if fnmatch.fnmatch(f, p):
return True
return False
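# For illustration (hypothetical names): pfilter('error.log', ['*.log', '*.gz'])
# returns True, pfilter('error.log', ['*.txt']) returns False, and passing
# patterns=None matches everything.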
def agefilter(st, now, age, timestamp):
'''filter files older than age'''
if age is None or \
(age >= 0 and now - st.__getattribute__("st_%s" % timestamp) >= abs(age)) or \
(age < 0 and now - st.__getattribute__("st_%s" % timestamp) <= abs(age)):
return True
return False
def sizefilter(st, size):
'''filter files greater than size'''
if size is None or \
(size >= 0 and st.st_size >= abs(size)) or \
(size < 0 and st.st_size <= abs(size)):
return True
return False
def contentfilter(fsname, pattern):
'''filter files which contain the given expression'''
if pattern is None: return True
try:
f = open(fsname)
prog = re.compile(pattern)
for line in f:
if prog.match (line):
f.close()
return True
f.close()
except:
pass
return False
def statinfo(st):
return {
'mode' : "%04o" % stat.S_IMODE(st.st_mode),
'isdir' : stat.S_ISDIR(st.st_mode),
'ischr' : stat.S_ISCHR(st.st_mode),
'isblk' : stat.S_ISBLK(st.st_mode),
'isreg' : stat.S_ISREG(st.st_mode),
'isfifo' : stat.S_ISFIFO(st.st_mode),
'islnk' : stat.S_ISLNK(st.st_mode),
'issock' : stat.S_ISSOCK(st.st_mode),
'uid' : st.st_uid,
'gid' : st.st_gid,
'size' : st.st_size,
'inode' : st.st_ino,
'dev' : st.st_dev,
'nlink' : st.st_nlink,
'atime' : st.st_atime,
'mtime' : st.st_mtime,
'ctime' : st.st_ctime,
'wusr' : bool(st.st_mode & stat.S_IWUSR),
'rusr' : bool(st.st_mode & stat.S_IRUSR),
'xusr' : bool(st.st_mode & stat.S_IXUSR),
'wgrp' : bool(st.st_mode & stat.S_IWGRP),
'rgrp' : bool(st.st_mode & stat.S_IRGRP),
'xgrp' : bool(st.st_mode & stat.S_IXGRP),
'woth' : bool(st.st_mode & stat.S_IWOTH),
'roth' : bool(st.st_mode & stat.S_IROTH),
'xoth' : bool(st.st_mode & stat.S_IXOTH),
'isuid' : bool(st.st_mode & stat.S_ISUID),
'isgid' : bool(st.st_mode & stat.S_ISGID),
}
def main():
module = AnsibleModule(
argument_spec = dict(
paths = dict(required=True, aliases=['name'], type='list'),
patterns = dict(default=['*'], type='list'),
contains = dict(default=None, type='str'),
file_type = dict(default="file", choices=['file', 'directory'], type='str'),
age = dict(default=None, type='str'),
age_stamp = dict(default="mtime", choices=['atime','mtime','ctime'], type='str'),
size = dict(default=None, type='str'),
recurse = dict(default='no', type='bool'),
hidden = dict(default="False", type='bool'),
follow = dict(default="False", type='bool'),
get_checksum = dict(default="False", type='bool'),
),
)
params = module.params
filelist = []
if params['age'] is None:
age = None
else:
# convert age to seconds:
m = re.match("^(-?\d+)(s|m|h|d|w)?$", params['age'].lower())
seconds_per_unit = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800}
if m:
age = int(m.group(1)) * seconds_per_unit.get(m.group(2), 1)
else:
module.fail_json(age=params['age'], msg="failed to process age")
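        # e.g. an age of "2d" converts to 2 * 86400 = 172800 seconds, while a bare
        # "-3600" has no unit suffix and stays -3600 (seconds is the implicit unit).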
if params['size'] is None:
size = None
else:
# convert size to bytes:
m = re.match("^(-?\d+)(b|k|m|g|t)?$", params['size'].lower())
bytes_per_unit = {"b": 1, "k": 1024, "m": 1024**2, "g": 1024**3, "t": 1024**4}
if m:
size = int(m.group(1)) * bytes_per_unit.get(m.group(2), 1)
else:
module.fail_json(size=params['size'], msg="failed to process size")
now = time.time()
msg = ''
looked = 0
for npath in params['paths']:
if os.path.isdir(npath):
''' ignore followlinks for python version < 2.6 '''
for root,dirs,files in (sys.version_info < (2,6,0) and os.walk(npath)) or \
os.walk( npath, followlinks=params['follow']):
looked = looked + len(files) + len(dirs)
for fsobj in (files + dirs):
fsname=os.path.normpath(os.path.join(root, fsobj))
if os.path.basename(fsname).startswith('.') and not params['hidden']:
continue
st = os.stat(fsname)
r = {'path': fsname}
if stat.S_ISDIR(st.st_mode) and params['file_type'] == 'directory':
if pfilter(fsobj, params['patterns']) and agefilter(st, now, age, params['age_stamp']):
r.update(statinfo(st))
filelist.append(r)
elif stat.S_ISREG(st.st_mode) and params['file_type'] == 'file':
if pfilter(fsobj, params['patterns']) and \
agefilter(st, now, age, params['age_stamp']) and \
sizefilter(st, size) and \
contentfilter(fsname, params['contains']):
r.update(statinfo(st))
if params['get_checksum']:
r['checksum'] = module.sha1(fsname)
filelist.append(r)
if not params['recurse']:
break
else:
msg+="%s was skipped as it does not seem to be a valid directory or it cannot be accessed\n"
matched = len(filelist)
module.exit_json(files=filelist, changed=False, msg=msg, matched=matched, examined=looked)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
ecolitan/fatics | src/timer.py | 1 | 2084 | # Copyright (C) 2010 Wil Mahan <[email protected]>
#
# This file is part of FatICS.
#
# FatICS is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FatICS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with FatICS. If not, see <http://www.gnu.org/licenses/>.
#
import time
from gettext import ngettext
import online
import game
import connection
from config import config
heartbeat_timeout = 5
def heartbeat():
# idle timeout
if config.idle_timeout:
now = time.time()
for u in online.online:
if (now - u.session.last_command_time > config.idle_timeout and
not u.is_admin() and
not u.has_title('TD')):
u.session.conn.idle_timeout(config.idle_timeout // 60)
# ping all zipseal clients
# I wonder if it would be better to spread out the pings in time,
# rather than sending a large number of ping requests all at once.
# However, this method is simple, and FICS timeseal 2 seems to do it
# this way (pinging all capable clients every 10 seconds).
for u in online.online:
if u.session.use_zipseal:
u.session.ping()
# forfeit games on time
for g in game.games.values():
if g.gtype == game.PLAYED and g.clock.is_ticking:
u = g.get_user_to_move()
opp = g.get_opp(u)
if opp.vars['autoflag']:
# TODO: send auto-flagging message a la original fics.
g.clock.check_flag(g, g.get_user_side(u))
connection.send_prompts()
# vim: expandtab tabstop=4 softtabstop=4 shiftwidth=4 smarttab autoindent
| agpl-3.0 |
codrut3/tensorflow | tensorflow/python/kernel_tests/garbage_collection_test.py | 82 | 2102 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests which set DEBUG_SAVEALL and assert no garbage was created.
This flag seems to be sticky, so these tests have been isolated for now.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import test
class NoReferenceCycleTests(test_util.TensorFlowTestCase):
@test_util.assert_no_garbage_created
def testEagerResourceVariables(self):
with context.eager_mode():
resource_variable_ops.ResourceVariable(1.0, name="a")
@test_util.assert_no_garbage_created
def testTensorArrays(self):
with context.eager_mode():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, -3.0)
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
d0, d1, d2 = self.evaluate([r0, r1, r2])
self.assertAllEqual([[4.0, 5.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual(-3.0, d2)
if __name__ == "__main__":
test.main()
| apache-2.0 |
CloudServer/cinder | cinder/volume/drivers/lvm.py | 1 | 30310 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Driver for Linux servers running LVM.
"""
import math
import os
import socket
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from oslo_utils import units
import six
from cinder.brick.local_dev import lvm as lvm
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder.openstack.common import fileutils
from cinder import utils
from cinder.volume import driver
from cinder.volume import utils as volutils
LOG = logging.getLogger(__name__)
# FIXME(jdg): We'll put the lvm_ prefix back on these when we
# move over to using this as the real LVM driver, for now we'll
# rename them so that the config generation utility doesn't barf
# on duplicate entries.
volume_opts = [
cfg.StrOpt('volume_group',
default='cinder-volumes',
help='Name for the VG that will contain exported volumes'),
cfg.IntOpt('lvm_mirrors',
default=0,
help='If >0, create LVs with multiple mirrors. Note that '
'this requires lvm_mirrors + 2 PVs with available space'),
cfg.StrOpt('lvm_type',
default='default',
choices=['default', 'thin'],
help='Type of LVM volumes to deploy'),
cfg.StrOpt('lvm_conf_file',
default='/etc/cinder/lvm.conf',
help='LVM conf file to use for the LVM driver in Cinder; '
'this setting is ignored if the specified file does '
'not exist (You can also specify \'None\' to not use '
'a conf file even if one exists).')
]
CONF = cfg.CONF
CONF.register_opts(volume_opts)
class LVMVolumeDriver(driver.VolumeDriver):
"""Executes commands relating to Volumes."""
VERSION = '3.0.0'
def __init__(self, vg_obj=None, *args, **kwargs):
# Parent sets db, host, _execute and base config
super(LVMVolumeDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(volume_opts)
self.hostname = socket.gethostname()
self.vg = vg_obj
self.backend_name =\
self.configuration.safe_get('volume_backend_name') or 'LVM'
# Target Driver is what handles data-transport
# Transport specific code should NOT be in
# the driver (control path), this way
# different target drivers can be added (iscsi, FC etc)
target_driver = \
self.target_mapping[self.configuration.safe_get('iscsi_helper')]
LOG.debug('Attempting to initialize LVM driver with the '
'following target_driver: %s',
target_driver)
self.target_driver = importutils.import_object(
target_driver,
configuration=self.configuration,
db=self.db,
executor=self._execute)
self.protocol = self.target_driver.protocol
self.sparse_copy_volume = False
def _sizestr(self, size_in_g):
return '%sg' % size_in_g
def _volume_not_present(self, volume_name):
return self.vg.get_volume(volume_name) is None
def _delete_volume(self, volume, is_snapshot=False):
"""Deletes a logical volume."""
if self.configuration.volume_clear != 'none' and \
self.configuration.lvm_type != 'thin':
self._clear_volume(volume, is_snapshot)
name = volume['name']
if is_snapshot:
name = self._escape_snapshot(volume['name'])
self.vg.delete(name)
def _clear_volume(self, volume, is_snapshot=False):
# zero out old volumes to prevent data leaking between users
# TODO(ja): reclaiming space should be done lazy and low priority
if is_snapshot:
# if the volume to be cleared is a snapshot of another volume
# we need to clear out the volume using the -cow instead of the
# directly volume path. We need to skip this if we are using
# thin provisioned LVs.
# bug# lp1191812
dev_path = self.local_path(volume) + "-cow"
else:
dev_path = self.local_path(volume)
# TODO(jdg): Maybe we could optimize this for snaps by looking at
# the cow table and only overwriting what's necessary?
# for now we're still skipping on snaps due to hang issue
if not os.path.exists(dev_path):
msg = (_('Volume device file path %s does not exist.')
% dev_path)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
size_in_g = volume.get('volume_size') or volume.get('size')
if size_in_g is None:
msg = (_("Size for volume: %s not found, cannot secure delete.")
% volume['id'])
LOG.error(msg)
raise exception.InvalidParameterValue(msg)
# clear_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
vol_sz_in_meg = size_in_g * units.Ki
volutils.clear_volume(
vol_sz_in_meg, dev_path,
volume_clear=self.configuration.volume_clear,
volume_clear_size=self.configuration.volume_clear_size)
def _escape_snapshot(self, snapshot_name):
# Linux LVM reserves name that starts with snapshot, so that
# such volume name can't be created. Mangle it.
if not snapshot_name.startswith('snapshot'):
return snapshot_name
return '_' + snapshot_name
def _create_volume(self, name, size, lvm_type, mirror_count, vg=None):
vg_ref = self.vg
if vg is not None:
vg_ref = vg
vg_ref.create_volume(name, size, lvm_type, mirror_count)
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug("Updating volume stats")
if self.vg is None:
LOG.warning(_LW('Unable to update stats on non-initialized '
'Volume Group: %s'),
self.configuration.volume_group)
return
self.vg.update_volume_group_info()
data = {}
# Note(zhiteng): These information are driver/backend specific,
# each driver may define these values in its own config options
# or fetch from driver specific configuration file.
data["volume_backend_name"] = self.backend_name
data["vendor_name"] = 'Open Source'
data["driver_version"] = self.VERSION
data["storage_protocol"] = self.protocol
data["pools"] = []
total_capacity = 0
free_capacity = 0
if self.configuration.lvm_mirrors > 0:
total_capacity =\
self.vg.vg_mirror_size(self.configuration.lvm_mirrors)
free_capacity =\
self.vg.vg_mirror_free_space(self.configuration.lvm_mirrors)
provisioned_capacity = round(
float(total_capacity) - float(free_capacity), 2)
elif self.configuration.lvm_type == 'thin':
total_capacity = self.vg.vg_thin_pool_size
free_capacity = self.vg.vg_thin_pool_free_space
provisioned_capacity = self.vg.vg_provisioned_capacity
else:
total_capacity = self.vg.vg_size
free_capacity = self.vg.vg_free_space
provisioned_capacity = round(
float(total_capacity) - float(free_capacity), 2)
location_info = \
('LVMVolumeDriver:%(hostname)s:%(vg)s'
':%(lvm_type)s:%(lvm_mirrors)s' %
{'hostname': self.hostname,
'vg': self.configuration.volume_group,
'lvm_type': self.configuration.lvm_type,
'lvm_mirrors': self.configuration.lvm_mirrors})
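        # With hypothetical settings this produces something like
        # 'LVMVolumeDriver:node1:cinder-volumes:thin:0'; migrate_volume() later
        # splits this string on ':' to decide whether a local LV-to-LV copy is possible.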
thin_enabled = self.configuration.lvm_type == 'thin'
# Calculate the total volumes used by the VG group.
# This includes volumes and snapshots.
total_volumes = len(self.vg.get_volumes())
# Skip enabled_pools setting, treat the whole backend as one pool
# XXX FIXME if multipool support is added to LVM driver.
single_pool = {}
single_pool.update(dict(
pool_name=data["volume_backend_name"],
total_capacity_gb=total_capacity,
free_capacity_gb=free_capacity,
reserved_percentage=self.configuration.reserved_percentage,
location_info=location_info,
QoS_support=False,
provisioned_capacity_gb=provisioned_capacity,
max_over_subscription_ratio=(
self.configuration.max_over_subscription_ratio),
thin_provisioning_support=thin_enabled,
thick_provisioning_support=not thin_enabled,
total_volumes=total_volumes,
filter_function=self.get_filter_function(),
goodness_function=self.get_goodness_function(),
multiattach=True
))
data["pools"].append(single_pool)
self._stats = data
def check_for_setup_error(self):
"""Verify that requirements are in place to use LVM driver."""
if self.vg is None:
root_helper = utils.get_root_helper()
lvm_conf_file = self.configuration.lvm_conf_file
if lvm_conf_file.lower() == 'none':
lvm_conf_file = None
try:
self.vg = lvm.LVM(self.configuration.volume_group,
root_helper,
lvm_type=self.configuration.lvm_type,
executor=self._execute,
lvm_conf=lvm_conf_file)
except exception.VolumeGroupNotFound:
message = (_("Volume Group %s does not exist") %
self.configuration.volume_group)
raise exception.VolumeBackendAPIException(data=message)
vg_list = volutils.get_all_volume_groups(
self.configuration.volume_group)
vg_dict = \
next(vg for vg in vg_list if vg['name'] == self.vg.vg_name)
if vg_dict is None:
message = (_("Volume Group %s does not exist") %
self.configuration.volume_group)
raise exception.VolumeBackendAPIException(data=message)
if self.configuration.lvm_type == 'thin':
# Specific checks for using Thin provisioned LV's
if not volutils.supports_thin_provisioning():
message = _("Thin provisioning not supported "
"on this version of LVM.")
raise exception.VolumeBackendAPIException(data=message)
pool_name = "%s-pool" % self.configuration.volume_group
if self.vg.get_volume(pool_name) is None:
try:
self.vg.create_thin_pool(pool_name)
except processutils.ProcessExecutionError as exc:
exception_message = (_("Failed to create thin pool, "
"error message was: %s")
% six.text_type(exc.stderr))
raise exception.VolumeBackendAPIException(
data=exception_message)
# Enable sparse copy since lvm_type is 'thin'
self.sparse_copy_volume = True
def create_volume(self, volume):
"""Creates a logical volume."""
mirror_count = 0
if self.configuration.lvm_mirrors:
mirror_count = self.configuration.lvm_mirrors
self._create_volume(volume['name'],
self._sizestr(volume['size']),
self.configuration.lvm_type,
mirror_count)
def update_migrated_volume(self, ctxt, volume, new_volume,
original_volume_status):
"""Return model update from LVM for migrated volume.
This method should rename the back-end volume name(id) on the
destination host back to its original name(id) on the source host.
:param ctxt: The context used to run the method update_migrated_volume
:param volume: The original volume that was migrated to this backend
:param new_volume: The migration volume object that was created on
this backend as part of the migration process
:param original_volume_status: The status of the original volume
:return model_update to update DB with any needed changes
"""
name_id = None
provider_location = None
if original_volume_status == 'available':
current_name = CONF.volume_name_template % new_volume['id']
original_volume_name = CONF.volume_name_template % volume['id']
try:
self.vg.rename_volume(current_name, original_volume_name)
except processutils.ProcessExecutionError:
LOG.error(_LE('Unable to rename the logical volume '
'for volume: %s'), volume['name'])
# If the rename fails, _name_id should be set to the new
# volume id and provider_location should be set to the
# one from the new volume as well.
name_id = new_volume['_name_id'] or new_volume['id']
provider_location = new_volume['provider_location']
else:
# The back-end will not be renamed.
name_id = new_volume['_name_id'] or new_volume['id']
provider_location = new_volume['provider_location']
return {'_name_id': name_id, 'provider_location': provider_location}
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
self._create_volume(volume['name'],
self._sizestr(volume['size']),
self.configuration.lvm_type,
self.configuration.lvm_mirrors)
# Some configurations of LVM do not automatically activate
# ThinLVM snapshot LVs.
self.vg.activate_lv(snapshot['name'], is_snapshot=True)
# copy_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
volutils.copy_volume(self.local_path(snapshot),
self.local_path(volume),
snapshot['volume_size'] * units.Ki,
self.configuration.volume_dd_blocksize,
execute=self._execute,
sparse=self.sparse_copy_volume)
def delete_volume(self, volume):
"""Deletes a logical volume."""
# NOTE(jdg): We don't need to explicitly call
# remove export here because we already did it
# in the manager before we got here.
if self._volume_not_present(volume['name']):
# If the volume isn't present, then don't attempt to delete
return True
if self.vg.lv_has_snapshot(volume['name']):
LOG.error(_LE('Unable to delete due to existing snapshot '
'for volume: %s'), volume['name'])
raise exception.VolumeIsBusy(volume_name=volume['name'])
self._delete_volume(volume)
LOG.info(_LI('Successfully deleted volume: %s'), volume['id'])
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
self.vg.create_lv_snapshot(self._escape_snapshot(snapshot['name']),
snapshot['volume_name'],
self.configuration.lvm_type)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
if self._volume_not_present(self._escape_snapshot(snapshot['name'])):
# If the snapshot isn't present, then don't attempt to delete
LOG.warning(_LW("snapshot: %s not found, "
"skipping delete operations"), snapshot['name'])
LOG.info(_LI('Successfully deleted snapshot: %s'), snapshot['id'])
return True
# TODO(yamahata): zeroing out the whole snapshot triggers COW.
# it's quite slow.
self._delete_volume(snapshot, is_snapshot=True)
def local_path(self, volume, vg=None):
if vg is None:
vg = self.configuration.volume_group
# NOTE(vish): stops deprecation warning
escaped_group = vg.replace('-', '--')
escaped_name = self._escape_snapshot(volume['name']).replace('-', '--')
return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
image_utils.fetch_to_raw(context,
image_service,
image_id,
self.local_path(volume),
self.configuration.volume_dd_blocksize,
size=volume['size'])
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
image_utils.upload_volume(context,
image_service,
image_meta,
self.local_path(volume))
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
mirror_count = 0
if self.configuration.lvm_mirrors:
mirror_count = self.configuration.lvm_mirrors
LOG.info(_LI('Creating clone of volume: %s'), src_vref['id'])
volume_name = src_vref['name']
temp_id = 'tmp-snap-%s' % volume['id']
temp_snapshot = {'volume_name': volume_name,
'size': src_vref['size'],
'volume_size': src_vref['size'],
'name': 'clone-snap-%s' % volume['id'],
'id': temp_id}
self.create_snapshot(temp_snapshot)
# copy_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
try:
self._create_volume(volume['name'],
self._sizestr(volume['size']),
self.configuration.lvm_type,
mirror_count)
self.vg.activate_lv(temp_snapshot['name'], is_snapshot=True)
volutils.copy_volume(
self.local_path(temp_snapshot),
self.local_path(volume),
src_vref['size'] * units.Ki,
self.configuration.volume_dd_blocksize,
execute=self._execute,
sparse=self.sparse_copy_volume)
finally:
self.delete_snapshot(temp_snapshot)
def clone_image(self, context, volume,
image_location, image_meta,
image_service):
return None, False
def backup_volume(self, context, backup, backup_service):
"""Create a new backup from an existing volume."""
volume = self.db.volume_get(context, backup.volume_id)
temp_snapshot = None
previous_status = volume['previous_status']
if previous_status == 'in-use':
temp_snapshot = self._create_temp_snapshot(context, volume)
backup.temp_snapshot_id = temp_snapshot.id
backup.save()
volume_path = self.local_path(temp_snapshot)
else:
volume_path = self.local_path(volume)
try:
with utils.temporary_chown(volume_path):
with fileutils.file_open(volume_path) as volume_file:
backup_service.backup(backup, volume_file)
finally:
if temp_snapshot:
self._delete_snapshot(context, temp_snapshot)
backup.temp_snapshot_id = None
backup.save()
def restore_backup(self, context, backup, volume, backup_service):
"""Restore an existing backup to a new or existing volume."""
volume_path = self.local_path(volume)
with utils.temporary_chown(volume_path):
with fileutils.file_open(volume_path, 'wb') as volume_file:
backup_service.restore(backup, volume['id'], volume_file)
def get_volume_stats(self, refresh=False):
"""Get volume status.
If 'refresh' is True, run update the stats first.
"""
if refresh:
self._update_volume_stats()
return self._stats
def extend_volume(self, volume, new_size):
"""Extend an existing volume's size."""
self.vg.extend_volume(volume['name'],
self._sizestr(new_size))
def manage_existing(self, volume, existing_ref):
"""Manages an existing LV.
Renames the LV to match the expected name for the volume.
Error checking done by manage_existing_get_size is not repeated.
"""
lv_name = existing_ref['source-name']
self.vg.get_volume(lv_name)
if volutils.check_already_managed_volume(self.db, lv_name):
raise exception.ManageExistingAlreadyManaged(volume_ref=lv_name)
# Attempt to rename the LV to match the OpenStack internal name.
try:
self.vg.rename_volume(lv_name, volume['name'])
except processutils.ProcessExecutionError as exc:
exception_message = (_("Failed to rename logical volume %(name)s, "
"error message was: %(err_msg)s")
% {'name': lv_name,
'err_msg': exc.stderr})
raise exception.VolumeBackendAPIException(
data=exception_message)
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of an existing LV for manage_existing.
existing_ref is a dictionary of the form:
{'source-name': <name of LV>}
"""
# Check that the reference is valid
if 'source-name' not in existing_ref:
reason = _('Reference must contain source-name element.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=reason)
lv_name = existing_ref['source-name']
lv = self.vg.get_volume(lv_name)
# Raise an exception if we didn't find a suitable LV.
if not lv:
kwargs = {'existing_ref': lv_name,
'reason': 'Specified logical volume does not exist.'}
raise exception.ManageExistingInvalidReference(**kwargs)
# LV size is returned in gigabytes. Attempt to parse size as a float
# and round up to the next integer.
try:
lv_size = int(math.ceil(float(lv['size'])))
except ValueError:
exception_message = (_("Failed to manage existing volume "
"%(name)s, because reported size %(size)s "
"was not a floating-point number.")
% {'name': lv_name,
'size': lv['size']})
raise exception.VolumeBackendAPIException(
data=exception_message)
return lv_size
def migrate_volume(self, ctxt, volume, host, thin=False, mirror_count=0):
"""Optimize the migration if the destination is on the same server.
If the specified host is another back-end on the same server, and
the volume is not attached, we can do the migration locally without
going through iSCSI.
"""
false_ret = (False, None)
if volume['status'] != 'available':
return false_ret
if 'location_info' not in host['capabilities']:
return false_ret
info = host['capabilities']['location_info']
try:
(dest_type, dest_hostname, dest_vg, lvm_type, lvm_mirrors) =\
info.split(':')
lvm_mirrors = int(lvm_mirrors)
except ValueError:
return false_ret
if (dest_type != 'LVMVolumeDriver' or dest_hostname != self.hostname):
return false_ret
if dest_vg != self.vg.vg_name:
vg_list = volutils.get_all_volume_groups()
try:
next(vg for vg in vg_list if vg['name'] == dest_vg)
except StopIteration:
LOG.error(_LE("Destination Volume Group %s does not exist"),
dest_vg)
return false_ret
helper = utils.get_root_helper()
lvm_conf_file = self.configuration.lvm_conf_file
if lvm_conf_file.lower() == 'none':
lvm_conf_file = None
dest_vg_ref = lvm.LVM(dest_vg, helper,
lvm_type=lvm_type,
executor=self._execute,
lvm_conf=lvm_conf_file)
self._create_volume(volume['name'],
self._sizestr(volume['size']),
lvm_type,
lvm_mirrors,
dest_vg_ref)
# copy_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
size_in_mb = int(volume['size']) * units.Ki
volutils.copy_volume(self.local_path(volume),
self.local_path(volume, vg=dest_vg),
size_in_mb,
self.configuration.volume_dd_blocksize,
execute=self._execute,
sparse=self.sparse_copy_volume)
self._delete_volume(volume)
return (True, None)
else:
message = (_("Refusing to migrate volume ID: %(id)s. Please "
"check your configuration because source and "
"destination are the same Volume Group: %(name)s.") %
{'id': volume['id'], 'name': self.vg.vg_name})
LOG.exception(message)
raise exception.VolumeBackendAPIException(data=message)
def get_pool(self, volume):
return self.backend_name
# ####### Interface methods for DataPath (Target Driver) ########
def ensure_export(self, context, volume):
volume_path = "/dev/%s/%s" % (self.configuration.volume_group,
volume['name'])
model_update = \
self.target_driver.ensure_export(context, volume, volume_path)
return model_update
def create_export(self, context, volume, connector, vg=None):
if vg is None:
vg = self.configuration.volume_group
volume_path = "/dev/%s/%s" % (vg, volume['name'])
export_info = self.target_driver.create_export(
context,
volume,
volume_path)
return {'provider_location': export_info['location'],
'provider_auth': export_info['auth'], }
def remove_export(self, context, volume):
self.target_driver.remove_export(context, volume)
def initialize_connection(self, volume, connector):
return self.target_driver.initialize_connection(volume, connector)
def validate_connector(self, connector):
return self.target_driver.validate_connector(connector)
def terminate_connection(self, volume, connector, **kwargs):
return self.target_driver.terminate_connection(volume, connector,
**kwargs)
class LVMISCSIDriver(LVMVolumeDriver):
"""Empty class designation for LVMISCSI.
Since we've decoupled the inheritance of iSCSI and LVM we
don't really need this class any longer. We do however want
to keep it (at least for now) for back compat in driver naming.
"""
def __init__(self, *args, **kwargs):
super(LVMISCSIDriver, self).__init__(*args, **kwargs)
LOG.warning(_LW('LVMISCSIDriver is deprecated, you should '
'now just use LVMVolumeDriver and specify '
'iscsi_helper for the target driver you '
'wish to use.'))
class LVMISERDriver(LVMVolumeDriver):
"""Empty class designation for LVMISER.
Since we've decoupled the inheritance of data path in LVM we
don't really need this class any longer. We do however want
to keep it (at least for now) for back compat in driver naming.
"""
def __init__(self, *args, **kwargs):
super(LVMISERDriver, self).__init__(*args, **kwargs)
LOG.warning(_LW('LVMISERDriver is deprecated, you should '
'now just use LVMVolumeDriver and specify '
'iscsi_helper for the target driver you '
'wish to use. In order to enable iser, please '
'set iscsi_protocol with the value iser.'))
LOG.debug('Attempting to initialize LVM driver with the '
'following target_driver: '
'cinder.volume.targets.iser.ISERTgtAdm')
self.target_driver = importutils.import_object(
'cinder.volume.targets.iser.ISERTgtAdm',
configuration=self.configuration,
db=self.db,
executor=self._execute)
| apache-2.0 |
LukeHoersten/ansible | lib/ansible/inventory/host.py | 9 | 3582 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import constants as C
from ansible.inventory.group import Group
from ansible.utils.vars import combine_vars
__all__ = ['Host']
class Host:
''' a single ansible host '''
#__slots__ = [ 'name', 'vars', 'groups' ]
def __getstate__(self):
return self.serialize()
def __setstate__(self, data):
return self.deserialize(data)
def __eq__(self, other):
return self.name == other.name
def serialize(self):
groups = []
for group in self.groups:
groups.append(group.serialize())
return dict(
name=self.name,
vars=self.vars.copy(),
ipv4_address=self.ipv4_address,
ipv6_address=self.ipv6_address,
gathered_facts=self._gathered_facts,
groups=groups,
)
def deserialize(self, data):
self.__init__()
self.name = data.get('name')
self.vars = data.get('vars', dict())
self.ipv4_address = data.get('ipv4_address', '')
self.ipv6_address = data.get('ipv6_address', '')
groups = data.get('groups', [])
for group_data in groups:
g = Group()
g.deserialize(group_data)
self.groups.append(g)
def __init__(self, name=None, port=None):
self.name = name
self.vars = {}
self.groups = []
self.ipv4_address = name
self.ipv6_address = name
if port and port != C.DEFAULT_REMOTE_PORT:
self.set_variable('ansible_ssh_port', int(port))
self._gathered_facts = False
def __repr__(self):
return self.get_name()
def get_name(self):
return self.name
@property
def gathered_facts(self):
return self._gathered_facts
def set_gathered_facts(self, gathered):
self._gathered_facts = gathered
def add_group(self, group):
self.groups.append(group)
def set_variable(self, key, value):
self.vars[key]=value
def get_groups(self):
groups = {}
for g in self.groups:
groups[g.name] = g
ancestors = g.get_ancestors()
for a in ancestors:
groups[a.name] = a
return groups.values()
def get_vars(self):
results = {}
groups = self.get_groups()
for group in sorted(groups, key=lambda g: g.depth):
results = combine_vars(results, group.get_vars())
results = combine_vars(results, self.vars)
results['inventory_hostname'] = self.name
results['inventory_hostname_short'] = self.name.split('.')[0]
results['group_names'] = sorted([ g.name for g in groups if g.name != 'all'])
return results
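# A small sketch of typical assembly (names are illustrative only):
#
#   h = Host(name='web01.example.com', port=2222)
#   h.set_variable('ansible_ssh_user', 'deploy')
#   h.add_group(Group(name='webservers'))
#   h.get_vars()['group_names']   # -> ['webservers']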
| gpl-3.0 |
anedos/sqlalchemy-migrate-egg | migrate/changeset/databases/oracle.py | 6 | 3748 | """
Oracle database specific implementations of changeset classes.
"""
import sqlalchemy as sa
from sqlalchemy.databases import oracle as sa_base
from migrate import exceptions
from migrate.changeset import ansisql, SQLA_06
if not SQLA_06:
OracleSchemaGenerator = sa_base.OracleSchemaGenerator
else:
OracleSchemaGenerator = sa_base.OracleDDLCompiler
class OracleColumnGenerator(OracleSchemaGenerator, ansisql.ANSIColumnGenerator):
pass
class OracleColumnDropper(ansisql.ANSIColumnDropper):
pass
class OracleSchemaChanger(OracleSchemaGenerator, ansisql.ANSISchemaChanger):
def get_column_specification(self, column, **kwargs):
# Ignore the NOT NULL generated
override_nullable = kwargs.pop('override_nullable', None)
if override_nullable:
orig = column.nullable
column.nullable = True
ret = super(OracleSchemaChanger, self).get_column_specification(
column, **kwargs)
if override_nullable:
column.nullable = orig
return ret
def visit_column(self, delta):
keys = delta.keys()
if 'name' in keys:
self._run_subvisit(delta,
self._visit_column_name,
start_alter=False)
if len(set(('type', 'nullable', 'server_default')).intersection(keys)):
self._run_subvisit(delta,
self._visit_column_change,
start_alter=False)
def _visit_column_change(self, table, column, delta):
# Oracle cannot drop a default once created, but it can set it
# to null. We'll do that if default=None
# http://forums.oracle.com/forums/message.jspa?messageID=1273234#1273234
dropdefault_hack = (column.server_default is None \
and 'server_default' in delta.keys())
# Oracle apparently doesn't like it when we say "not null" if
# the column's already not null. Fudge it, so we don't need a
# new function
notnull_hack = ((not column.nullable) \
and ('nullable' not in delta.keys()))
# We need to specify NULL if we're removing a NOT NULL
# constraint
null_hack = (column.nullable and ('nullable' in delta.keys()))
if dropdefault_hack:
column.server_default = sa.PassiveDefault(sa.sql.null())
if notnull_hack:
column.nullable = True
colspec = self.get_column_specification(column,
override_nullable=null_hack)
if null_hack:
colspec += ' NULL'
if notnull_hack:
column.nullable = False
if dropdefault_hack:
column.server_default = None
self.start_alter_table(table)
self.append("MODIFY (")
self.append(colspec)
self.append(")")
class OracleConstraintCommon(object):
def get_constraint_name(self, cons):
# Oracle constraints can't guess their name like other DBs
if not cons.name:
raise exceptions.NotSupportedError(
"Oracle constraint names must be explicitly stated")
return cons.name
class OracleConstraintGenerator(OracleConstraintCommon,
ansisql.ANSIConstraintGenerator):
pass
class OracleConstraintDropper(OracleConstraintCommon,
ansisql.ANSIConstraintDropper):
pass
class OracleDialect(ansisql.ANSIDialect):
columngenerator = OracleColumnGenerator
columndropper = OracleColumnDropper
schemachanger = OracleSchemaChanger
constraintgenerator = OracleConstraintGenerator
constraintdropper = OracleConstraintDropper
| mit |
kevclarx/ansible | lib/ansible/module_utils/dellos9.py | 51 | 5696 | #
# (c) 2015 Peter Sprygada, <[email protected]>
# (c) 2017 Red Hat, Inc
#
# Copyright (c) 2016 Dell Inc.
#
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import re
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback, return_values
from ansible.module_utils.network_common import to_list, ComplexList
from ansible.module_utils.connection import exec_command
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
_DEVICE_CONFIGS = {}
WARNING_PROMPTS_RE = [
r"[\r\n]?\[confirm yes/no\]:\s?$",
r"[\r\n]?\[y/n\]:\s?$",
r"[\r\n]?\[yes/no\]:\s?$"
]
dellos9_argument_spec = {
'host': dict(),
'port': dict(type='int'),
'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'),
'auth_pass': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS']), no_log=True),
'timeout': dict(type='int'),
'provider': dict(type='dict'),
}
def check_args(module, warnings):
provider = module.params['provider'] or {}
for key in dellos9_argument_spec:
if key != 'provider' and module.params[key]:
warnings.append('argument %s has been deprecated and will be '
'removed in a future version' % key)
if provider:
for param in ('auth_pass', 'password'):
if provider.get(param):
module.no_log_values.update(return_values(provider[param]))
def get_config(module, flags=[]):
cmd = 'show running-config '
cmd += ' '.join(flags)
cmd = cmd.strip()
try:
return _DEVICE_CONFIGS[cmd]
except KeyError:
rc, out, err = exec_command(module, cmd)
if rc != 0:
module.fail_json(msg='unable to retrieve current config', stderr=to_text(err, errors='surrogate_or_strict'))
cfg = to_text(out, errors='surrogate_or_strict').strip()
_DEVICE_CONFIGS[cmd] = cfg
return cfg
def to_commands(module, commands):
spec = {
'command': dict(key=True),
'prompt': dict(),
'answer': dict()
}
transform = ComplexList(spec, module)
return transform(commands)
def run_commands(module, commands, check_rc=True):
responses = list()
commands = to_commands(module, to_list(commands))
for cmd in commands:
cmd = module.jsonify(cmd)
rc, out, err = exec_command(module, cmd)
if check_rc and rc != 0:
module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), rc=rc)
responses.append(to_text(out, errors='surrogate_or_strict'))
return responses
def load_config(module, commands):
rc, out, err = exec_command(module, 'configure terminal')
if rc != 0:
module.fail_json(msg='unable to enter configuration mode', err=to_text(err, errors='surrogate_or_strict'))
for command in to_list(commands):
if command == 'end':
continue
cmd = {'command': command, 'prompt': WARNING_PROMPTS_RE, 'answer': 'yes'}
rc, out, err = exec_command(module, module.jsonify(cmd))
if rc != 0:
module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), command=command, rc=rc)
exec_command(module, 'end')
def get_sublevel_config(running_config, module):
contents = list()
current_config_contents = list()
running_config = NetworkConfig(contents=running_config, indent=1)
obj = running_config.get_object(module.params['parents'])
if obj:
contents = obj.children
contents[:0] = module.params['parents']
indent = 0
for c in contents:
if isinstance(c, str):
current_config_contents.append(c.rjust(len(c) + indent, ' '))
if isinstance(c, ConfigLine):
current_config_contents.append(c.raw)
indent = 1
sublevel_config = '\n'.join(current_config_contents)
return sublevel_config
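# Rough usage sketch from a module's point of view (the parent line is
# illustrative, not taken from a real device):
#
#   module.params['parents'] = ['interface GigabitEthernet 0/1']
#   candidate = get_sublevel_config(get_config(module), module)
#
# 'candidate' then holds the parent line followed by its child lines joined
# with newlines, ready to diff against the desired configuration block.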
| gpl-3.0 |
mpenning/exscript | src/Exscript/util/weakmethod.py | 7 | 3634 | # Copyright (C) 2007-2010 Samuel Abels.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
Weak references to bound and unbound methods.
"""
import weakref
class DeadMethodCalled(Exception):
"""
Raised by L{WeakMethod} if it is called when the referenced object
is already dead.
"""
pass
class WeakMethod(object):
"""
Do not create this class directly; use L{ref()} instead.
"""
__slots__ = 'name', 'callback'
def __init__(self, name, callback):
"""
Constructor. Do not use directly, use L{ref()} instead.
"""
self.name = name
self.callback = callback
def _dead(self, ref):
if self.callback is not None:
self.callback(self)
def get_function(self):
"""
Returns the referenced method/function if it is still alive.
Returns None otherwise.
@rtype: callable|None
@return: The referenced function if it is still alive.
"""
raise NotImplementedError()
def isalive(self):
"""
Returns True if the referenced function is still alive, False
otherwise.
@rtype: bool
@return: Whether the referenced function is still alive.
"""
return self.get_function() is not None
def __call__(self, *args, **kwargs):
"""
Proxied to the underlying function or method. Raises L{DeadMethodCalled}
if the referenced function is dead.
@rtype: object
@return: Whatever the referenced function returned.
"""
method = self.get_function()
if method is None:
raise DeadMethodCalled('method called on dead object ' + self.name)
method(*args, **kwargs)
class _WeakMethodBound(WeakMethod):
__slots__ = 'name', 'callback', 'f', 'c'
def __init__(self, f, callback):
name = f.__self__.__class__.__name__ + '.' + f.__func__.__name__
WeakMethod.__init__(self, name, callback)
self.f = f.__func__
self.c = weakref.ref(f.__self__, self._dead)
def get_function(self):
cls = self.c()
if cls is None:
return None
return getattr(cls, self.f.__name__)
class _WeakMethodFree(WeakMethod):
__slots__ = 'name', 'callback', 'f'
def __init__(self, f, callback):
WeakMethod.__init__(self, f.__class__.__name__, callback)
self.f = weakref.ref(f, self._dead)
def get_function(self):
return self.f()
def ref(function, callback = None):
"""
Returns a weak reference to the given method or function.
If the callback argument is not None, it is called as soon
    as the referenced function is garbage collected.
@type function: callable
@param function: The function to reference.
@type callback: callable
@param callback: Called when the function dies.
"""
try:
function.__func__
except AttributeError:
return _WeakMethodFree(function, callback)
return _WeakMethodBound(function, callback)
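# A small illustration (the class and method names are made up):
#
#   class Receiver(object):
#       def on_event(self, arg):
#           print(arg)
#
#   r = Receiver()
#   weak = ref(r.on_event)
#   weak('hello')        # forwarded to r.on_event
#   del r                # once collected, weak('x') raises DeadMethodCalled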
| gpl-2.0 |
makermade/arm_android-21_arm-linux-androideabi-4.8 | lib/python2.7/distutils/tests/test_dist.py | 83 | 15708 | # -*- coding: utf8 -*-
"""Tests for distutils.dist."""
import os
import StringIO
import sys
import unittest
import warnings
import textwrap
from distutils.dist import Distribution, fix_help_options
from distutils.cmd import Command
import distutils.dist
from test.test_support import TESTFN, captured_stdout, run_unittest
from distutils.tests import support
class test_dist(Command):
"""Sample distutils extension command."""
user_options = [
("sample-option=", "S", "help text"),
]
def initialize_options(self):
self.sample_option = None
class TestDistribution(Distribution):
"""Distribution subclasses that avoids the default search for
configuration files.
The ._config_files attribute must be set before
.parse_config_files() is called.
"""
def find_config_files(self):
return self._config_files
class DistributionTestCase(support.TempdirManager,
support.LoggingSilencer,
support.EnvironGuard,
unittest.TestCase):
def setUp(self):
super(DistributionTestCase, self).setUp()
self.argv = sys.argv, sys.argv[:]
del sys.argv[1:]
def tearDown(self):
sys.argv = self.argv[0]
sys.argv[:] = self.argv[1]
super(DistributionTestCase, self).tearDown()
def create_distribution(self, configfiles=()):
d = TestDistribution()
d._config_files = configfiles
d.parse_config_files()
d.parse_command_line()
return d
def test_debug_mode(self):
with open(TESTFN, "w") as f:
f.write("[global]\n")
f.write("command_packages = foo.bar, splat")
files = [TESTFN]
sys.argv.append("build")
with captured_stdout() as stdout:
self.create_distribution(files)
stdout.seek(0)
self.assertEqual(stdout.read(), '')
distutils.dist.DEBUG = True
try:
with captured_stdout() as stdout:
self.create_distribution(files)
stdout.seek(0)
self.assertEqual(stdout.read(), '')
finally:
distutils.dist.DEBUG = False
def test_command_packages_unspecified(self):
sys.argv.append("build")
d = self.create_distribution()
self.assertEqual(d.get_command_packages(), ["distutils.command"])
def test_command_packages_cmdline(self):
from distutils.tests.test_dist import test_dist
sys.argv.extend(["--command-packages",
"foo.bar,distutils.tests",
"test_dist",
"-Ssometext",
])
d = self.create_distribution()
# let's actually try to load our test command:
self.assertEqual(d.get_command_packages(),
["distutils.command", "foo.bar", "distutils.tests"])
cmd = d.get_command_obj("test_dist")
self.assertIsInstance(cmd, test_dist)
self.assertEqual(cmd.sample_option, "sometext")
def test_command_packages_configfile(self):
sys.argv.append("build")
self.addCleanup(os.unlink, TESTFN)
f = open(TESTFN, "w")
try:
print >> f, "[global]"
print >> f, "command_packages = foo.bar, splat"
finally:
f.close()
d = self.create_distribution([TESTFN])
self.assertEqual(d.get_command_packages(),
["distutils.command", "foo.bar", "splat"])
# ensure command line overrides config:
sys.argv[1:] = ["--command-packages", "spork", "build"]
d = self.create_distribution([TESTFN])
self.assertEqual(d.get_command_packages(),
["distutils.command", "spork"])
# Setting --command-packages to '' should cause the default to
# be used even if a config file specified something else:
sys.argv[1:] = ["--command-packages", "", "build"]
d = self.create_distribution([TESTFN])
self.assertEqual(d.get_command_packages(), ["distutils.command"])
def test_write_pkg_file(self):
# Check DistributionMetadata handling of Unicode fields
tmp_dir = self.mkdtemp()
my_file = os.path.join(tmp_dir, 'f')
klass = Distribution
dist = klass(attrs={'author': u'Mister Café',
'name': 'my.package',
'maintainer': u'Café Junior',
'description': u'Café torréfié',
'long_description': u'Héhéhé'})
# let's make sure the file can be written
# with Unicode fields. they are encoded with
# PKG_INFO_ENCODING
dist.metadata.write_pkg_file(open(my_file, 'w'))
# regular ascii is of course always usable
dist = klass(attrs={'author': 'Mister Cafe',
'name': 'my.package',
'maintainer': 'Cafe Junior',
'description': 'Cafe torrefie',
'long_description': 'Hehehe'})
my_file2 = os.path.join(tmp_dir, 'f2')
dist.metadata.write_pkg_file(open(my_file2, 'w'))
def test_empty_options(self):
# an empty options dictionary should not stay in the
# list of attributes
# catching warnings
warns = []
def _warn(msg):
warns.append(msg)
self.addCleanup(setattr, warnings, 'warn', warnings.warn)
warnings.warn = _warn
dist = Distribution(attrs={'author': 'xxx', 'name': 'xxx',
'version': 'xxx', 'url': 'xxxx',
'options': {}})
self.assertEqual(len(warns), 0)
self.assertNotIn('options', dir(dist))
def test_finalize_options(self):
attrs = {'keywords': 'one,two',
'platforms': 'one,two'}
dist = Distribution(attrs=attrs)
dist.finalize_options()
# finalize_option splits platforms and keywords
self.assertEqual(dist.metadata.platforms, ['one', 'two'])
self.assertEqual(dist.metadata.keywords, ['one', 'two'])
def test_get_command_packages(self):
dist = Distribution()
self.assertEqual(dist.command_packages, None)
cmds = dist.get_command_packages()
self.assertEqual(cmds, ['distutils.command'])
self.assertEqual(dist.command_packages,
['distutils.command'])
dist.command_packages = 'one,two'
cmds = dist.get_command_packages()
self.assertEqual(cmds, ['distutils.command', 'one', 'two'])
def test_announce(self):
# make sure the level is known
dist = Distribution()
args = ('ok',)
kwargs = {'level': 'ok2'}
self.assertRaises(ValueError, dist.announce, args, kwargs)
def test_find_config_files_disable(self):
# Ticket #1180: Allow user to disable their home config file.
temp_home = self.mkdtemp()
if os.name == 'posix':
user_filename = os.path.join(temp_home, ".pydistutils.cfg")
else:
user_filename = os.path.join(temp_home, "pydistutils.cfg")
with open(user_filename, 'w') as f:
f.write('[distutils]\n')
def _expander(path):
return temp_home
old_expander = os.path.expanduser
os.path.expanduser = _expander
try:
d = distutils.dist.Distribution()
all_files = d.find_config_files()
d = distutils.dist.Distribution(attrs={'script_args':
['--no-user-cfg']})
files = d.find_config_files()
finally:
os.path.expanduser = old_expander
# make sure --no-user-cfg disables the user cfg file
self.assertEqual(len(all_files)-1, len(files))
class MetadataTestCase(support.TempdirManager, support.EnvironGuard,
unittest.TestCase):
def setUp(self):
super(MetadataTestCase, self).setUp()
self.argv = sys.argv, sys.argv[:]
def tearDown(self):
sys.argv = self.argv[0]
sys.argv[:] = self.argv[1]
super(MetadataTestCase, self).tearDown()
def test_classifier(self):
attrs = {'name': 'Boa', 'version': '3.0',
'classifiers': ['Programming Language :: Python :: 3']}
dist = Distribution(attrs)
meta = self.format_metadata(dist)
self.assertIn('Metadata-Version: 1.1', meta)
def test_download_url(self):
attrs = {'name': 'Boa', 'version': '3.0',
'download_url': 'http://example.org/boa'}
dist = Distribution(attrs)
meta = self.format_metadata(dist)
self.assertIn('Metadata-Version: 1.1', meta)
def test_long_description(self):
long_desc = textwrap.dedent("""\
example::
We start here
and continue here
and end here.""")
attrs = {"name": "package",
"version": "1.0",
"long_description": long_desc}
dist = Distribution(attrs)
meta = self.format_metadata(dist)
meta = meta.replace('\n' + 8 * ' ', '\n')
self.assertIn(long_desc, meta)
def test_simple_metadata(self):
attrs = {"name": "package",
"version": "1.0"}
dist = Distribution(attrs)
meta = self.format_metadata(dist)
self.assertIn("Metadata-Version: 1.0", meta)
self.assertNotIn("provides:", meta.lower())
self.assertNotIn("requires:", meta.lower())
self.assertNotIn("obsoletes:", meta.lower())
def test_provides(self):
attrs = {"name": "package",
"version": "1.0",
"provides": ["package", "package.sub"]}
dist = Distribution(attrs)
self.assertEqual(dist.metadata.get_provides(),
["package", "package.sub"])
self.assertEqual(dist.get_provides(),
["package", "package.sub"])
meta = self.format_metadata(dist)
self.assertIn("Metadata-Version: 1.1", meta)
self.assertNotIn("requires:", meta.lower())
self.assertNotIn("obsoletes:", meta.lower())
def test_provides_illegal(self):
self.assertRaises(ValueError, Distribution,
{"name": "package",
"version": "1.0",
"provides": ["my.pkg (splat)"]})
def test_requires(self):
attrs = {"name": "package",
"version": "1.0",
"requires": ["other", "another (==1.0)"]}
dist = Distribution(attrs)
self.assertEqual(dist.metadata.get_requires(),
["other", "another (==1.0)"])
self.assertEqual(dist.get_requires(),
["other", "another (==1.0)"])
meta = self.format_metadata(dist)
self.assertIn("Metadata-Version: 1.1", meta)
self.assertNotIn("provides:", meta.lower())
self.assertIn("Requires: other", meta)
self.assertIn("Requires: another (==1.0)", meta)
self.assertNotIn("obsoletes:", meta.lower())
def test_requires_illegal(self):
self.assertRaises(ValueError, Distribution,
{"name": "package",
"version": "1.0",
"requires": ["my.pkg (splat)"]})
def test_obsoletes(self):
attrs = {"name": "package",
"version": "1.0",
"obsoletes": ["other", "another (<1.0)"]}
dist = Distribution(attrs)
self.assertEqual(dist.metadata.get_obsoletes(),
["other", "another (<1.0)"])
self.assertEqual(dist.get_obsoletes(),
["other", "another (<1.0)"])
meta = self.format_metadata(dist)
self.assertIn("Metadata-Version: 1.1", meta)
self.assertNotIn("provides:", meta.lower())
self.assertNotIn("requires:", meta.lower())
self.assertIn("Obsoletes: other", meta)
self.assertIn("Obsoletes: another (<1.0)", meta)
def test_obsoletes_illegal(self):
self.assertRaises(ValueError, Distribution,
{"name": "package",
"version": "1.0",
"obsoletes": ["my.pkg (splat)"]})
def format_metadata(self, dist):
sio = StringIO.StringIO()
dist.metadata.write_pkg_file(sio)
return sio.getvalue()
def test_custom_pydistutils(self):
# fixes #2166
# make sure pydistutils.cfg is found
if os.name == 'posix':
user_filename = ".pydistutils.cfg"
else:
user_filename = "pydistutils.cfg"
temp_dir = self.mkdtemp()
user_filename = os.path.join(temp_dir, user_filename)
f = open(user_filename, 'w')
try:
f.write('.')
finally:
f.close()
try:
dist = Distribution()
# linux-style
if sys.platform in ('linux', 'darwin'):
os.environ['HOME'] = temp_dir
files = dist.find_config_files()
self.assertIn(user_filename, files)
# win32-style
if sys.platform == 'win32':
# home drive should be found
os.environ['HOME'] = temp_dir
files = dist.find_config_files()
self.assertIn(user_filename, files,
'%r not found in %r' % (user_filename, files))
finally:
os.remove(user_filename)
def test_fix_help_options(self):
help_tuples = [('a', 'b', 'c', 'd'), (1, 2, 3, 4)]
fancy_options = fix_help_options(help_tuples)
self.assertEqual(fancy_options[0], ('a', 'b', 'c'))
self.assertEqual(fancy_options[1], (1, 2, 3))
def test_show_help(self):
# smoke test, just makes sure some help is displayed
dist = Distribution()
sys.argv = []
dist.help = 1
dist.script_name = 'setup.py'
with captured_stdout() as s:
dist.parse_command_line()
output = [line for line in s.getvalue().split('\n')
if line.strip() != '']
self.assertTrue(output)
def test_read_metadata(self):
attrs = {"name": "package",
"version": "1.0",
"long_description": "desc",
"description": "xxx",
"download_url": "http://example.com",
"keywords": ['one', 'two'],
"requires": ['foo']}
dist = Distribution(attrs)
metadata = dist.metadata
        # write it, then reload it
PKG_INFO = StringIO.StringIO()
metadata.write_pkg_file(PKG_INFO)
PKG_INFO.seek(0)
metadata.read_pkg_file(PKG_INFO)
self.assertEqual(metadata.name, "package")
self.assertEqual(metadata.version, "1.0")
self.assertEqual(metadata.description, "xxx")
self.assertEqual(metadata.download_url, 'http://example.com')
self.assertEqual(metadata.keywords, ['one', 'two'])
self.assertEqual(metadata.platforms, ['UNKNOWN'])
self.assertEqual(metadata.obsoletes, None)
self.assertEqual(metadata.requires, ['foo'])
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DistributionTestCase))
suite.addTest(unittest.makeSuite(MetadataTestCase))
return suite
if __name__ == "__main__":
run_unittest(test_suite())
| gpl-2.0 |
suneel0101/django-easyrest | setup.py | 1 | 1212 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright <2013> Suneel Chakravorty <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from setuptools import setup
setup(name='django-easyrest',
version='0.0.2',
description='An ultra-lightweight read-only REST api framework for Django',
author='Suneel Chakravorty',
author_email='[email protected]',
url='https://github.com/suneel0101/django-restroom',
packages=['easyrest'],
install_requires=[
"django",
],
package_data={
'django-restroom': ['LICENSE', '*.md'],
})
| mit |
loic/django | django/conf/locale/pt/formats.py | 504 | 1717 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r'j \d\e F \d\e Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = r'j \d\e F \d\e Y à\s H:i'
YEAR_MONTH_FORMAT = r'F \d\e Y'
MONTH_DAY_FORMAT = r'j \d\e F'
SHORT_DATE_FORMAT = 'd/m/Y'
SHORT_DATETIME_FORMAT = 'd/m/Y H:i'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%d/%m/%Y', '%d/%m/%y', # '2006-10-25', '25/10/2006', '25/10/06'
# '%d de %b de %Y', '%d de %b, %Y', # '25 de Out de 2006', '25 Out, 2006'
# '%d de %B de %Y', '%d de %B, %Y', # '25 de Outubro de 2006', '25 de Outubro, 2006'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause |
wangjun/simDownloader | simDHT.py | 5 | 7047 | #!/usr/bin/env python
# encoding: utf-8
import socket
import threading
from hashlib import sha1
from random import randint
from struct import unpack
from socket import inet_ntoa
from threading import Timer, Thread
from time import sleep
from collections import deque
from bencode import bencode, bdecode
from Queue import Queue
import simMetadata
BOOTSTRAP_NODES = (
("router.bittorrent.com", 6881),
("dht.transmissionbt.com", 6881),
("router.utorrent.com", 6881)
)
TID_LENGTH = 2
RE_JOIN_DHT_INTERVAL = 3
TOKEN_LENGTH = 2
def entropy(length):
return "".join(chr(randint(0, 255)) for _ in xrange(length))
def random_id():
h = sha1()
h.update(entropy(20))
return h.digest()
def decode_nodes(nodes):
n = []
length = len(nodes)
if (length % 26) != 0:
return n
for i in range(0, length, 26):
nid = nodes[i:i+20]
ip = inet_ntoa(nodes[i+20:i+24])
port = unpack("!H", nodes[i+24:i+26])[0]
n.append((nid, ip, port))
return n
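# Illustrative helper (a made-up example, not used elsewhere): each entry in the
# DHT "compact node info" blob parsed above is 26 bytes -- a 20-byte node id,
# a 4-byte packed IPv4 address and a 2-byte big-endian port.
def _example_decode_nodes():
    from socket import inet_aton
    from struct import pack
    sample = "A" * 20 + inet_aton("1.2.3.4") + pack("!H", 6881)
    # decode_nodes() yields one (nid, ip, port) tuple per 26-byte entry
    return decode_nodes(sample)  # -> [("AAAAAAAAAAAAAAAAAAAA", "1.2.3.4", 6881)]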
def timer(t, f):
Timer(t, f).start()
def get_neighbor(target, nid, end=10):
return target[:end]+nid[end:]
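# get_neighbor() keeps the first `end` bytes of the target id and fills the rest
# from our own id, so remote nodes see an id that is close to the target in XOR
# distance and keep routing get_peers/announce traffic to this client.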
class KNode(object):
def __init__(self, nid, ip, port):
self.nid = nid
self.ip = ip
self.port = port
class DHTClient(Thread):
def __init__(self, max_node_qsize):
Thread.__init__(self)
self.setDaemon(True)
self.max_node_qsize = max_node_qsize
self.nid = random_id()
self.nodes = deque(maxlen=max_node_qsize)
def send_krpc(self, msg, address):
try:
self.ufd.sendto(bencode(msg), address)
except Exception:
pass
def send_find_node(self, address, nid=None):
nid = get_neighbor(nid, self.nid) if nid else self.nid
tid = entropy(TID_LENGTH)
msg = {
"t": tid,
"y": "q",
"q": "find_node",
"a": {
"id": nid,
"target": random_id()
}
}
self.send_krpc(msg, address)
def join_DHT(self):
for address in BOOTSTRAP_NODES:
self.send_find_node(address)
def re_join_DHT(self):
if len(self.nodes) == 0:
self.join_DHT()
timer(RE_JOIN_DHT_INTERVAL, self.re_join_DHT)
def auto_send_find_node(self):
wait = 1.0 / self.max_node_qsize
while True:
try:
node = self.nodes.popleft()
self.send_find_node((node.ip, node.port), node.nid)
except IndexError:
pass
sleep(wait)
def process_find_node_response(self, msg, address):
nodes = decode_nodes(msg["r"]["nodes"])
for node in nodes:
(nid, ip, port) = node
if len(nid) != 20: continue
if ip == self.bind_ip: continue
n = KNode(nid, ip, port)
self.nodes.append(n)
class DHTServer(DHTClient):
def __init__(self, master, bind_ip, bind_port, max_node_qsize):
DHTClient.__init__(self, max_node_qsize)
self.master = master
self.bind_ip = bind_ip
self.bind_port = bind_port
self.process_request_actions = {
"get_peers": self.on_get_peers_request,
"announce_peer": self.on_announce_peer_request,
}
self.ufd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self.ufd.bind((self.bind_ip, self.bind_port))
timer(RE_JOIN_DHT_INTERVAL, self.re_join_DHT)
def run(self):
self.re_join_DHT()
while True:
try:
(data, address) = self.ufd.recvfrom(65536)
msg = bdecode(data)
self.on_message(msg, address)
except Exception:
pass
def on_message(self, msg, address):
try:
if msg["y"] == "r":
if msg["r"].has_key("nodes"):
self.process_find_node_response(msg, address)
elif msg["y"] == "q":
try:
self.process_request_actions[msg["q"]](msg, address)
except KeyError:
self.play_dead(msg, address)
except KeyError:
pass
def on_get_peers_request(self, msg, address):
try:
infohash = msg["a"]["info_hash"]
tid = msg["t"]
nid = msg["a"]["id"]
token = infohash[:TOKEN_LENGTH]
msg = {
"t": tid,
"y": "r",
"r": {
"id": get_neighbor(infohash, self.nid),
"nodes": "",
"token": token
}
}
self.send_krpc(msg, address)
except KeyError:
pass
def on_announce_peer_request(self, msg, address):
try:
infohash = msg["a"]["info_hash"]
token = msg["a"]["token"]
nid = msg["a"]["id"]
tid = msg["t"]
if infohash[:TOKEN_LENGTH] == token:
                if msg["a"].has_key("implied_port") and msg["a"]["implied_port"] != 0:
port = address[1]
else:
port = msg["a"]["port"]
self.master.log(infohash, (address[0], port))
except Exception:
print 'error'
pass
finally:
self.ok(msg, address)
def play_dead(self, msg, address):
try:
tid = msg["t"]
msg = {
"t": tid,
"y": "e",
"e": [202, "Server Error"]
}
self.send_krpc(msg, address)
except KeyError:
pass
def ok(self, msg, address):
try:
tid = msg["t"]
nid = msg["a"]["id"]
msg = {
"t": tid,
"y": "r",
"r": {
"id": get_neighbor(nid, self.nid)
}
}
self.send_krpc(msg, address)
except KeyError:
pass
class Master(Thread):
def __init__(self):
Thread.__init__(self)
self.setDaemon(True)
self.queue = Queue()
def run(self):
while True:
self.downloadMetadata()
def log(self, infohash, address=None):
self.queue.put([address, infohash])
def downloadMetadata(self):
        # 100 threads to download metadata
for i in xrange(0, 100):
if self.queue.qsize() == 0:
sleep(1)
continue
announce = self.queue.get()
t = threading.Thread(target = simMetadata.download_metadata, args = (announce[0], announce[1]))
t.setDaemon(True)
t.start()
if __name__ == "__main__":
    # a bigger max_node_qsize means more bandwidth and a higher crawl speed
master = Master()
master.start()
dht = DHTServer(master, "0.0.0.0", 6881, max_node_qsize=200)
dht.start()
dht.auto_send_find_node() | gpl-2.0 |
unreal666/outwiker | src/outwiker/pages/search/htmlreport.py | 3 | 2955 | # -*- coding: UTF-8 -*-
import html
from outwiker.gui.guiconfig import GeneralGuiConfig
from outwiker.core.system import getOS
class HtmlReport (object):
"""
    Class that generates the HTML report listing the pages found by a search
"""
def __init__(self, pages, searchPhrase, searchTags, application):
"""
        pages - the list of pages found by the search
        searchPhrase - the phrase that was searched for
        searchTags - the tags taking part in the search
"""
self.__pages = pages
self.__searchPhrase = searchPhrase
self.__searchTags = searchTags
self.__application = application
def generate(self):
"""
        Generate the report
"""
shell = u"""<html>
<head>
<meta http-equiv='Content-Type' content='text/html; charset=UTF-8'/>
</head>
<body>
<ol type='1'>
%s
</ol>
</body>
</html>"""
items = u""
for page in self.__pages:
items += self.generataPageView(page)
result = shell % items
return result
def generataPageView(self, page):
"""
        Return the HTML view of a single page
"""
item = u'<b><a href="page://%s">%s</a></b>' % (
html.escape(page.subpath, True), page.title)
if page.parent.parent is not None:
item += u" (%s)" % page.parent.subpath
item += u"<br>" + self.generatePageInfo(page) + "<p></p>"
result = u"<li>%s</li>\n" % item
return result
def generatePageInfo(self, page):
tags = self.generatePageTags(page)
date = self.generateDate(page)
pageinfo = u"<font size='-1'>{tags}<br>{date}</font>".format(
tags=tags, date=date)
return pageinfo
def generateDate(self, page):
config = GeneralGuiConfig(self.__application.config)
dateStr = page.datetime.strftime(config.dateTimeFormat.value)
result = _(u"Last modified date: {0}").format(dateStr)
return result
def generatePageTags(self, page):
"""
        Build the list of tags for a page
"""
result = _(u"Tags: ")
for tag in page.tags:
result += self.generageTagView(tag) + u", "
if result.endswith(", "):
result = result[: -2]
return result
def generageTagView(self, tag):
"""
        Markup for a single tag
"""
if tag in self.__searchTags:
style = u"font-weight: bold; background-color: rgb(255,255,36);"
return u"<span style='{style}'>{tag}</span>".format(style=style, tag=tag)
else:
return tag
| gpl-3.0 |
twitchyliquid64/misc-scripts | s3tool/boto/ec2/elb/__init__.py | 110 | 32520 | # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""
This module provides an interface to the Elastic Compute Cloud (EC2)
load balancing service from AWS.
"""
from boto.connection import AWSQueryConnection
from boto.ec2.instanceinfo import InstanceInfo
from boto.ec2.elb.loadbalancer import LoadBalancer, LoadBalancerZones
from boto.ec2.elb.instancestate import InstanceState
from boto.ec2.elb.healthcheck import HealthCheck
from boto.regioninfo import RegionInfo, get_regions, load_regions
import boto
from boto.compat import six
RegionData = load_regions().get('elasticloadbalancing', {})
def regions():
"""
Get all available regions for the ELB service.
:rtype: list
:return: A list of :class:`boto.RegionInfo` instances
"""
return get_regions('elasticloadbalancing', connection_cls=ELBConnection)
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
:class:`boto.ec2.elb.ELBConnection`.
:param str region_name: The name of the region to connect to.
:rtype: :class:`boto.ec2.ELBConnection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
class ELBConnection(AWSQueryConnection):
APIVersion = boto.config.get('Boto', 'elb_version', '2012-06-01')
DefaultRegionName = boto.config.get('Boto', 'elb_region_name', 'us-east-1')
DefaultRegionEndpoint = boto.config.get(
'Boto', 'elb_region_endpoint',
'elasticloadbalancing.us-east-1.amazonaws.com')
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
security_token=None, validate_certs=True, profile_name=None):
"""
Init method to create a new connection to EC2 Load Balancing Service.
.. note:: The region argument is overridden by the region specified in
the boto configuration file.
"""
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
self.region = region
super(ELBConnection, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path,
security_token,
validate_certs=validate_certs,
profile_name=profile_name)
def _required_auth_capability(self):
return ['hmac-v4']
def build_list_params(self, params, items, label):
if isinstance(items, six.string_types):
items = [items]
for index, item in enumerate(items):
params[label % (index + 1)] = item
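    # Example (hypothetical values): build_list_params expands a Python list
    # into the indexed query arguments the ELB API expects.
    #
    #     params = {}
    #     conn.build_list_params(params, ['sg-1', 'sg-2'],
    #                            'SecurityGroups.member.%d')
    #     # params == {'SecurityGroups.member.1': 'sg-1',
    #     #            'SecurityGroups.member.2': 'sg-2'}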
def get_all_load_balancers(self, load_balancer_names=None, marker=None):
"""
Retrieve all load balancers associated with your account.
:type load_balancer_names: list
:keyword load_balancer_names: An optional list of load balancer names.
:type marker: string
:param marker: Use this only when paginating results and only
in follow-up request after you've received a response
where the results are truncated. Set this to the value of
the Marker element in the response you just received.
:rtype: :py:class:`boto.resultset.ResultSet`
:return: A ResultSet containing instances of
:class:`boto.ec2.elb.loadbalancer.LoadBalancer`
"""
params = {}
if load_balancer_names:
self.build_list_params(params, load_balancer_names,
'LoadBalancerNames.member.%d')
if marker:
params['Marker'] = marker
return self.get_list('DescribeLoadBalancers', params,
[('member', LoadBalancer)])
def create_load_balancer(self, name, zones, listeners=None, subnets=None,
security_groups=None, scheme='internet-facing',
complex_listeners=None):
"""
Create a new load balancer for your account. By default the load
balancer will be created in EC2. To create a load balancer inside a
VPC, parameter zones must be set to None and subnets must not be None.
The load balancer will be automatically created under the VPC that
contains the subnet(s) specified.
:type name: string
:param name: The mnemonic name associated with the new load balancer
:type zones: List of strings
:param zones: The names of the availability zone(s) to add.
:type listeners: List of tuples
:param listeners: Each tuple contains three or four values,
(LoadBalancerPortNumber, InstancePortNumber, Protocol,
[SSLCertificateId]) where LoadBalancerPortNumber and
InstancePortNumber are integer values between 1 and 65535,
            Protocol is a string containing either 'TCP', 'SSL', 'HTTP', or
'HTTPS'; SSLCertificateID is the ARN of a AWS IAM
certificate, and must be specified when doing HTTPS.
:type subnets: list of strings
:param subnets: A list of subnet IDs in your VPC to attach to
your LoadBalancer.
:type security_groups: list of strings
:param security_groups: The security groups assigned to your
LoadBalancer within your VPC.
:type scheme: string
:param scheme: The type of a LoadBalancer. By default, Elastic
Load Balancing creates an internet-facing LoadBalancer with
a publicly resolvable DNS name, which resolves to public IP
addresses.
Specify the value internal for this option to create an
internal LoadBalancer with a DNS name that resolves to
private IP addresses.
This option is only available for LoadBalancers attached
to an Amazon VPC.
:type complex_listeners: List of tuples
:param complex_listeners: Each tuple contains four or five values,
(LoadBalancerPortNumber, InstancePortNumber, Protocol,
InstanceProtocol, SSLCertificateId).
Where:
- LoadBalancerPortNumber and InstancePortNumber are integer
values between 1 and 65535
- Protocol and InstanceProtocol is a string containing
either 'TCP',
'SSL', 'HTTP', or 'HTTPS'
- SSLCertificateId is the ARN of an SSL certificate loaded into
AWS IAM
:rtype: :class:`boto.ec2.elb.loadbalancer.LoadBalancer`
:return: The newly created
:class:`boto.ec2.elb.loadbalancer.LoadBalancer`
"""
if not listeners and not complex_listeners:
# Must specify one of the two options
return None
params = {'LoadBalancerName': name,
'Scheme': scheme}
# Handle legacy listeners
if listeners:
for index, listener in enumerate(listeners):
i = index + 1
protocol = listener[2].upper()
params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0]
params['Listeners.member.%d.InstancePort' % i] = listener[1]
params['Listeners.member.%d.Protocol' % i] = listener[2]
if protocol == 'HTTPS' or protocol == 'SSL':
params['Listeners.member.%d.SSLCertificateId' % i] = listener[3]
# Handle the full listeners
if complex_listeners:
for index, listener in enumerate(complex_listeners):
i = index + 1
protocol = listener[2].upper()
InstanceProtocol = listener[3].upper()
params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0]
params['Listeners.member.%d.InstancePort' % i] = listener[1]
params['Listeners.member.%d.Protocol' % i] = listener[2]
params['Listeners.member.%d.InstanceProtocol' % i] = listener[3]
if protocol == 'HTTPS' or protocol == 'SSL':
params['Listeners.member.%d.SSLCertificateId' % i] = listener[4]
if zones:
self.build_list_params(params, zones, 'AvailabilityZones.member.%d')
if subnets:
self.build_list_params(params, subnets, 'Subnets.member.%d')
if security_groups:
self.build_list_params(params, security_groups,
'SecurityGroups.member.%d')
load_balancer = self.get_object('CreateLoadBalancer',
params, LoadBalancer)
load_balancer.name = name
load_balancer.listeners = listeners
load_balancer.availability_zones = zones
load_balancer.subnets = subnets
load_balancer.security_groups = security_groups
return load_balancer
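    # A minimal usage sketch (region, balancer name, zones and ports are
    # made-up examples): create an internet-facing balancer that forwards
    # HTTP port 80 to port 8080 on the registered instances.
    #
    #     import boto.ec2.elb
    #     conn = boto.ec2.elb.connect_to_region('us-east-1')
    #     lb = conn.create_load_balancer('my-lb',
    #                                    ['us-east-1a', 'us-east-1b'],
    #                                    listeners=[(80, 8080, 'HTTP')])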
def create_load_balancer_listeners(self, name, listeners=None,
complex_listeners=None):
"""
Creates a Listener (or group of listeners) for an existing
Load Balancer
:type name: string
:param name: The name of the load balancer to create the listeners for
:type listeners: List of tuples
:param listeners: Each tuple contains three or four values,
(LoadBalancerPortNumber, InstancePortNumber, Protocol,
[SSLCertificateId]) where LoadBalancerPortNumber and
InstancePortNumber are integer values between 1 and 65535,
            Protocol is a string containing either 'TCP', 'SSL', 'HTTP', or
'HTTPS'; SSLCertificateID is the ARN of a AWS IAM
certificate, and must be specified when doing HTTPS.
:type complex_listeners: List of tuples
:param complex_listeners: Each tuple contains four or five values,
(LoadBalancerPortNumber, InstancePortNumber, Protocol,
InstanceProtocol, SSLCertificateId).
Where:
- LoadBalancerPortNumber and InstancePortNumber are integer
values between 1 and 65535
- Protocol and InstanceProtocol is a string containing
either 'TCP',
'SSL', 'HTTP', or 'HTTPS'
- SSLCertificateId is the ARN of an SSL certificate loaded into
AWS IAM
:return: The status of the request
"""
if not listeners and not complex_listeners:
# Must specify one of the two options
return None
params = {'LoadBalancerName': name}
# Handle the simple listeners
if listeners:
for index, listener in enumerate(listeners):
i = index + 1
protocol = listener[2].upper()
params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0]
params['Listeners.member.%d.InstancePort' % i] = listener[1]
params['Listeners.member.%d.Protocol' % i] = listener[2]
if protocol == 'HTTPS' or protocol == 'SSL':
params['Listeners.member.%d.SSLCertificateId' % i] = listener[3]
# Handle the full listeners
if complex_listeners:
for index, listener in enumerate(complex_listeners):
i = index + 1
protocol = listener[2].upper()
InstanceProtocol = listener[3].upper()
params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0]
params['Listeners.member.%d.InstancePort' % i] = listener[1]
params['Listeners.member.%d.Protocol' % i] = listener[2]
params['Listeners.member.%d.InstanceProtocol' % i] = listener[3]
if protocol == 'HTTPS' or protocol == 'SSL':
params['Listeners.member.%d.SSLCertificateId' % i] = listener[4]
return self.get_status('CreateLoadBalancerListeners', params)
def delete_load_balancer(self, name):
"""
Delete a Load Balancer from your account.
:type name: string
:param name: The name of the Load Balancer to delete
"""
params = {'LoadBalancerName': name}
return self.get_status('DeleteLoadBalancer', params)
def delete_load_balancer_listeners(self, name, ports):
"""
Deletes a load balancer listener (or group of listeners)
:type name: string
:param name: The name of the load balancer to create the listeners for
:type ports: List int
:param ports: Each int represents the port on the ELB to be removed
:return: The status of the request
"""
params = {'LoadBalancerName': name}
for index, port in enumerate(ports):
params['LoadBalancerPorts.member.%d' % (index + 1)] = port
return self.get_status('DeleteLoadBalancerListeners', params)
def enable_availability_zones(self, load_balancer_name, zones_to_add):
"""
Add availability zones to an existing Load Balancer
All zones must be in the same region as the Load Balancer
Adding zones that are already registered with the Load Balancer
has no effect.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type zones: List of strings
:param zones: The name of the zone(s) to add.
:rtype: List of strings
:return: An updated list of zones for this Load Balancer.
"""
params = {'LoadBalancerName': load_balancer_name}
self.build_list_params(params, zones_to_add,
'AvailabilityZones.member.%d')
obj = self.get_object('EnableAvailabilityZonesForLoadBalancer',
params, LoadBalancerZones)
return obj.zones
def disable_availability_zones(self, load_balancer_name, zones_to_remove):
"""
Remove availability zones from an existing Load Balancer.
All zones must be in the same region as the Load Balancer.
Removing zones that are not registered with the Load Balancer
has no effect.
You cannot remove all zones from an Load Balancer.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type zones: List of strings
:param zones: The name of the zone(s) to remove.
:rtype: List of strings
:return: An updated list of zones for this Load Balancer.
"""
params = {'LoadBalancerName': load_balancer_name}
self.build_list_params(params, zones_to_remove,
'AvailabilityZones.member.%d')
obj = self.get_object('DisableAvailabilityZonesForLoadBalancer',
params, LoadBalancerZones)
return obj.zones
def modify_lb_attribute(self, load_balancer_name, attribute, value):
"""Changes an attribute of a Load Balancer
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type attribute: string
:param attribute: The attribute you wish to change.
* crossZoneLoadBalancing - Boolean (true)
* connectingSettings - :py:class:`ConnectionSettingAttribute` instance
* accessLog - :py:class:`AccessLogAttribute` instance
* connectionDraining - :py:class:`ConnectionDrainingAttribute` instance
:type value: string
:param value: The new value for the attribute
:rtype: bool
:return: Whether the operation succeeded or not
"""
bool_reqs = ('crosszoneloadbalancing',)
if attribute.lower() in bool_reqs:
if isinstance(value, bool):
if value:
value = 'true'
else:
value = 'false'
params = {'LoadBalancerName': load_balancer_name}
if attribute.lower() == 'crosszoneloadbalancing':
params['LoadBalancerAttributes.CrossZoneLoadBalancing.Enabled'
] = value
elif attribute.lower() == 'accesslog':
params['LoadBalancerAttributes.AccessLog.Enabled'] = \
value.enabled and 'true' or 'false'
params['LoadBalancerAttributes.AccessLog.S3BucketName'] = \
value.s3_bucket_name
params['LoadBalancerAttributes.AccessLog.S3BucketPrefix'] = \
value.s3_bucket_prefix
params['LoadBalancerAttributes.AccessLog.EmitInterval'] = \
value.emit_interval
elif attribute.lower() == 'connectiondraining':
params['LoadBalancerAttributes.ConnectionDraining.Enabled'] = \
value.enabled and 'true' or 'false'
params['LoadBalancerAttributes.ConnectionDraining.Timeout'] = \
value.timeout
elif attribute.lower() == 'connectingsettings':
params['LoadBalancerAttributes.ConnectionSettings.IdleTimeout'] = \
value.idle_timeout
else:
raise ValueError('InvalidAttribute', attribute)
return self.get_status('ModifyLoadBalancerAttributes', params,
verb='GET')
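    # A minimal usage sketch (the balancer name is a made-up example):
    # enabling cross-zone load balancing can use the boolean shortcut form.
    #
    #     conn.modify_lb_attribute('my-lb', 'crossZoneLoadBalancing', True)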
def get_all_lb_attributes(self, load_balancer_name):
"""Gets all Attributes of a Load Balancer
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:rtype: boto.ec2.elb.attribute.LbAttributes
:return: The attribute object of the ELB.
"""
from boto.ec2.elb.attributes import LbAttributes
params = {'LoadBalancerName': load_balancer_name}
return self.get_object('DescribeLoadBalancerAttributes',
params, LbAttributes)
def get_lb_attribute(self, load_balancer_name, attribute):
"""Gets an attribute of a Load Balancer
This will make an EC2 call for each method call.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type attribute: string
:param attribute: The attribute you wish to see.
* accessLog - :py:class:`AccessLogAttribute` instance
* crossZoneLoadBalancing - Boolean
* connectingSettings - :py:class:`ConnectionSettingAttribute` instance
* connectionDraining - :py:class:`ConnectionDrainingAttribute`
instance
:rtype: Attribute dependent
:return: The new value for the attribute
"""
attributes = self.get_all_lb_attributes(load_balancer_name)
if attribute.lower() == 'accesslog':
return attributes.access_log
if attribute.lower() == 'crosszoneloadbalancing':
return attributes.cross_zone_load_balancing.enabled
if attribute.lower() == 'connectiondraining':
return attributes.connection_draining
if attribute.lower() == 'connectingsettings':
return attributes.connecting_settings
return None
def register_instances(self, load_balancer_name, instances):
"""
Add new Instances to an existing Load Balancer.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type instances: List of strings
:param instances: The instance ID's of the EC2 instances to add.
:rtype: List of strings
:return: An updated list of instances for this Load Balancer.
"""
params = {'LoadBalancerName': load_balancer_name}
self.build_list_params(params, instances,
'Instances.member.%d.InstanceId')
return self.get_list('RegisterInstancesWithLoadBalancer',
params, [('member', InstanceInfo)])
def deregister_instances(self, load_balancer_name, instances):
"""
Remove Instances from an existing Load Balancer.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type instances: List of strings
:param instances: The instance ID's of the EC2 instances to remove.
:rtype: List of strings
:return: An updated list of instances for this Load Balancer.
"""
params = {'LoadBalancerName': load_balancer_name}
self.build_list_params(params, instances,
'Instances.member.%d.InstanceId')
return self.get_list('DeregisterInstancesFromLoadBalancer',
params, [('member', InstanceInfo)])
def describe_instance_health(self, load_balancer_name, instances=None):
"""
Get current state of all Instances registered to an Load Balancer.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type instances: List of strings
:param instances: The instance ID's of the EC2 instances
to return status for. If not provided,
the state of all instances will be returned.
:rtype: List of :class:`boto.ec2.elb.instancestate.InstanceState`
:return: list of state info for instances in this Load Balancer.
"""
params = {'LoadBalancerName': load_balancer_name}
if instances:
self.build_list_params(params, instances,
'Instances.member.%d.InstanceId')
return self.get_list('DescribeInstanceHealth', params,
[('member', InstanceState)])
def configure_health_check(self, name, health_check):
"""
Define a health check for the EndPoints.
:type name: string
:param name: The mnemonic name associated with the load balancer
:type health_check: :class:`boto.ec2.elb.healthcheck.HealthCheck`
:param health_check: A HealthCheck object populated with the desired
values.
:rtype: :class:`boto.ec2.elb.healthcheck.HealthCheck`
:return: The updated :class:`boto.ec2.elb.healthcheck.HealthCheck`
"""
params = {'LoadBalancerName': name,
'HealthCheck.Timeout': health_check.timeout,
'HealthCheck.Target': health_check.target,
'HealthCheck.Interval': health_check.interval,
'HealthCheck.UnhealthyThreshold': health_check.unhealthy_threshold,
'HealthCheck.HealthyThreshold': health_check.healthy_threshold}
return self.get_object('ConfigureHealthCheck', params, HealthCheck)
def set_lb_listener_SSL_certificate(self, lb_name, lb_port,
ssl_certificate_id):
"""
Sets the certificate that terminates the specified listener's SSL
connections. The specified certificate replaces any prior certificate
that was used on the same LoadBalancer and port.
"""
params = {'LoadBalancerName': lb_name,
'LoadBalancerPort': lb_port,
'SSLCertificateId': ssl_certificate_id}
return self.get_status('SetLoadBalancerListenerSSLCertificate', params)
def create_app_cookie_stickiness_policy(self, name, lb_name, policy_name):
"""
Generates a stickiness policy with sticky session lifetimes that follow
that of an application-generated cookie. This policy can only be
associated with HTTP listeners.
This policy is similar to the policy created by
CreateLBCookieStickinessPolicy, except that the lifetime of the special
Elastic Load Balancing cookie follows the lifetime of the
application-generated cookie specified in the policy configuration. The
load balancer only inserts a new stickiness cookie when the application
response includes a new application cookie.
If the application cookie is explicitly removed or expires, the session
stops being sticky until a new application cookie is issued.
"""
params = {'CookieName': name,
'LoadBalancerName': lb_name,
'PolicyName': policy_name}
return self.get_status('CreateAppCookieStickinessPolicy', params)
def create_lb_cookie_stickiness_policy(self, cookie_expiration_period,
lb_name, policy_name):
"""
Generates a stickiness policy with sticky session lifetimes controlled
by the lifetime of the browser (user-agent) or a specified expiration
        period. This policy can be associated only with HTTP listeners.
When a load balancer implements this policy, the load balancer uses a
special cookie to track the backend server instance for each request.
When the load balancer receives a request, it first checks to see if
this cookie is present in the request. If so, the load balancer sends
the request to the application server specified in the cookie. If not,
the load balancer sends the request to a server that is chosen based on
the existing load balancing algorithm.
A cookie is inserted into the response for binding subsequent requests
from the same user to that server. The validity of the cookie is based
on the cookie expiration time, which is specified in the policy
configuration.
None may be passed for cookie_expiration_period.
"""
params = {'LoadBalancerName': lb_name,
'PolicyName': policy_name}
if cookie_expiration_period is not None:
params['CookieExpirationPeriod'] = cookie_expiration_period
return self.get_status('CreateLBCookieStickinessPolicy', params)
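    # A minimal usage sketch (names are made-up examples): create a policy
    # whose cookie expires after one hour, then bind it to the port 80
    # listener with set_lb_policies_of_listener() below.
    #
    #     conn.create_lb_cookie_stickiness_policy(3600, 'my-lb', 'sticky-1h')
    #     conn.set_lb_policies_of_listener('my-lb', 80, ['sticky-1h'])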
def create_lb_policy(self, lb_name, policy_name, policy_type,
policy_attributes):
"""
Creates a new policy that contains the necessary attributes
depending on the policy type. Policies are settings that are
saved for your load balancer and that can be applied to the
front-end listener, or the back-end application server.
"""
params = {'LoadBalancerName': lb_name,
'PolicyName': policy_name,
'PolicyTypeName': policy_type}
        if policy_attributes:
            for index, (name, value) in enumerate(six.iteritems(policy_attributes), 1):
                params['PolicyAttributes.member.%d.AttributeName' % index] = name
                params['PolicyAttributes.member.%d.AttributeValue' % index] = value
        else:
            params['PolicyAttributes'] = ''
return self.get_status('CreateLoadBalancerPolicy', params)
def delete_lb_policy(self, lb_name, policy_name):
"""
Deletes a policy from the LoadBalancer. The specified policy must not
be enabled for any listeners.
"""
params = {'LoadBalancerName': lb_name,
'PolicyName': policy_name}
return self.get_status('DeleteLoadBalancerPolicy', params)
def set_lb_policies_of_listener(self, lb_name, lb_port, policies):
"""
Associates, updates, or disables a policy with a listener on the load
balancer. Currently only zero (0) or one (1) policy can be associated
with a listener.
"""
params = {'LoadBalancerName': lb_name,
'LoadBalancerPort': lb_port}
if len(policies):
self.build_list_params(params, policies, 'PolicyNames.member.%d')
else:
params['PolicyNames'] = ''
return self.get_status('SetLoadBalancerPoliciesOfListener', params)
def set_lb_policies_of_backend_server(self, lb_name, instance_port,
policies):
"""
Replaces the current set of policies associated with a port on which
the back-end server is listening with a new set of policies.
"""
params = {'LoadBalancerName': lb_name,
'InstancePort': instance_port}
if policies:
self.build_list_params(params, policies, 'PolicyNames.member.%d')
else:
params['PolicyNames'] = ''
return self.get_status('SetLoadBalancerPoliciesForBackendServer',
params)
def apply_security_groups_to_lb(self, name, security_groups):
"""
Associates one or more security groups with the load balancer.
The provided security groups will override any currently applied
security groups.
:type name: string
:param name: The name of the Load Balancer
:type security_groups: List of strings
:param security_groups: The name of the security group(s) to add.
:rtype: List of strings
:return: An updated list of security groups for this Load Balancer.
"""
params = {'LoadBalancerName': name}
self.build_list_params(params, security_groups,
'SecurityGroups.member.%d')
return self.get_list('ApplySecurityGroupsToLoadBalancer',
params, None)
def attach_lb_to_subnets(self, name, subnets):
"""
Attaches load balancer to one or more subnets.
Attaching subnets that are already registered with the
Load Balancer has no effect.
:type name: string
:param name: The name of the Load Balancer
:type subnets: List of strings
:param subnets: The name of the subnet(s) to add.
:rtype: List of strings
:return: An updated list of subnets for this Load Balancer.
"""
params = {'LoadBalancerName': name}
self.build_list_params(params, subnets,
'Subnets.member.%d')
return self.get_list('AttachLoadBalancerToSubnets',
params, None)
def detach_lb_from_subnets(self, name, subnets):
"""
Detaches load balancer from one or more subnets.
:type name: string
:param name: The name of the Load Balancer
:type subnets: List of strings
:param subnets: The name of the subnet(s) to detach.
:rtype: List of strings
:return: An updated list of subnets for this Load Balancer.
"""
params = {'LoadBalancerName': name}
self.build_list_params(params, subnets,
'Subnets.member.%d')
return self.get_list('DetachLoadBalancerFromSubnets',
params, None)
| mit |
pcameron/javafuse-read-only | fs/build/mx4j/tools/jython/jmxUtils.py | 7 | 5355 | """ Copyright (C) MX4J.
All rights reserved.
This software is distributed under the terms of the MX4J License version 1.0.
See the terms of the MX4J License in the documentation provided with this software.
author <a href="mailto:[email protected]">Carlos Quiroz</a>
version $Revision: 1.1 $
Adapted by Martin Fuzzey for testing use.
For this we need to communicate with a REMOTE server (the orignal code
always ran in the same process as the JMX server and was intended to be
used as helpers for python scripts in the python MBean
"""
import sys,java
sys.add_package("javax.management")
sys.add_package("javax.management.loading");
sys.add_package("javax.management.modelmbean");
sys.add_package("javax.management.monitor");
sys.add_package("javax.management.openmbean");
sys.add_package("javax.management.relation");
sys.add_package("javax.management.remote");
sys.add_package("javax.management.remote.rmi");
sys.add_package("javax.management.timer");
from javax.management import *
from javax.management.loading import *
from javax.management.modelmbean import *
from javax.management.monitor import *
from javax.management.openmbean import *
from javax.management.relation import *
from javax.management.remote import *
from javax.management.remote.rmi import *
from javax.management.timer import *
class ServerConnection:
def __init__(self, connection) :
self.server = connection
def createProxy(self, objectname) :
"""
Creates a proxy for the named MBean in this server.
The objectname may either be an instance of javax.management.ObjectName
or a string
The MBeans attributes and methods may be then accessed directly as in :
proxy = server.createProxy("myDomain:myType=toto")
print "val=",proxy.val
proxy.doSomething()
"""
if (isinstance(objectname, ObjectName) == 0) :
objectname = ObjectName(objectname)
return Proxy(self.server, objectname)
def getMBeanNames(self, query="*:*"):
"""
Returns a list of all the available MBeans in the server. The optional
query parameter will filter the list by objectname
"""
names = []
for n in self.server.queryNames(ObjectName(query), None) :
            names.append(n)  # copy into a Python list
return names
def getInstanceNames(self, classname, query="*:*"):
"""
Returns a list of all the available MBeans in the server which are instances
of classname. It accepts a query parameter to filter by objectname
"""
return [x for x in self.getMBeanNames(query) if self.server.isInstanceOf(x, classname)]
class OperationProxy:
def __init__(self, server, objectname, opInfo):
self.server = server
self.objectname = objectname
self.operation = opInfo.name
self.sig = []
for s in opInfo.signature :
self.sig.append(s.type)
def invoke(self, *args):
if (len(args) != len(self.sig)) :
raise "argument list / sig mismatch" + str(args) + str(self.sig)
# Manually map Boolean
nargs = []
for i in range(len(args)) :
arg = args[i]
if (self.sig[i] == "boolean") :
arg = java.lang.Boolean(arg)
nargs.append(arg)
return self.server.invoke(self.objectname, self.operation, nargs, self.sig)
class Proxy:
def __init__(self, server, objectname):
# Need the syntax below to avoid infinite recursion betweed setattr + getattr
self.__dict__["server"] = server
self.__dict__["objectname"] = objectname
info = self.server.getMBeanInfo(objectname)
for o in info.operations:
self.__dict__[o.name] = OperationProxy(self.server, objectname, o).invoke
# print "op:", o.name
def __getattr__(self, name):
return self.server.getAttribute(self.objectname, name)
def __setattr__(self, name, value):
from javax.management import Attribute
return self.server.setAttribute(self.objectname, Attribute(name, value))
def __repr__(self):
return "Proxy of MBean: %s " % (self.__dict__["objectname"], )
def invoke(self, name, arguments=None, types=None):
return self.server.invoke(self.objectname, name, arguments, types)
def addListener(self, l, filter=None, handback=None) :
self.server.addNotificationListener(self.objectname, l, filter, handback)
class proxy (Proxy): # For backwards compatibility
pass
def mbeans(query=None):
"""
Returns a list of all the available MBeans in the server. The optional
query parameter will filter the list by objectname
"""
if query:
return server.queryMBeans(ObjectName(query), None)
else:
return server.queryMBeans(None, None)
def instances(classname, query=None):
"""
Returns a list of all the available MBeans in the server which are instances
of classname. It accepts a query parameter to filter by objectname
"""
return [x for x in mbeans(query) if server.isInstanceOf(x.getObjectName(),classname)]
| gpl-3.0 |
baidu/Paddle | python/paddle/fluid/tests/unittests/test_py_reader_push_pop.py | 7 | 3749 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import paddle.fluid as fluid
import numpy as np
from threading import Thread
def feed_data(feed_queue, inputs):
for in_data in inputs:
feed_queue.push(in_data)
class TestPyReader(unittest.TestCase):
def setUp(self):
self.capacity = 10
self.batch_size_min = 10
self.batch_size_max = 20
self.shapes = [(-1, 3, 2, 1), (-1, 1)]
self.lod_levels = [0, 0]
self.dtypes = ['float32', 'int64']
self.iterations = 20
def test_single_thread_main(self):
self.main(use_thread=False)
def test_multiple_thread_main(self):
self.main(use_thread=True)
def main(self, use_thread=False):
with fluid.program_guard(fluid.Program(), fluid.Program()):
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
executor = fluid.Executor(place)
data_file = fluid.layers.py_reader(
capacity=self.capacity,
dtypes=self.dtypes,
lod_levels=self.lod_levels,
shapes=self.shapes)
feed_queue = data_file.queue
read_out_data = fluid.layers.read_file(data_file)
self.inputs = []
for i in range(self.iterations):
in_data = fluid.LoDTensorArray()
batch_size = np.random.random_integers(self.batch_size_min,
self.batch_size_max)
for shape, dtype in zip(self.shapes, self.dtypes):
next_data = np.random.uniform(
low=0, high=1000,
size=(batch_size, ) + shape[1:]).astype(dtype)
in_data.append(
fluid.executor._as_lodtensor(next_data, place))
self.inputs.append(in_data)
executor.run(fluid.default_startup_program())
self.outputs = []
if use_thread:
thread = Thread(
target=feed_data, args=(feed_queue, self.inputs))
thread.start()
for in_data in self.inputs:
self.outputs.append(
executor.run(fetch_list=list(read_out_data)))
else:
for in_data in self.inputs:
feed_queue.push(in_data)
self.outputs.append(
executor.run(fetch_list=list(read_out_data)))
feed_queue.close()
self.validate()
def validate(self):
self.assertEqual(len(self.inputs), len(self.outputs))
for in_data_list, out_data_list in zip(self.inputs, self.outputs):
self.assertEqual(len(in_data_list), len(out_data_list))
in_data_list_np = [
np.array(in_lod_tensor) for in_lod_tensor in in_data_list
]
for in_data, out_data in zip(in_data_list_np, out_data_list):
self.assertTrue((in_data == out_data).all())
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
ronekko/chainer | tests/chainer_tests/functions_tests/math_tests/test_clip.py | 2 | 2885 | import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestClip(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
# Avoid values around x_min and x_max for stability of numerical
# gradient
for ind in numpy.ndindex(self.x.shape):
if -0.76 < self.x[ind] < -0.74:
self.x[ind] = -0.5
elif 0.74 < self.x[ind] < 0.76:
self.x[ind] = 0.5
self.gy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.ggx = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.x_min = -0.75
self.x_max = 0.75
def check_forward(self, x_data):
x = chainer.Variable(x_data)
y = functions.clip(x, self.x_min, self.x_max)
self.assertEqual(y.data.dtype, self.dtype)
y_expect = self.x.copy()
for i in numpy.ndindex(self.x.shape):
if self.x[i] < self.x_min:
y_expect[i] = self.x_min
elif self.x[i] > self.x_max:
y_expect[i] = self.x_max
testing.assert_allclose(y_expect, y.data)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad):
def f(x):
return functions.clip(x, self.x_min, self.x_max)
gradient_check.check_backward(
f, x_data, y_grad, dtype=numpy.float64)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
def check_double_backward(self, x_data, y_grad, gx_grad):
def f(x):
return functions.clip(x, self.x_min, self.x_max)
gradient_check.check_double_backward(
f, x_data, y_grad, gx_grad, dtype=numpy.float64, atol=1e-3)
def test_double_backward_cpu(self):
self.check_double_backward(self.x, self.gy, self.ggx)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx))
class TestClipInvalidInterval(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
def test_invalid_interval(self):
with self.assertRaises(AssertionError):
functions.clip(self.x, 1.0, -1.0)
testing.run_module(__name__, __file__)
| mit |
reinout/django | tests/forms_tests/field_tests/test_datefield.py | 23 | 8167 | from datetime import date, datetime
from django.forms import (
DateField, Form, HiddenInput, SelectDateWidget, ValidationError,
)
from django.test import SimpleTestCase, override_settings
from django.utils import translation
class GetDate(Form):
mydate = DateField(widget=SelectDateWidget)
class DateFieldTest(SimpleTestCase):
def test_form_field(self):
a = GetDate({'mydate_month': '4', 'mydate_day': '1', 'mydate_year': '2008'})
self.assertTrue(a.is_valid())
self.assertEqual(a.cleaned_data['mydate'], date(2008, 4, 1))
# As with any widget that implements get_value_from_datadict(), we must
# accept the input from the "as_hidden" rendering as well.
self.assertHTMLEqual(
a['mydate'].as_hidden(),
'<input type="hidden" name="mydate" value="2008-4-1" id="id_mydate" />',
)
b = GetDate({'mydate': '2008-4-1'})
self.assertTrue(b.is_valid())
self.assertEqual(b.cleaned_data['mydate'], date(2008, 4, 1))
# Invalid dates shouldn't be allowed
c = GetDate({'mydate_month': '2', 'mydate_day': '31', 'mydate_year': '2010'})
self.assertFalse(c.is_valid())
self.assertEqual(c.errors, {'mydate': ['Enter a valid date.']})
# label tag is correctly associated with month dropdown
d = GetDate({'mydate_month': '1', 'mydate_day': '1', 'mydate_year': '2010'})
self.assertIn('<label for="id_mydate_month">', d.as_p())
@override_settings(USE_L10N=True)
@translation.override('nl')
def test_l10n_date_changed(self):
"""
DateField.has_changed() with SelectDateWidget works with a localized
date format (#17165).
"""
# With Field.show_hidden_initial=False
b = GetDate({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '1',
}, initial={'mydate': date(2008, 4, 1)})
self.assertFalse(b.has_changed())
b = GetDate({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '2',
}, initial={'mydate': date(2008, 4, 1)})
self.assertTrue(b.has_changed())
# With Field.show_hidden_initial=True
class GetDateShowHiddenInitial(Form):
mydate = DateField(widget=SelectDateWidget, show_hidden_initial=True)
b = GetDateShowHiddenInitial({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '1',
'initial-mydate': HiddenInput().format_value(date(2008, 4, 1)),
}, initial={'mydate': date(2008, 4, 1)})
self.assertFalse(b.has_changed())
b = GetDateShowHiddenInitial({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '22',
'initial-mydate': HiddenInput().format_value(date(2008, 4, 1)),
}, initial={'mydate': date(2008, 4, 1)})
self.assertTrue(b.has_changed())
b = GetDateShowHiddenInitial({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '22',
'initial-mydate': HiddenInput().format_value(date(2008, 4, 1)),
}, initial={'mydate': date(2008, 4, 22)})
self.assertTrue(b.has_changed())
b = GetDateShowHiddenInitial({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '22',
'initial-mydate': HiddenInput().format_value(date(2008, 4, 22)),
}, initial={'mydate': date(2008, 4, 1)})
self.assertFalse(b.has_changed())
@override_settings(USE_L10N=True)
@translation.override('nl')
def test_l10n_invalid_date_in(self):
# Invalid dates shouldn't be allowed
a = GetDate({'mydate_month': '2', 'mydate_day': '31', 'mydate_year': '2010'})
self.assertFalse(a.is_valid())
        # 'Voer een geldige datum in.' = 'Enter a valid date.'
self.assertEqual(a.errors, {'mydate': ['Voer een geldige datum in.']})
@override_settings(USE_L10N=True)
@translation.override('nl')
def test_form_label_association(self):
# label tag is correctly associated with first rendered dropdown
a = GetDate({'mydate_month': '1', 'mydate_day': '1', 'mydate_year': '2010'})
self.assertIn('<label for="id_mydate_day">', a.as_p())
def test_datefield_1(self):
f = DateField()
self.assertEqual(date(2006, 10, 25), f.clean(date(2006, 10, 25)))
self.assertEqual(date(2006, 10, 25), f.clean(datetime(2006, 10, 25, 14, 30)))
self.assertEqual(date(2006, 10, 25), f.clean(datetime(2006, 10, 25, 14, 30, 59)))
self.assertEqual(date(2006, 10, 25), f.clean(datetime(2006, 10, 25, 14, 30, 59, 200)))
self.assertEqual(date(2006, 10, 25), f.clean('2006-10-25'))
self.assertEqual(date(2006, 10, 25), f.clean('10/25/2006'))
self.assertEqual(date(2006, 10, 25), f.clean('10/25/06'))
self.assertEqual(date(2006, 10, 25), f.clean('Oct 25 2006'))
self.assertEqual(date(2006, 10, 25), f.clean('October 25 2006'))
self.assertEqual(date(2006, 10, 25), f.clean('October 25, 2006'))
self.assertEqual(date(2006, 10, 25), f.clean('25 October 2006'))
self.assertEqual(date(2006, 10, 25), f.clean('25 October, 2006'))
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean('2006-4-31')
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean('200a-10-25')
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean('25/10/06')
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(None)
def test_datefield_2(self):
f = DateField(required=False)
self.assertIsNone(f.clean(None))
self.assertEqual('None', repr(f.clean(None)))
self.assertIsNone(f.clean(''))
self.assertEqual('None', repr(f.clean('')))
def test_datefield_3(self):
f = DateField(input_formats=['%Y %m %d'])
self.assertEqual(date(2006, 10, 25), f.clean(date(2006, 10, 25)))
self.assertEqual(date(2006, 10, 25), f.clean(datetime(2006, 10, 25, 14, 30)))
self.assertEqual(date(2006, 10, 25), f.clean('2006 10 25'))
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean('2006-10-25')
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean('10/25/2006')
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean('10/25/06')
def test_datefield_4(self):
# Test whitespace stripping behavior (#5714)
f = DateField()
self.assertEqual(date(2006, 10, 25), f.clean(' 10/25/2006 '))
self.assertEqual(date(2006, 10, 25), f.clean(' 10/25/06 '))
self.assertEqual(date(2006, 10, 25), f.clean(' Oct 25 2006 '))
self.assertEqual(date(2006, 10, 25), f.clean(' October 25 2006 '))
self.assertEqual(date(2006, 10, 25), f.clean(' October 25, 2006 '))
self.assertEqual(date(2006, 10, 25), f.clean(' 25 October 2006 '))
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean(' ')
def test_datefield_5(self):
# Test null bytes (#18982)
f = DateField()
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean('a\x00b')
def test_datefield_changed(self):
format = '%d/%m/%Y'
f = DateField(input_formats=[format])
d = date(2007, 9, 17)
self.assertFalse(f.has_changed(d, '17/09/2007'))
def test_datefield_strptime(self):
"""field.strptime() doesn't raise a UnicodeEncodeError (#16123)"""
f = DateField()
try:
f.strptime('31 мая 2011', '%d-%b-%y')
except Exception as e:
# assertIsInstance or assertRaises cannot be used because UnicodeEncodeError
# is a subclass of ValueError
self.assertEqual(e.__class__, ValueError)
| bsd-3-clause |
VishvajitP/readthedocs.org | readthedocs/rtd_tests/tests/test_redirects.py | 20 | 10441 | from django.test import TestCase
from django.test.utils import override_settings
from django_dynamic_fixture import get
from django_dynamic_fixture import fixture
from readthedocs.builds.constants import LATEST
from readthedocs.projects.models import Project
from readthedocs.redirects.models import Redirect
import logging
class RedirectTests(TestCase):
fixtures = ["eric", "test_data"]
def setUp(self):
logging.disable(logging.DEBUG)
self.client.login(username='eric', password='test')
self.client.post(
'/dashboard/import/',
{'repo_type': 'git', 'name': 'Pip',
'tags': 'big, fucking, monkey', 'default_branch': '',
'project_url': 'http://pip.rtfd.org',
'repo': 'https://github.com/fail/sauce',
'csrfmiddlewaretoken': '34af7c8a5ba84b84564403a280d9a9be',
'default_version': LATEST,
'privacy_level': 'public',
'version_privacy_level': 'public',
'description': 'wat',
'documentation_type': 'sphinx'})
pip = Project.objects.get(slug='pip')
pip.versions.create_latest()
def test_proper_url_no_slash(self):
r = self.client.get('/docs/pip')
        # This is triggered by Django, so it's a 301, basically just
# APPEND_SLASH
self.assertEqual(r.status_code, 301)
self.assertEqual(r['Location'], 'http://testserver/docs/pip/')
r = self.client.get(r['Location'])
self.assertEqual(r.status_code, 302)
r = self.client.get(r['Location'])
self.assertEqual(r.status_code, 200)
def test_proper_url(self):
r = self.client.get('/docs/pip/')
self.assertEqual(r.status_code, 302)
self.assertEqual(
r['Location'], 'http://testserver/docs/pip/en/latest/')
r = self.client.get(r['Location'])
self.assertEqual(r.status_code, 200)
def test_proper_url_with_lang_slug_only(self):
r = self.client.get('/docs/pip/en/')
self.assertEqual(r.status_code, 302)
self.assertEqual(
r['Location'], 'http://testserver/docs/pip/en/latest/')
r = self.client.get(r['Location'])
self.assertEqual(r.status_code, 200)
def test_proper_url_full(self):
r = self.client.get('/docs/pip/en/latest/')
self.assertEqual(r.status_code, 200)
def test_proper_url_full_with_filename(self):
r = self.client.get('/docs/pip/en/latest/test.html')
self.assertEqual(r.status_code, 200)
# Specific Page Redirects
def test_proper_page_on_main_site(self):
r = self.client.get('/docs/pip/page/test.html')
self.assertEqual(r.status_code, 302)
self.assertEqual(r['Location'],
'http://testserver/docs/pip/en/latest/test.html')
r = self.client.get(r['Location'])
self.assertEqual(r.status_code, 200)
def test_proper_url_with_version_slug_only(self):
r = self.client.get('/docs/pip/latest/')
self.assertEqual(r.status_code, 302)
self.assertEqual(
r['Location'], 'http://testserver/docs/pip/en/latest/')
r = self.client.get(r['Location'])
self.assertEqual(r.status_code, 200)
# If slug is neither valid lang nor valid version, it should 404.
# TODO: This should 404 directly, not redirect first
def test_improper_url_with_nonexistent_slug(self):
r = self.client.get('/docs/pip/nonexistent/')
self.assertEqual(r.status_code, 302)
r = self.client.get(r['Location'])
self.assertEqual(r.status_code, 404)
def test_improper_url_filename_only(self):
r = self.client.get('/docs/pip/test.html')
self.assertEqual(r.status_code, 404)
def test_improper_url_dir_file(self):
r = self.client.get('/docs/pip/nonexistent_dir/bogus.html')
self.assertEqual(r.status_code, 404)
def test_improper_url_dir_subdir_file(self):
r = self.client.get('/docs/pip/nonexistent_dir/subdir/bogus.html')
self.assertEqual(r.status_code, 404)
def test_improper_url_lang_file(self):
r = self.client.get('/docs/pip/en/bogus.html')
self.assertEqual(r.status_code, 404)
def test_improper_url_lang_subdir_file(self):
r = self.client.get('/docs/pip/en/nonexistent_dir/bogus.html')
self.assertEqual(r.status_code, 404)
    def test_improper_url_lang_dir_subdir_file(self):
r = self.client.get('/docs/pip/en/nonexistent_dir/subdir/bogus.html')
self.assertEqual(r.status_code, 404)
def test_improper_url_version_dir_file(self):
r = self.client.get('/docs/pip/latest/nonexistent_dir/bogus.html')
self.assertEqual(r.status_code, 404)
# Subdomains
def test_proper_subdomain(self):
r = self.client.get('/', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(r.status_code, 302)
self.assertEqual(
r['Location'], 'http://pip.readthedocs.org/en/latest/')
def test_proper_subdomain_with_lang_slug_only(self):
r = self.client.get('/en/', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(r.status_code, 302)
self.assertEqual(
r['Location'], 'http://pip.readthedocs.org/en/latest/')
def test_proper_subdomain_and_url(self):
r = self.client.get('/en/latest/', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(r.status_code, 200)
def test_proper_subdomain_and_url_with_filename(self):
r = self.client.get(
'/en/latest/test.html', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(r.status_code, 200)
# Specific Page Redirects
def test_proper_page_on_subdomain(self):
r = self.client.get('/page/test.html', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(r.status_code, 302)
self.assertEqual(r['Location'],
'http://pip.readthedocs.org/en/latest/test.html')
# When there's only a version slug, the redirect prepends the lang slug
def test_proper_subdomain_with_version_slug_only(self):
r = self.client.get('/1.4.1/', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(r.status_code, 302)
self.assertEqual(r['Location'],
'http://pip.readthedocs.org/en/1.4.1/')
def test_improper_subdomain_filename_only(self):
r = self.client.get('/test.html', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(r.status_code, 404)
class RedirectUnderscoreTests(TestCase):
fixtures = ["eric", "test_data"]
def setUp(self):
logging.disable(logging.DEBUG)
self.client.login(username='eric', password='test')
whatup = Project.objects.create(
slug='what_up', name='What Up Underscore')
# Test _ -> - slug lookup
def test_underscore_redirect(self):
r = self.client.get('/',
HTTP_HOST='what-up.readthedocs.org')
self.assertEqual(r.status_code, 302)
self.assertEqual(
r['Location'], 'http://what-up.readthedocs.org/en/latest/')
class RedirectAppTests(TestCase):
fixtures = ["eric", "test_data"]
def setUp(self):
self.client.login(username='eric', password='test')
self.client.post(
'/dashboard/import/',
{'repo_type': 'git', 'name': 'Pip',
'tags': 'big, fucking, monkey', 'default_branch': '',
'project_url': 'http://pip.rtfd.org',
'repo': 'https://github.com/fail/sauce',
'csrfmiddlewaretoken': '34af7c8a5ba84b84564403a280d9a9be',
'default_version': LATEST,
'privacy_level': 'public',
'version_privacy_level': 'public',
'description': 'wat',
'documentation_type': 'sphinx'})
self.pip = Project.objects.get(slug='pip')
self.pip.versions.create_latest()
@override_settings(USE_SUBDOMAIN=True)
def test_redirect_root(self):
Redirect.objects.create(
project=self.pip, redirect_type='prefix', from_url='/woot/')
r = self.client.get('/woot/faq.html', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(r.status_code, 302)
self.assertEqual(
r['Location'], 'http://pip.readthedocs.org/en/latest/faq.html')
@override_settings(USE_SUBDOMAIN=True)
def test_redirect_page(self):
Redirect.objects.create(
project=self.pip, redirect_type='page', from_url='/install.html', to_url='/tutorial/install.html')
r = self.client.get('/install.html', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(r.status_code, 302)
self.assertEqual(
r['Location'], 'http://pip.readthedocs.org/en/latest/tutorial/install.html')
@override_settings(USE_SUBDOMAIN=True, PYTHON_MEDIA=True)
def test_redirect_html(self):
Redirect.objects.create(
project=self.pip, redirect_type='sphinx_html')
r = self.client.get('/en/latest/faq/', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(r.status_code, 302)
self.assertEqual(
r['Location'], 'http://pip.readthedocs.org/en/latest/faq.html')
@override_settings(USE_SUBDOMAIN=True, PYTHON_MEDIA=True)
def test_redirect_htmldir(self):
Redirect.objects.create(
project=self.pip, redirect_type='sphinx_htmldir')
r = self.client.get('/en/latest/faq.html', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(r.status_code, 302)
self.assertEqual(
r['Location'], 'http://pip.readthedocs.org/en/latest/faq/')
class RedirectBuildTests(TestCase):
fixtures = ["eric", "test_data"]
def setUp(self):
self.project = get(Project,
slug='project-1',
documentation_type='sphinx',
conf_py_file='test_conf.py',
versions=[fixture()])
self.version = self.project.versions.all()[0]
def test_redirect_list(self):
r = self.client.get('/builds/project-1/')
self.assertEqual(r.status_code, 301)
self.assertEqual(r['Location'], 'http://testserver/projects/project-1/builds/')
def test_redirect_detail(self):
r = self.client.get('/builds/project-1/1337/')
self.assertEqual(r.status_code, 301)
self.assertEqual(r['Location'], 'http://testserver/projects/project-1/builds/1337/')
| mit |
isi-nlp/bolinas | extractor_synsem/extractor_synsem.py | 2 | 14687 | from common.exceptions import InvocationException
from common.hgraph.hgraph import Hgraph
from common.cfg import NonterminalLabel
from lib.tree import Tree
import re
from collections import defaultdict as ddict
import itertools
from parser.vo_rule import Rule
import sys
DEFAULT_COMPOSITION_DEPTH = 3
class ExtractorSynSem:
def __init__(self):
pass
@classmethod
def help(self):
"""
Returns SynSem help message.
"""
return 'Usage: python extract-synsem <nl_file> <mr_file> ' + \
'<alignment_file> <destination> [composition_depth (default %d)]' % \
DEFAULT_COMPOSITION_DEPTH
def main(self, *args):
"""
Extracts rules from the given training data, with an optional composition
depth specified.
"""
if len(args) < 4:
print self.help()
raise InvocationException()
nl_path, mr_path, align_path, destination_prefix = args[:4]
if len(args) == 4:
composition_depth = DEFAULT_COMPOSITION_DEPTH
elif len(args) == 5:
composition_depth = int(args[4])
else:
print self.help()
raise InvocationException()
self.extract_rules_corpus(nl_path, mr_path, align_path, destination_prefix,
composition_depth)
def extract_rules_corpus(self, nl_path, amr_path, alignment_path,
destination_prefix, composition_depth):
"""
Extract all rules from the corpus specified by the *_path arguments.
"""
syn_f = open(nl_path)
sem_f = open(amr_path)
align_f = open(alignment_path)
n_examples = count_lines(amr_path)
announce_interval = n_examples / 10
# load input data into examples list
examples = []
for example_i in range(n_examples):
syn_s = syn_f.readline().strip()
sem_s = sem_f.readline().strip()
align_s = align_f.readline().strip()
amr = Dag.from_string(sem_s)
tree = Tree(syn_s)
label_spans(tree)
align = get_alignments(align_s, amr)
examples.append((amr, tree, align))
# extract rules from data
rules = []
for example in examples:
example_rules = extract_rules(example[0], example[1], example[2],
composition_depth)
rules += example_rules
# assign ML weights by counting
grammar = collect_counts(rules)
Rule.write_to_file(grammar, destination_prefix)
def count_lines(filename):
"""
Counts the number of lines in the given file.
"""
n_lines = 0
with open(filename) as f:
for line in f:
n_lines += 1
return n_lines
def get_alignments(align_s, amr):
"""
Converts alignments into an actual mapping into edges of the AMR object.
"""
alignments = ddict(list)
align_s_parts = align_s.split()
for part in align_s_parts:
match = re.match(r'([^:]+):([^:]+:?[^:]+):([^:]+)-(\d+)', part)
head = match.group(1)
label = match.group(2)
tail = match.group(3)
index = int(match.group(4))
edge_l = [e for e in amr.triples() if
e[0] == head and \
e[1] == label and \
e[2] == (tail,)]
assert len(edge_l) == 1
alignments[edge_l[0]].append(index)
return dict(alignments)
def label_spans(tree, start=0):
"""
    Labels each constituent with its corresponding sentence span (so that we can
    distinguish constituents over different parts of the sentence with identical
    tree structure).
"""
end = start
if isinstance(tree, Tree):
for child in tree:
end = label_spans(child, end)
tree.span = (start, end)
return end
else:
return end + 1
def minimal_aligned(constituents, tree_aligned):
"""
    Finds the minimal aligned constituents: aligned constituents with no smaller
    descendant constituent covering exactly the same set of aligned token indices.
"""
minimal_constituents = []
for key in constituents:
start,end,height = key
# ignore unaligned constituents
if len(tree_aligned[key]) == 0:
continue
# ignore constituents which have children with identical alignments
minimal = True
for key2 in constituents:
start2,end2,height2 = key2
if tree_aligned[key] == tree_aligned[key2] and start2 >= start and \
end2 <= end and height2 < height:
minimal = False
break
if not minimal:
continue
minimal_constituents.append(key)
return minimal_constituents
# HERE BE DRAGONS
# The following methods implement various searches through the AMR necessary to
# produce the heuristic attachment of unaligned edges described in the paper.
def amr_reachable_h(edges, amr, predicate, expander, seen=None):
if seen == None:
seen = set()
for e in edges:
if e in seen:
continue
if not predicate(e):
continue
seen.add(e)
amr_reachable_h(expander(e), amr, predicate, expander, seen)
return seen
def a_parents(edge, amr):
return amr.in_edges(edge[0])
def a_children(edge, amr):
for t in edge[2]:
return amr.out_edges(t)
def amr_reachable_forward(edges, amr, predicate):
return amr_reachable_h(edges, amr, predicate, lambda e: a_parents(e, amr))
def amr_reachable_backward(edges, amr, predicate):
return amr_reachable_h(edges, amr, predicate, lambda e: a_children(e, amr))
def amr_reachable_nothru_i(edge, amr, predicate, reachable, seen):
if edge in seen:
return
seen.add(edge)
if all(c in reachable for c in a_parents(edge, amr)):
for c in a_parents(edge, amr):
if all(p in reachable for p in a_children(edge, amr)):
amr_reachable_nothru_i(c, amr, predicate, reachable, seen)
if all(p in reachable for p in a_children(edge, amr)):
for p in a_children(edge, amr):
if all(c in reachable for c in a_parents(edge, amr)):
amr_reachable_nothru_i(p, amr, predicate, reachable, seen)
def amr_reachable_nothru(edges, amr, predicate=lambda e: True):
forward = amr_reachable_forward(edges, amr, predicate)
backward = amr_reachable_backward(edges, amr, predicate)
reachable = forward | backward
seen = set()
for edge in edges:
amr_reachable_nothru_i(edge, amr, predicate, reachable, seen)
return seen
def minimal_frontier(frontier):
"""
Extracts the minimal frontier set from the given frontier set.
"""
min_frontier = []
for f in frontier:
fstart, fend = f[0].span
minimal = True
for g in frontier:
gstart, gend = g[0].span
if gstart >= fstart and gend <= fend and g[0].height() < f[0].height():
minimal = False
break
if minimal:
min_frontier.append(f)
return min_frontier
def frontier_edges(amr, tree, alignments):
"""
Extracts the frontier set.
"""
frontier = []
constituents = {}
if isinstance(tree, Tree):
for constituent in tree.subtrees():
key = (constituent.span[0], constituent.span[1], constituent.height())
assert key not in constituents
constituents[key] = constituent
tree_aligned = ddict(set)
for edge in alignments:
for index in alignments[edge]:
for key in constituents:
start,end,height = key
if start <= index < end:
tree_aligned[key].add(index)
aligned_constituents = minimal_aligned(constituents, tree_aligned)
for key in aligned_constituents:
start,end,height = key
constituent = constituents[key]
aligned_edges = [e for e in alignments if all(start <= i < end for i in
alignments[e])]
if constituent == tree:
reachable_edges = amr.triples()
else:
reachable_edges = amr_reachable_nothru(aligned_edges, amr,
lambda e: e in aligned_edges or e not in alignments)
aligned_fragment = Dag.from_triples(reachable_edges)
if len(aligned_fragment.root_edges()) == 1:
frontier.append((constituent, aligned_fragment))
min_frontier = minimal_frontier(frontier)
min_frontier_sorted = sorted(min_frontier, key = lambda m:
len(list(m[0].subtrees())))
return min_frontier_sorted
def collapse_constituent(tree, constituent, label):
"""
Shortcut: replaces a constituent with a single nonterminal label.
"""
return replace_constituent(tree, constituent, str(label))
def replace_constituent(tree, constituent, new_constituent):
"""
Replaces one constituent in this tree with another.
"""
# We injected span, so the standard __eq__ check doesn't look for it
if tree == constituent and (not isinstance(tree, Tree) or tree.span ==
constituent.span):
return new_constituent
if not isinstance(tree, Tree):
return tree
n_tree = Tree(tree.node, [replace_constituent(subtree, constituent,
new_constituent) for subtree in tree])
n_tree.span = tree.span
return n_tree
def collapse_alignments(alignments, amr_fragment, new_triple):
"""
Adjusts alignments when replacing collapsing graph & tree fragments.
"""
new_alignments = {}
new_triple_alignment = []
for triple in alignments:
if triple in amr_fragment.triples():
new_triple_alignment += alignments[triple]
else:
new_alignments[triple] = alignments[triple]
new_triple_alignment = list(set(new_triple_alignment))
new_alignments[new_triple] = new_triple_alignment
return new_alignments
def make_rule(frontier_pair, amr, tree, align, next_index):
"""
Creates a new rule with the given parts, and collapses these parts in the
original graph and tree.
"""
constituent, amr_fragment = frontier_pair
outside_edges = [e for e in amr.triples() if e not in amr_fragment.triples()]
root_label = amr_fragment.root_edges()[0][1]
if isinstance(root_label, NonterminalLabel):
symbol = root_label.label
m = re.match(r'(.+)_(.+)_(\d+)', symbol)
role = m.group(1)
else:
if ':' in root_label:
role, concept = root_label.split(':')
else:
role = root_label
external_nodes = amr.find_external_nodes(amr_fragment)
if len(external_nodes) == 0:
external_nodes = [amr_fragment.find_leaves()[0]]
# WARNING: destructive. Unfortunately we can't make the change any earlier.
# TODO why?
amr_fragment.external_nodes = external_nodes
symbol = '%s_%s_%d' % (role, constituent.node, len(external_nodes))
label = NonterminalLabel(symbol, next_index)
new_triple = (amr_fragment.roots[0], label, tuple(external_nodes))
new_amr = amr.collapse_fragment(amr_fragment, label)
assert new_triple in new_amr.triples()
new_tree = collapse_constituent(tree, constituent, label)
new_alignments = collapse_alignments(align, amr_fragment, new_triple)
rule = Rule(0, symbol, 1, amr_fragment, constituent, original_index =
next_index)
return rule, new_amr, new_tree, new_alignments, next_index+1
def make_composed_rule(rule, cdict):
"""
Creates a composed rule by replacing every nonterminal in this rule's RHS with
the graph and tree fragment specified in cdict.
"""
for label, crule in cdict.items():
replacement_triple_l = [e for e in rule.amr.triples() if e[1] == label]
assert len(replacement_triple_l) == 1
replacement_fragment = Dag.from_triples(replacement_triple_l)
new_amr = rule.amr.replace_fragment(replacement_fragment, crule.amr)
new_tree = replace_constituent(rule.parse, str(label), crule.parse)
new_rule = Rule(rule.rule_id, rule.symbol, rule.weight, new_amr, new_tree,
original_index = rule.original_index)
rule = new_rule
return rule
def make_composed_rules(rules, max_depth):
"""
Finds all possible composed rules, up to the specified max depth.
"""
composed_rules = []
# add all base rules
for rule in rules:
composed_rules.append(rule)
# incrementally compose rules up to the max depth
for i in range(1, max_depth):
composed_rules_this_depth = []
# consider each rule...
for rule in rules:
nt_labels = [e[1] for e in rule.amr.triples() if isinstance(e[1],
NonterminalLabel)]
if len(nt_labels) == 0:
continue
# ...and try to replace its nonterminals with the fragments from other
# composed rules
# we cheat here by relying on the fact that nonterminal indices are
# never repeated in the induced derivation of a training example (so if a
# rule has original_index n, we are sure it can only replace the
# nonterminal with the same index)
composition_candidates = {}
for label in nt_labels:
composition_candidates[label] = []
for crule in composed_rules:
if crule.original_index != label.index:
continue
composition_candidates[label].append(crule)
# we have a set of possible substitutions (of varying depth) for each
# nonterminal; now we consider every possible way of combining them (the
# Cartesian product of all the candidate lists)
comp_cand_list = []
label_list = []
for label, comp_cand in composition_candidates.items():
label_list.append(label)
comp_cand_list.append(comp_cand)
compositions = itertools.product(*comp_cand_list)
compositions = list(compositions)
# now actually create the composed rules
for composition in compositions:
cdict = dict(zip(label_list, composition))
composed_rule = make_composed_rule(rule, cdict)
composed_rules_this_depth.append(composed_rule)
composed_rules += composed_rules_this_depth
return [rule.canonicalize_amr() for rule in composed_rules]
def extract_rules(amr, tree, align, composition_depth):
"""
Extracts all possible rules from the given tree-string pair.
"""
rules = []
frontier = frontier_edges(amr, tree, align)
next_index = 0
while frontier:
rule, amr, tree, align, next_index = make_rule(frontier[0], amr, tree,
align, next_index)
rules.append(rule)
frontier = frontier_edges(amr, tree, align)
composed_rules = make_composed_rules(rules, composition_depth)
return composed_rules
def collect_counts(rules):
"""
Collects counts of the number of times each rule is used in the training data
for the "observed derivation" ML estimate of rule weights.
"""
rule_mapper = {}
rule_counter = {}
rule_normalizer = ddict(lambda:0.0)
for rule in rules:
rule_key = '%s:::%s:::%s' % (rule.symbol, rule.amr, rule.parse)
rule_key = re.sub(r'\s+', ' ', rule_key)
rule_key = re.sub(r'\[\d+\]', '[D]', rule_key)
if rule_key not in rule_mapper:
rule_mapper[rule_key] = rule
rule_counter[rule_key] = 1
else:
rule_counter[rule_key] += 1
rule_normalizer[rule.symbol] += 1
grammar = {}
next_id = 0
for key in rule_mapper:
rule = rule_mapper[key]
count = rule_counter[key]
norm = rule_normalizer[rule.symbol]
g_rule = Rule(next_id, rule.symbol, float(count)/norm, rule.amr, rule.parse)
grammar[next_id] = g_rule
next_id += 1
return grammar
if __name__ == "__main__":
extractor = ExtractorSynSem()
extractor.main(sys.argv)
| mit |
apache/incubator-airflow | airflow/providers/cncf/kubernetes/sensors/spark_kubernetes.py | 5 | 4691 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Dict, Optional
from kubernetes import client
from airflow.exceptions import AirflowException
from airflow.providers.cncf.kubernetes.hooks.kubernetes import KubernetesHook
from airflow.sensors.base import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class SparkKubernetesSensor(BaseSensorOperator):
"""
    Checks a sparkApplication object in a Kubernetes cluster:
.. seealso::
For more detail about Spark Application Object have a look at the reference:
https://github.com/GoogleCloudPlatform/spark-on-k8s-operator/blob/v1beta2-1.1.0-2.4.5/docs/api-docs.md#sparkapplication
:param application_name: spark Application resource name
:type application_name: str
:param namespace: the kubernetes namespace where the sparkApplication reside in
:type namespace: str
:param kubernetes_conn_id: the connection to Kubernetes cluster
:type kubernetes_conn_id: str
:param attach_log: determines whether logs for driver pod should be appended to the sensor log
:type attach_log: bool
"""
template_fields = ("application_name", "namespace")
FAILURE_STATES = ("FAILED", "UNKNOWN")
SUCCESS_STATES = ("COMPLETED",)
@apply_defaults
def __init__(
self,
*,
application_name: str,
attach_log: bool = False,
namespace: Optional[str] = None,
kubernetes_conn_id: str = "kubernetes_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.application_name = application_name
self.attach_log = attach_log
self.namespace = namespace
self.kubernetes_conn_id = kubernetes_conn_id
self.hook = KubernetesHook(conn_id=self.kubernetes_conn_id)
def _log_driver(self, application_state: str, response: dict) -> None:
if not self.attach_log:
return
status_info = response["status"]
if "driverInfo" not in status_info:
return
driver_info = status_info["driverInfo"]
if "podName" not in driver_info:
return
driver_pod_name = driver_info["podName"]
namespace = response["metadata"]["namespace"]
log_method = self.log.error if application_state in self.FAILURE_STATES else self.log.info
try:
log = ""
for line in self.hook.get_pod_logs(driver_pod_name, namespace=namespace):
log += line.decode()
log_method(log)
except client.rest.ApiException as e:
self.log.warning(
"Could not read logs for pod %s. It may have been disposed.\n"
"Make sure timeToLiveSeconds is set on your SparkApplication spec.\n"
"underlying exception: %s",
driver_pod_name,
e,
)
def poke(self, context: Dict) -> bool:
self.log.info("Poking: %s", self.application_name)
response = self.hook.get_custom_object(
group="sparkoperator.k8s.io",
version="v1beta2",
plural="sparkapplications",
name=self.application_name,
namespace=self.namespace,
)
try:
application_state = response["status"]["applicationState"]["state"]
except KeyError:
return False
if self.attach_log and application_state in self.FAILURE_STATES + self.SUCCESS_STATES:
self._log_driver(application_state, response)
if application_state in self.FAILURE_STATES:
raise AirflowException("Spark application failed with state: %s" % application_state)
elif application_state in self.SUCCESS_STATES:
self.log.info("Spark application ended successfully")
return True
else:
self.log.info("Spark application is still in state: %s", application_state)
return False
| apache-2.0 |
mfm24/ChordViz | ChordViz.py | 1 | 20081 | # -*- coding: utf-8 -*-
"""
Created on Fri May 3 21:09:10 2013
@author: matt
# based on MyPlayer3_Callback (which is newer than MyPlayer3.py)
"""
from __future__ import division
import time, math, logging
import numpy as np
from threading import Lock, Thread
import itertools
# not sure I've added correct path in launchd.conf
# and export doesn't obviously work
import sys
sys.path.append('/Users/matt/Dropbox/personal/dev/PythonLibs/')
try:
from uidecorators import ui_decorators
use_ui = True
except ImportError:
    # a bit nasty. We'll create an object where all members
# return a decorator function returning a decorator that does nothing!
class FakeUIDec:
def __getattr__(self, name):
def no_wrap(*args, **kwargs):
def wrap_creator(func):
def w(*args, **kwargs):
func(*args, **kwargs)
return w
return wrap_creator
return no_wrap
ui_decorators = FakeUIDec()
use_ui=False
try:
import pyaudio
p = pyaudio.PyAudio()
has_pyaudio = True
except ImportError:
logging.warn("PyAudio not found! - Will not be able to output any audio!")
has_pyaudio = False
def play_waveform(w):
def callback(in_data, frame_count, time_info, status):
# this requests upto 1024 frames?
with w.datalock:
ndata = w.data
if ndata is not None:
return (np.hstack([ndata]*(frame_count//1024)), pyaudio.paContinue)
else:
return (None, pyaudio.paComplete)
if has_pyaudio:
# open stream using callback (3)
play_waveform.stream = p.open(format=pyaudio.paInt16,
channels=1,
rate=w.rate,
output=True,
frames_per_buffer=w.size,
stream_callback=callback)
    else:
        play_waveform.stream = None
max_frequency = 22100 # we stop making notes above this
note_types = {
"PureTone": lambda harmonic: 1 if harmonic==0 else 0,
"Poisson0.5": lambda harmonic: poisson(0.5, harmonic),
"Poisson1": lambda harmonic: poisson(1, harmonic),
"Poisson2": lambda harmonic: poisson(2, harmonic),
"Poisson3": lambda harmonic: poisson(3, harmonic),
"Lorentz1": lambda harmonic: 1.0/(1.0+harmonic**2),
"Lorentz10": lambda harmonic: 10.0/(10.0+harmonic**2),
"Equal": lambda harmonic: 1,
"EqualOdd": lambda harmonic: 1 if harmonic%2==1 or harmonic==0 else 0,
"EqualEven": lambda harmonic: 1 if harmonic%2==0 else 0,
"OneOverX": lambda harmonic: 1.0/(harmonic+1.0)
}
equal_temperament_notes = [2 ** (x / 12.0) for x in range(12)]
just_intonation_notes = [1, 16 / 15., 9 / 8., 6 / 5., 5 / 4., 4 / 3., 45 / 32., 3 / 2., 8 / 5., 5 / 3., 16 / 9., 15 / 8.]
twelve_tone_names = ["I", "IIb", "II", "IIIb", "III", "IV", "IV#", "V", "VIb", "VI", "VIIb", "VII"]
class Waveform(object):
def __init__(self, size=1024*16, rate=44100):
self.size = size
self.rate = rate
self.data = np.zeros((size), dtype=np.int16)
self.datalock = Lock()
self.volume_amp = 0.1
self.form = lambda note: poisson(2, note)
self.notetype="Poisson1"
self.notefreq=440
self.on_notes_changed=[]
self._harmonics_slice = None
self.clear_notes()
def clear_notes(self):
self.notes = []
self()
def set_notes(self, notes):
self.clear_notes()
self.add_notes(notes)
self()
def add_notes(self, notes):
self.notes.append(list(notes))
self()
def __call__(self):
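        # Build the waveform in the frequency domain: drop each note's magnitude
        # into the bin nearest its pitch, mirror it so the spectrum stays
        # conjugate-symmetric (i.e. real), then transform to the time-domain
        # buffer that the audio callback loops over.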
newdata = np.zeros((self.size), dtype=np.complex64)
for notegroup in self.notes:
for freq, mag in notegroup:
dphase=int (freq*self.size / self.rate )
logging.info("Adding note at pixel %s", dphase)
if dphase > len(newdata)/2:
continue # this is nyquist, can't go any higher
#let's scale mag by number of notes
newdata[dphase]=self.volume_amp*mag*32765/2
#make ft real
newdata[-dphase] = np.conj(newdata[dphase])
sqrtsumsq = math.sqrt((newdata**2).sum())
if sqrtsumsq:
newdata *= self.volume_amp * 2.0 * 32767.0 / sqrtsumsq
printimag = 0
if printimag:
complex_d=np.imag(np.fft.fft(newdata));
print "imag magnitude: ", np.sqrt(np.sum(complex_d**2))
newdata = np.asarray(np.real(np.fft.fft(newdata)), dtype=np.int16)
with self.datalock:
self.data = newdata
for f in self.on_notes_changed:
f()
def get_volume(self):
v = math.log(self.volume_amp, 10)*20
return v
@ui_decorators.slider(getfunc=get_volume, maximum=0, minimum=-50, scale=1)
def volume(self, value):
self.volume_amp = 10**(value/20.0)
self()
def get_note_type(self):
return self.notetype
@ui_decorators.combobox(
getfunc=get_note_type,
options=note_types.keys())
def note_type(self, t):
self.notetype = t
def get_harmonics_slice(self):
if self._harmonics_slice:
return ",".join(self._harmonics_slice)
else:
return ""
@ui_decorators.textbox(getfunc=get_harmonics_slice)
def harmonics_slice(self, n):
"""
Sets the harmonics to display
Should be either [start:]stop[:step]
or else a,b,c where a,b,c are indices to choose
"""
if n=="":
self._harmonics_slice = None
return
if ':' in n:
sc = [int(x or "0") for x in n.split(":")]
if len(sc)==1:
self._harmonics_slice = (None, sc[0], None)
elif len(sc) == 2:
self._harmonics_slice = (sc[0], sc[1], None)
else:
self._harmonics_slice = (sc[0], sc[1], sc[2])
else:
self._harmonics_slice = [int(x or "-1") for x in n.split(',')]
def get_root_frequency(self):
return self.notefreq
@ui_decorators.textbox(getfunc=get_root_frequency)
def root_frequency(self, val):
self.notefreq = float(val)
self()
def add_form(self, root):
if isinstance(self._harmonics_slice, list):
all_notes = list(notes_from_func(note_types[self.notetype], root))
notes = []
for i in self._harmonics_slice:
notes.append(all_notes[i])
else:
slice_args = self._harmonics_slice or (None,)
notes = itertools.islice(
notes_from_func(note_types[self.notetype], root),
*slice_args)
self.add_notes(notes)
@ui_decorators.button
def clear(self):
self.clear_notes()
@ui_decorators.button
def note_root(self):
self.add_form(self.notefreq)
self()
@ui_decorators.button
def note_major3rd(self):
self.add_form(self.notefreq*5.0/4.0)
self()
@ui_decorators.button
def note_fifth(self):
self.add_form(self.notefreq*6.0/4.0)
self()
@ui_decorators.button
def play_major_chord(self):
self.play_threaded_chord([self.notefreq,
self.notefreq*5.0/4.0,
self.notefreq*6.0/4.0])
@ui_decorators.button
def test(self):
self.play_threaded_chord([self.notefreq,
self.notefreq*7.0/8.0,
self.notefreq*6.0/4.0])
@ui_decorators.button
def play_minor_chord(self):
self.play_threaded_chord([self.notefreq,
self.notefreq*12.0/10.0,
self.notefreq*15.0/10.0])
@ui_decorators.button
def play_minor_chord_fifth(self):
self.play_threaded_chord([self.notefreq,
self.notefreq*4.0/3.0,
self.notefreq*8.0/5.0])
def play_threaded_chord(self, roots):
def run_through():
for i,n in enumerate(roots):
self.clear_notes()
[self.add_form([]) for t in range(i)]
self.add_form(n)
time.sleep(1.5)
self.clear_notes()
for n in roots:
self.add_form(n)
Thread(target=run_through).start()
# run in interactive shell and use set_notes to play?
def poisson(l, n):
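    # Poisson pmf: P(n; l) = exp(-l) * l**n / n!, used above to weight the n-th harmonic.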
return math.exp(-l)*l**n/math.factorial(n)
def notes_from_func(func, root):
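    # Yields (frequency, magnitude) pairs for the fundamental and its harmonics;
    # e.g. notes_from_func(lambda h: 1, 440) gives (440, 1), (880, 1), (1320, 1), ...
    # stopping once the frequency would exceed max_frequency.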
for h in itertools.count():
mag = func(h)
# we cut off until we reach max_frequency
if root+root*h > max_frequency:
return
yield root+root*h, mag
def cleanup():
if has_pyaudio:
play_waveform.stream.close()
p.terminate()
######################## UI Stuff ############################
# this could go in a separate file, but keeping it here for the
# moment
# creating a UI Options class for modifying the visualisation using
# out qt decorators
class UIOptions:
def __init__(self):
self._linear_freq_in_octaves = True
self.virtual_size = 1500,1500
self._inverse = True
self._show_just_notes = True
self._show_base_spiral = True
self._show_ET_notes = False # ET=equal temperament
def get_linear_freq_in_octaves(self):
return self._linear_freq_in_octaves
@ui_decorators.checkbox(getfunc=get_linear_freq_in_octaves)
def linear_freq_in_octaves(self, newval):
self._linear_freq_in_octaves = newval
notes_changed()
def get_show_base_spiral(self):
return self._show_base_spiral
@ui_decorators.checkbox(getfunc=get_show_base_spiral)
def show_base_spiral(self, newval):
self._show_base_spiral = newval
notes_changed()
def get_inverse(self):
return self._inverse
@ui_decorators.checkbox(getfunc=get_inverse)
def inverse(self, newval):
self._inverse = newval
notes_changed()
def get_show_just_notes(self):
return self._show_just_notes
@ui_decorators.checkbox(getfunc=get_show_just_notes)
def show_just_notes(self, newval):
self._show_just_notes = newval
notes_changed()
def get_show_ET_notes(self):
return self._show_ET_notes
@ui_decorators.checkbox(getfunc=get_show_ET_notes)
def show_ET_notes(self, newval):
self._show_ET_notes = newval
notes_changed()
def make_note_lines(root, named_notes, width, radius):
"""
For the dictionary named_notes, draws thin lines for each note
adding the key for the note to the SVG.
This way we can overlay scales on the diagrams.
"""
lines = []
for name, freq in named_notes.iteritems():
(x1, y1), theta = get_pos_theta_for_note(freq, root, 0, 0)
font_size = radius/16.0
lines.append(
'<line x1="{x1}" y1="{y1}" x2="{x2}" y2="{y2}" stroke-width="{width}"/>'.format(
x1=x1, x2=x1 + 2 * radius * math.sin(theta),
y1=y1, y2=y1 - 2 * radius * math.cos(theta),
width=width))
lines.append('<text x="{x}" y="{y}" font-size="{fs}">{text}</text>'.format(
x=x1 + radius * math.sin(theta),
y=y1 - radius * math.cos(theta),
text=name, fs=font_size))
return "\n".join(lines)
def get_pos_theta_for_note(f, root, root_radius, length):
"""
Return (x,y),theta where (x,y) is the starting position of the note
and theta is the angle the note should have
"""
# first, we calculate the octave and theta for the root
logf = math.log(f / root, 2)
note_fraction, octave = math.modf(logf)
if ui_opts.get_linear_freq_in_octaves():
note = (2**note_fraction - 1)
else:
note = note_fraction
theta = note * 2.0 * math.pi
centerx, centery = (x / 2 for x in ui_opts.virtual_size)
r = root_radius + (octave + note_fraction) * length
x = centerx + r * math.sin(theta)
y = centery - r * math.cos(theta)
return (x,y), theta
def make_spiral_lines_from_notes(root, notes,
length=75, root_radius=100,
stroke_width_scale=15):
"""
    Is there a way to represent notes where octaves are still separated but
we can see notes of the same pitch?
We could draw a spiral, where an octave is 360 degrees and on the next
ring out.
There's a similar idea here:
http://nastechservices.com/Spectrograms.html
    How should we represent a 3:2 ratio? If we just take log(x,2)*2*pi
then 3/2 is at 210deg (or 3.67rad). Is it worth making the scale linear,
and putting 3/2 at 180deg? We could also spiral so that 3/2f gets us to 180
deg then we stretch out the remaining part of the curve?
We'll try the linear for now.
It works, but not all 3/2 notes are 180deg from each other
(if the higher note is past the root, it's not)
Is there a way to do this? Maybe not, eg we make 5th = 3r/2 opposite root
and 3/2r = 9/4 != root and yet root still needs to be 180deg from it
"""
width_gamma = 0.2 # we use width^this as the width
centerx, centery = (x / 2 for x in ui_opts.virtual_size)
lines = []
for f, m in notes:
# we split the note into octave and note (0 - 1)
width = stroke_width_scale * math.pow(m, width_gamma)
(x1, y1), theta = get_pos_theta_for_note(f, root, root_radius, length)
x2 = x1 + 0.9 * length * math.sin(theta)
y2 = y1 - 0.9 * length * math.cos(theta)
lines.append('<line x1="{x1}" y1="{y1}" x2="{x2}" y2="{y2}" stroke-width="{width}"/>'.format(
x1=x1, x2=x2, y1=y1, y2=y2,
width=width))
return "\n".join(lines)
def make_spiral_octave_lines(root, length=75, root_radius=100, max_f=22100):
"""
Starting with the root note, draw the spiral on which
any higher frequency notes will sit. This way we can count
harmonics more easily
"""
width = 0.5
(x1, y1), _ = get_pos_theta_for_note(root, root, root_radius, length)
lines = []
step = int(root/50) or 1
for f in range(int(root), int(max_f), step):
(x2, y2), theta = get_pos_theta_for_note(f, root, root_radius, length)
lines.append('<line x1="{x1}" y1="{y1}" x2="{x2}" y2="{y2}" stroke-width="{width}"/>'.format(
x1=x1, x2=x2, y1=y1, y2=y2,
width=width))
x1, y1 = x2, y2
return "\n".join(lines)
rgb_colors = [0xFF0000, 0x00FF00, 0x0000FF]
cym_colors = [0x00FFFF, 0xFF00FF, 0xFFFF00]
white = 0xFFFFFFFF
black = 0xFF000000
# some QT specific stuff follows:
import PySide.QtCore
import PySide.QtGui
import PySide.QtSvg
def render_svg(svg, qp):
r = PySide.QtSvg.QSvgRenderer()
w,h = ui_opts.virtual_size
ret = '<svg xmlns="http://www.w3.org/2000/svg" version="1.1" width="{}" height="{}">'.format(w, h)
ret += svg
ret += "</svg>"
# print ret
r.load(PySide.QtCore.QByteArray(ret))
assert r.isValid()
r.render(qp)
def raw_svg_to_group(svg, color, extras=""):
ret = '<g stroke="#{0:06X}" fill="#{0:06X}" {1}>'.format(
color & 0xFFFFFF, extras)
ret += svg
ret += "</g>"
return ret
from uidecorators.qt_framework import Framework
def notes_changed(*args):
mode = "inverse" if ui_opts.get_inverse() else "normal"
qim = PySide.QtGui.QImage(d.widget().width(), d.widget().height(), PySide.QtGui.QImage.Format.Format_ARGB32)
qp = PySide.QtGui.QPainter(qim)
qp.setRenderHint(qp.Antialiasing)
qp.setRenderHint(qp.SmoothPixmapTransform)
if mode == "inverse":
#qim.fill(white)
qp.setCompositionMode(qp.CompositionMode.CompositionMode_Darken)
colors = cym_colors
default_foreground = black
default_background = white
mode = "darken"
else:
#qim.fill(black)
qp.setCompositionMode(qp.CompositionMode.CompositionMode_Lighten)
colors = rgb_colors
default_foreground = white
default_background = black
mode = "lighten"
default_foreground = 0x888888
root = w.get_root_frequency()
all_svgs=[]
num_octaves = math.log(max_frequency / root, 2)
# let's scale note height and width with number of octaves we're drawing
note_length = 400.0 / num_octaves
note_width = 500 / 2**num_octaves
# we'll set the background with a svg rect
svg = raw_svg_to_group('<rect width="1500" height="1500" />', default_background)
all_svgs.append(svg)
for check, notes in [(ui_opts.get_show_just_notes, just_intonation_notes),
(ui_opts.get_show_ET_notes, equal_temperament_notes)]:
if check():
overlay = make_note_lines(
root,
{i: f * root for i, f in zip(twelve_tone_names, notes)},
0.5, 600)
svg = raw_svg_to_group(overlay, default_foreground)
all_svgs.append(svg)
if ui_opts.get_show_base_spiral():
overlay = make_spiral_octave_lines(root, length=note_length)
svg = raw_svg_to_group(overlay, default_foreground)
all_svgs.append(svg)
theta = 0
width, height = ui_opts.virtual_size
for notegroup, col in zip(w.notes, colors):
notegrp_svg = make_spiral_lines_from_notes(
root, notegroup, length=note_length, stroke_width_scale=note_width)
notegrp_svg += '<circle r="{}" cx="{}" cy="{}"/>'.format(
width / 30.0, width / 10.0 + width / 45.0 * math.sin(theta),
width / 10.0 + width / 45.0 * math.cos(theta))
theta += math.pi*2.0/len(w.notes)
# convert to a svg group with some extra tags to make inkscape happy
svg = raw_svg_to_group(
notegrp_svg, col,
extras='inkscape:groupmode="layer" filter="url(#blend)"')
all_svgs.append(svg)
# finally we'll render tham all
for svg in all_svgs:
render_svg(svg, qp)
# try to save an inkscape compatible svg file.
# we can add a darken/lighten filter, and we need to add
# enable-background="new" to the svg header and the
# inkscape ns:
with open("out.svg", 'w') as f:
f.write('<svg xmlns="http://www.w3.org/2000/svg" '
'xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" '
'version="1.1" width="{}" height="{}" '
'enable-background="new">'.format(width, height))
f.write('<filter id="blend">'
'<feBlend in2="BackgroundImage" mode="{0}" />'
'</filter>'.format(mode))
f.write("\n".join(all_svgs))
f.write("</svg>")
d.widget().setPixmap(PySide.QtGui.QPixmap.fromImage(qim))
# qim.save("out.png", 'PNG')
qp = None # we have to make sure qim is deleted before QPainter?
if __name__=="__main__":
w=Waveform()
play_waveform(w)
if use_ui:
ui_opts = UIOptions()
f = Framework()
f.get_main_window().resize(800,600)
d=PySide.QtGui.QDockWidget("Note Visualization")
d.setWidget(PySide.QtGui.QLabel())
f.get_main_window().addDockWidget(PySide.QtCore.Qt.RightDockWidgetArea, d)
# play notes is threaded, so we need to call notes_changed from the
# ui thread.
w.on_notes_changed.append(lambda: f.run_on_ui_thread(notes_changed))
f.display_widgets([f.get_obj_widget(w), f.get_obj_widget(ui_opts)])
f.close()
| mit |
ferabra/edx-platform | common/djangoapps/course_action_state/tests/test_managers.py | 126 | 7219 | # pylint: disable=invalid-name, attribute-defined-outside-init
"""
Tests for basic common operations related to Course Action State managers
"""
from ddt import ddt, data
from django.test import TestCase
from collections import namedtuple
from opaque_keys.edx.locations import CourseLocator
from course_action_state.models import CourseRerunState
from course_action_state.managers import CourseActionStateItemNotFoundError
# Sequence of Action models to be tested with ddt.
COURSE_ACTION_STATES = (CourseRerunState, )
class TestCourseActionStateManagerBase(TestCase):
"""
Base class for testing Course Action State Managers.
"""
def setUp(self):
super(TestCourseActionStateManagerBase, self).setUp()
self.course_key = CourseLocator("test_org", "test_course_num", "test_run")
@ddt
class TestCourseActionStateManager(TestCourseActionStateManagerBase):
"""
Test class for testing the CourseActionStateManager.
"""
@data(*COURSE_ACTION_STATES)
def test_update_state_allow_not_found_is_false(self, action_class):
with self.assertRaises(CourseActionStateItemNotFoundError):
action_class.objects.update_state(self.course_key, "fake_state", allow_not_found=False)
@data(*COURSE_ACTION_STATES)
def test_update_state_allow_not_found(self, action_class):
action_class.objects.update_state(self.course_key, "initial_state", allow_not_found=True)
self.assertIsNotNone(
action_class.objects.find_first(course_key=self.course_key)
)
@data(*COURSE_ACTION_STATES)
def test_delete(self, action_class):
obj = action_class.objects.update_state(self.course_key, "initial_state", allow_not_found=True)
action_class.objects.delete(obj.id)
with self.assertRaises(CourseActionStateItemNotFoundError):
action_class.objects.find_first(course_key=self.course_key)
@ddt
class TestCourseActionUIStateManager(TestCourseActionStateManagerBase):
"""
Test class for testing the CourseActionUIStateManager.
"""
def init_course_action_states(self, action_class):
"""
Creates course action state entries with different states for the given action model class.
Creates both displayable (should_display=True) and non-displayable (should_display=False) entries.
"""
def create_course_states(starting_course_num, ending_course_num, state, should_display=True):
"""
Creates a list of course state tuples by creating unique course locators with course-numbers
from starting_course_num to ending_course_num.
"""
CourseState = namedtuple('CourseState', 'course_key, state, should_display')
return [
CourseState(CourseLocator("org", "course", "run" + str(num)), state, should_display)
for num in range(starting_course_num, ending_course_num)
]
NUM_COURSES_WITH_STATE1 = 3
NUM_COURSES_WITH_STATE2 = 3
NUM_COURSES_WITH_STATE3 = 3
NUM_COURSES_NON_DISPLAYABLE = 3
# courses with state1 and should_display=True
self.courses_with_state1 = create_course_states(
0,
NUM_COURSES_WITH_STATE1,
'state1'
)
# courses with state2 and should_display=True
self.courses_with_state2 = create_course_states(
NUM_COURSES_WITH_STATE1,
NUM_COURSES_WITH_STATE1 + NUM_COURSES_WITH_STATE2,
'state2'
)
# courses with state3 and should_display=True
self.courses_with_state3 = create_course_states(
NUM_COURSES_WITH_STATE1 + NUM_COURSES_WITH_STATE2,
NUM_COURSES_WITH_STATE1 + NUM_COURSES_WITH_STATE2 + NUM_COURSES_WITH_STATE3,
'state3'
)
# all courses with should_display=True
self.course_actions_displayable_states = (
self.courses_with_state1 + self.courses_with_state2 + self.courses_with_state3
)
# courses with state3 and should_display=False
self.courses_with_state3_non_displayable = create_course_states(
NUM_COURSES_WITH_STATE1 + NUM_COURSES_WITH_STATE2 + NUM_COURSES_WITH_STATE3,
NUM_COURSES_WITH_STATE1 + NUM_COURSES_WITH_STATE2 + NUM_COURSES_WITH_STATE3 + NUM_COURSES_NON_DISPLAYABLE,
'state3',
should_display=False,
)
# create course action states for all courses
for CourseState in self.course_actions_displayable_states + self.courses_with_state3_non_displayable:
action_class.objects.update_state(
CourseState.course_key,
CourseState.state,
should_display=CourseState.should_display,
allow_not_found=True
)
def assertCourseActionStatesEqual(self, expected, found):
"""Asserts that the set of course keys in the expected state equal those that are found"""
self.assertSetEqual(
set(course_action_state.course_key for course_action_state in expected),
set(course_action_state.course_key for course_action_state in found))
@data(*COURSE_ACTION_STATES)
def test_find_all_for_display(self, action_class):
self.init_course_action_states(action_class)
self.assertCourseActionStatesEqual(
self.course_actions_displayable_states,
action_class.objects.find_all(should_display=True),
)
@data(*COURSE_ACTION_STATES)
def test_find_all_for_display_filter_exclude(self, action_class):
self.init_course_action_states(action_class)
for course_action_state, filter_state, exclude_state in (
(self.courses_with_state1, 'state1', None), # filter for state1
(self.courses_with_state2, 'state2', None), # filter for state2
(self.courses_with_state2 + self.courses_with_state3, None, 'state1'), # exclude state1
(self.courses_with_state1 + self.courses_with_state3, None, 'state2'), # exclude state2
(self.courses_with_state1, 'state1', 'state2'), # filter for state1, exclude state2
([], 'state1', 'state1'), # filter for state1, exclude state1
):
self.assertCourseActionStatesEqual(
course_action_state,
action_class.objects.find_all(
exclude_args=({'state': exclude_state} if exclude_state else None),
should_display=True,
**({'state': filter_state} if filter_state else {})
)
)
def test_kwargs_in_update_state(self):
destination_course_key = CourseLocator("org", "course", "run")
source_course_key = CourseLocator("source_org", "source_course", "source_run")
CourseRerunState.objects.update_state(
course_key=destination_course_key,
new_state='state1',
allow_not_found=True,
source_course_key=source_course_key,
)
found_action_state = CourseRerunState.objects.find_first(course_key=destination_course_key)
self.assertEquals(source_course_key, found_action_state.source_course_key)
| agpl-3.0 |
pymedusa/SickRage | ext/boto/sdb/db/blob.py | 153 | 2437 | # Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.compat import six
class Blob(object):
"""Blob object"""
def __init__(self, value=None, file=None, id=None):
self._file = file
self.id = id
self.value = value
@property
def file(self):
from StringIO import StringIO
if self._file:
f = self._file
else:
f = StringIO(self.value)
return f
def __str__(self):
return six.text_type(self).encode('utf-8')
def __unicode__(self):
if hasattr(self.file, "get_contents_as_string"):
value = self.file.get_contents_as_string()
else:
value = self.file.getvalue()
if isinstance(value, six.text_type):
return value
else:
return value.decode('utf-8')
def read(self):
if hasattr(self.file, "get_contents_as_string"):
return self.file.get_contents_as_string()
else:
return self.file.read()
def readline(self):
return self.file.readline()
def next(self):
return next(self.file)
def __iter__(self):
return iter(self.file)
@property
def size(self):
if self._file:
return self._file.size
elif self.value:
return len(self.value)
else:
return 0
| gpl-3.0 |
pymedusa/SickRage | ext/boto/ec2/__init__.py | 22 | 3094 | # Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""
This module provides an interface to the Elastic Compute Cloud (EC2)
service from AWS.
"""
from boto.ec2.connection import EC2Connection
from boto.regioninfo import RegionInfo, get_regions, load_regions
from boto.regioninfo import connect
RegionData = load_regions().get('ec2', {})
def regions(**kw_params):
"""
Get all available regions for the EC2 service.
You may pass any of the arguments accepted by the EC2Connection
object's constructor as keyword arguments and they will be
passed along to the EC2Connection object.
:rtype: list
:return: A list of :class:`boto.ec2.regioninfo.RegionInfo`
"""
return get_regions('ec2', connection_cls=EC2Connection)
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
:class:`boto.ec2.connection.EC2Connection`.
Any additional parameters after the region_name are passed on to
the connect method of the region object.
    :type region_name: str
:param region_name: The name of the region to connect to.
:rtype: :class:`boto.ec2.connection.EC2Connection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
if 'region' in kw_params and isinstance(kw_params['region'], RegionInfo)\
and region_name == kw_params['region'].name:
return EC2Connection(**kw_params)
return connect('ec2', region_name,
connection_cls=EC2Connection, **kw_params)
def get_region(region_name, **kw_params):
"""
Find and return a :class:`boto.ec2.regioninfo.RegionInfo` object
given a region name.
    :type region_name: str
    :param region_name: The name of the region.
:rtype: :class:`boto.ec2.regioninfo.RegionInfo`
:return: The RegionInfo object for the given region or None if
an invalid region name is provided.
"""
for region in regions(**kw_params):
if region.name == region_name:
return region
return None
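# Editor's sketch (not part of boto): typical usage, assuming AWS credentials
# are already configured via the boto config file or environment variables.
#
#     import boto.ec2
#     for region in boto.ec2.regions():
#         print(region.name)
#     conn = boto.ec2.connect_to_region('us-east-1')
#     if conn is not None:
#         reservations = conn.get_all_reservations()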
| gpl-3.0 |
GunoH/intellij-community | python/helpers/py2only/docutils/parsers/rst/languages/de.py | 121 | 3465 | # -*- coding: utf-8 -*-
# $Id: de.py 7223 2011-11-21 16:43:06Z milde $
# Authors: Engelbert Gruber <[email protected]>;
# Lea Wiemann <[email protected]>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
German-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
'achtung': 'attention',
'vorsicht': 'caution',
'code': 'code',
'gefahr': 'danger',
'fehler': 'error',
'hinweis': 'hint',
'wichtig': 'important',
'notiz': 'note',
'tipp': 'tip',
'warnung': 'warning',
'ermahnung': 'admonition',
'kasten': 'sidebar',
'seitenkasten': 'sidebar',
'thema': 'topic',
'zeilen-block': 'line-block',
'parsed-literal (translation required)': 'parsed-literal',
'rubrik': 'rubric',
'epigraph': 'epigraph',
'highlights (translation required)': 'highlights',
u'pull-quote': 'pull-quote', # commonly used in German too
u'seitenansprache': 'pull-quote', # cf. http://www.typografie.info/2/wiki.php?title=Seitenansprache
'zusammengesetzt': 'compound',
'verbund': 'compound',
u'container': 'container',
#'fragen': 'questions',
'tabelle': 'table',
'csv-tabelle': 'csv-table',
'list-table (translation required)': 'list-table',
u'mathe': 'math',
u'formel': 'math',
'meta': 'meta',
#'imagemap': 'imagemap',
'bild': 'image',
'abbildung': 'figure',
u'unverändert': 'raw',
u'roh': 'raw',
u'einfügen': 'include',
'ersetzung': 'replace',
'ersetzen': 'replace',
'ersetze': 'replace',
'unicode': 'unicode',
'datum': 'date',
'klasse': 'class',
'rolle': 'role',
u'default-role (translation required)': 'default-role',
u'title (translation required)': 'title',
'inhalt': 'contents',
'kapitel-nummerierung': 'sectnum',
'abschnitts-nummerierung': 'sectnum',
u'linkziel-fußfnoten': 'target-notes',
u'header (translation required)': 'header',
u'footer (translation required)': 'footer',
#u'fußfnoten': 'footnotes',
#'zitate': 'citations',
}
"""German name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
u'abkürzung': 'abbreviation',
'akronym': 'acronym',
u'code': 'code',
'index': 'index',
'tiefgestellt': 'subscript',
'hochgestellt': 'superscript',
'titel-referenz': 'title-reference',
'pep-referenz': 'pep-reference',
'rfc-referenz': 'rfc-reference',
'betonung': 'emphasis',
'fett': 'strong',
u'wörtlich': 'literal',
u'mathe': 'math',
'benannte-referenz': 'named-reference',
'unbenannte-referenz': 'anonymous-reference',
u'fußfnoten-referenz': 'footnote-reference',
'zitat-referenz': 'citation-reference',
'ersetzungs-referenz': 'substitution-reference',
'ziel': 'target',
'uri-referenz': 'uri-reference',
u'unverändert': 'raw',
u'roh': 'raw',}
"""Mapping of German role names to canonical role names for interpreted text.
"""
| apache-2.0 |
openfun/edx-platform | common/djangoapps/pipeline_mako/__init__.py | 140 | 2444 | from edxmako.shortcuts import render_to_string
from pipeline.conf import settings
from pipeline.packager import Packager
from pipeline.utils import guess_type
from static_replace import try_staticfiles_lookup
def compressed_css(package_name, raw=False):
package = settings.PIPELINE_CSS.get(package_name, {})
if package:
package = {package_name: package}
packager = Packager(css_packages=package, js_packages={})
package = packager.package_for('css', package_name)
if settings.PIPELINE:
return render_css(package, package.output_filename, raw=raw)
else:
paths = packager.compile(package.paths)
return render_individual_css(package, paths, raw=raw)
def render_css(package, path, raw=False):
template_name = package.template_name or "mako/css.html"
context = package.extra_context
url = try_staticfiles_lookup(path)
if raw:
url += "?raw"
context.update({
'type': guess_type(path, 'text/css'),
'url': url,
})
return render_to_string(template_name, context)
def render_individual_css(package, paths, raw=False):
tags = [render_css(package, path, raw) for path in paths]
return '\n'.join(tags)
def compressed_js(package_name):
package = settings.PIPELINE_JS.get(package_name, {})
if package:
package = {package_name: package}
packager = Packager(css_packages={}, js_packages=package)
package = packager.package_for('js', package_name)
if settings.PIPELINE:
return render_js(package, package.output_filename)
else:
paths = packager.compile(package.paths)
templates = packager.pack_templates(package)
return render_individual_js(package, paths, templates)
def render_js(package, path):
template_name = package.template_name or "mako/js.html"
context = package.extra_context
context.update({
'type': guess_type(path, 'text/javascript'),
'url': try_staticfiles_lookup(path)
})
return render_to_string(template_name, context)
def render_inline_js(package, js):
context = package.extra_context
context.update({
'source': js
})
return render_to_string("mako/inline_js.html", context)
def render_individual_js(package, paths, templates=None):
tags = [render_js(package, js) for js in paths]
if templates:
tags.append(render_inline_js(package, templates))
return '\n'.join(tags)
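# Editor's sketch (package names are hypothetical): these helpers are meant to
# be called from Mako templates to emit the <link>/<script> tags for a
# pipeline package defined in PIPELINE_CSS / PIPELINE_JS, e.g.
#
#     ## in a .html Mako template
#     ${compressed_css('style-vendor')}
#     ${compressed_js('main_vendor_js')}
#
# With settings.PIPELINE enabled a single compressed bundle is rendered;
# otherwise each source file in the package is rendered individually.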
| agpl-3.0 |
takeshineshiro/horizon | openstack_dashboard/dashboards/project/networks/tests.py | 6 | 99697 | # Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from django.utils.html import escape
import six
from horizon.workflows import views
from mox3.mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks.subnets import tables\
as subnets_tables
from openstack_dashboard.dashboards.project.networks import tables\
as networks_tables
from openstack_dashboard.dashboards.project.networks import workflows
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
INDEX_URL = reverse('horizon:project:networks:index')
def form_data_subnet(subnet,
name=None, cidr=None, ip_version=None,
gateway_ip='', enable_dhcp=None,
allocation_pools=None,
dns_nameservers=None,
host_routes=None):
def get_value(value, default):
return default if value is None else value
data = {}
data['subnet_name'] = get_value(name, subnet.name)
data['cidr'] = get_value(cidr, subnet.cidr)
data['ip_version'] = get_value(ip_version, subnet.ip_version)
gateway_ip = subnet.gateway_ip if gateway_ip == '' else gateway_ip
data['gateway_ip'] = gateway_ip or ''
data['no_gateway'] = (gateway_ip is None)
data['enable_dhcp'] = get_value(enable_dhcp, subnet.enable_dhcp)
if data['ip_version'] == 6:
data['ipv6_modes'] = subnet.ipv6_modes
pools = get_value(allocation_pools, subnet.allocation_pools)
data['allocation_pools'] = _str_allocation_pools(pools)
nameservers = get_value(dns_nameservers, subnet.dns_nameservers)
data['dns_nameservers'] = _str_dns_nameservers(nameservers)
routes = get_value(host_routes, subnet.host_routes)
data['host_routes'] = _str_host_routes(routes)
return data
def form_data_no_subnet():
return {'subnet_name': '',
'cidr': '',
'ip_version': 4,
'gateway_ip': '',
'no_gateway': False,
'enable_dhcp': True,
'allocation_pools': '',
'dns_nameservers': '',
'host_routes': ''}
def _str_allocation_pools(allocation_pools):
if isinstance(allocation_pools, str):
return allocation_pools
return '\n'.join(['%s,%s' % (pool['start'], pool['end'])
for pool in allocation_pools])
def _str_dns_nameservers(dns_nameservers):
if isinstance(dns_nameservers, str):
return dns_nameservers
return '\n'.join(dns_nameservers)
def _str_host_routes(host_routes):
if isinstance(host_routes, str):
return host_routes
return '\n'.join(['%s,%s' % (route['destination'], route['nexthop'])
for route in host_routes])
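# Editor's sketch (hypothetical values): for a subnet fixture with CIDR
# 10.0.0.0/24, gateway 10.0.0.1 and a single allocation pool,
# form_data_subnet(subnet) produces roughly:
#
#     {'subnet_name': 'mysubnet1',
#      'cidr': '10.0.0.0/24',
#      'ip_version': 4,
#      'gateway_ip': '10.0.0.1',
#      'no_gateway': False,
#      'enable_dhcp': True,
#      'allocation_pools': '10.0.0.2,10.0.0.254',
#      'dns_nameservers': '',
#      'host_routes': ''}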
class NetworkTests(test.TestCase):
@test.create_stubs({api.neutron: ('network_list',),
quotas: ('tenant_quota_usages',)})
def test_index(self):
quota_data = self.quota_usages.first()
quota_data['networks']['available'] = 5
quota_data['subnets']['available'] = 5
api.neutron.network_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
shared=False).AndReturn(self.networks.list())
api.neutron.network_list(
IsA(http.HttpRequest),
shared=True).AndReturn([])
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/networks/index.html')
networks = res.context['networks_table'].data
self.assertItemsEqual(networks, self.networks.list())
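    # Editor's note: the tests in this class share a mox pattern --
    # @test.create_stubs stubs out the listed api.neutron calls, the expected
    # calls are recorded first, self.mox.ReplayAll() switches to replay mode,
    # and the view is then exercised through the Django test client; any
    # unmet or unexpected call fails the test during verification.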
@test.create_stubs({api.neutron: ('network_list',),
quotas: ('tenant_quota_usages',)})
def test_index_network_list_exception(self):
quota_data = self.quota_usages.first()
quota_data['networks']['available'] = 5
quota_data['subnets']['available'] = 5
api.neutron.network_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
shared=False).MultipleTimes().AndRaise(self.exceptions.neutron)
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/networks/index.html')
self.assertEqual(len(res.context['networks_table'].data), 0)
self.assertMessageCount(res, error=1)
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',),
quotas: ('tenant_quota_usages',)})
def test_network_detail(self):
self._test_network_detail()
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',),
quotas: ('tenant_quota_usages',)})
def test_network_detail_with_mac_learning(self):
self._test_network_detail(mac_learning=True)
def _test_network_detail(self, mac_learning=False):
quota_data = self.quota_usages.first()
quota_data['subnets']['available'] = 5
network_id = self.networks.first().id
api.neutron.network_get(IsA(http.HttpRequest), network_id)\
.AndReturn(self.networks.first())
api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.subnets.first()])
api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.ports.first()])
api.neutron.network_get(IsA(http.HttpRequest), network_id)\
.AndReturn(self.networks.first())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:project:networks:detail',
args=[network_id]))
self.assertTemplateUsed(res, 'project/networks/detail.html')
subnets = res.context['subnets_table'].data
ports = res.context['ports_table'].data
self.assertItemsEqual(subnets, [self.subnets.first()])
self.assertItemsEqual(ports, [self.ports.first()])
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',)})
def test_network_detail_network_exception(self):
self._test_network_detail_network_exception()
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',)})
def test_network_detail_network_exception_with_mac_learning(self):
self._test_network_detail_network_exception(mac_learning=True)
def _test_network_detail_network_exception(self, mac_learning=False):
network_id = self.networks.first().id
api.neutron.network_get(IsA(http.HttpRequest), network_id)\
.AndRaise(self.exceptions.neutron)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
self.mox.ReplayAll()
url = reverse('horizon:project:networks:detail', args=[network_id])
res = self.client.get(url)
redir_url = INDEX_URL
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',),
quotas: ('tenant_quota_usages',)})
def test_network_detail_subnet_exception(self):
self._test_network_detail_subnet_exception()
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',),
quotas: ('tenant_quota_usages',)})
def test_network_detail_subnet_exception_with_mac_learning(self):
self._test_network_detail_subnet_exception(mac_learning=True)
def _test_network_detail_subnet_exception(self, mac_learning=False):
network_id = self.networks.first().id
quota_data = self.quota_usages.first()
quota_data['networks']['available'] = 5
quota_data['subnets']['available'] = 5
api.neutron.network_get(IsA(http.HttpRequest), network_id).\
AndReturn(self.networks.first())
api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id).\
AndRaise(self.exceptions.neutron)
api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id).\
AndReturn([self.ports.first()])
# Called from SubnetTable
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:project:networks:detail',
args=[network_id]))
self.assertTemplateUsed(res, 'project/networks/detail.html')
subnets = res.context['subnets_table'].data
ports = res.context['ports_table'].data
self.assertEqual(len(subnets), 0)
self.assertItemsEqual(ports, [self.ports.first()])
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',),
quotas: ('tenant_quota_usages',)})
def test_network_detail_port_exception(self):
self._test_network_detail_port_exception()
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',),
quotas: ('tenant_quota_usages',)})
def test_network_detail_port_exception_with_mac_learning(self):
self._test_network_detail_port_exception(mac_learning=True)
def _test_network_detail_port_exception(self, mac_learning=False):
network_id = self.networks.first().id
quota_data = self.quota_usages.first()
quota_data['subnets']['available'] = 5
api.neutron.network_get(IsA(http.HttpRequest), network_id).\
AndReturn(self.networks.first())
api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id).\
AndReturn([self.subnets.first()])
api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id).\
AndRaise(self.exceptions.neutron)
# Called from SubnetTable
api.neutron.network_get(IsA(http.HttpRequest), network_id).\
AndReturn(self.networks.first())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:project:networks:detail',
args=[network_id]))
self.assertTemplateUsed(res, 'project/networks/detail.html')
subnets = res.context['subnets_table'].data
ports = res.context['ports_table'].data
self.assertItemsEqual(subnets, [self.subnets.first()])
self.assertEqual(len(ports), 0)
@test.create_stubs({api.neutron: ('profile_list',)})
def test_network_create_get(self,
test_with_profile=False):
if test_with_profile:
net_profiles = self.net_profiles.list()
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
self.mox.ReplayAll()
url = reverse('horizon:project:networks:create')
res = self.client.get(url)
workflow = res.context['workflow']
self.assertTemplateUsed(res, views.WorkflowView.template_name)
self.assertEqual(workflow.name, workflows.CreateNetwork.name)
expected_objs = ['<CreateNetworkInfo: createnetworkinfoaction>',
'<CreateSubnetInfo: createsubnetinfoaction>',
'<CreateSubnetDetail: createsubnetdetailaction>']
self.assertQuerysetEqual(workflow.steps, expected_objs)
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_network_create_get_with_profile(self):
self.test_network_create_get(test_with_profile=True)
@test.create_stubs({api.neutron: ('network_create',
'profile_list',)})
def test_network_create_post(self,
test_with_profile=False):
network = self.networks.first()
params = {'name': network.name,
'admin_state_up': network.admin_state_up}
if test_with_profile:
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
params['net_profile_id'] = net_profile_id
api.neutron.network_create(IsA(http.HttpRequest),
**params).AndReturn(network)
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
# subnet
'with_subnet': False}
if test_with_profile:
form_data['net_profile_id'] = net_profile_id
form_data.update(form_data_no_subnet())
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_network_create_post_with_profile(self):
self.test_network_create_post(test_with_profile=True)
@test.create_stubs({api.neutron: ('network_create',
'subnet_create',
'profile_list',)})
def test_network_create_post_with_subnet(self,
test_with_profile=False,
test_with_ipv6=True):
network = self.networks.first()
subnet = self.subnets.first()
params = {'name': network.name,
'admin_state_up': network.admin_state_up}
subnet_params = {'network_id': network.id,
'name': subnet.name,
'cidr': subnet.cidr,
'ip_version': subnet.ip_version,
'gateway_ip': subnet.gateway_ip,
'enable_dhcp': subnet.enable_dhcp}
if test_with_profile:
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
params['net_profile_id'] = net_profile_id
if not test_with_ipv6:
subnet.ip_version = 4
subnet_params['ip_version'] = subnet.ip_version
api.neutron.network_create(IsA(http.HttpRequest),
**params).AndReturn(network)
api.neutron.subnet_create(IsA(http.HttpRequest),
**subnet_params).AndReturn(subnet)
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
'with_subnet': True}
if test_with_profile:
form_data['net_profile_id'] = net_profile_id
form_data.update(form_data_subnet(subnet, allocation_pools=[]))
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_network_create_post_with_subnet_w_profile(self):
self.test_network_create_post_with_subnet(test_with_profile=True)
@test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_ipv6': False})
def test_create_network_with_ipv6_disabled(self):
self.test_network_create_post_with_subnet(test_with_ipv6=False)
@test.create_stubs({api.neutron: ('network_create',
'profile_list',)})
def test_network_create_post_network_exception(self,
test_with_profile=False):
network = self.networks.first()
params = {'name': network.name,
'admin_state_up': network.admin_state_up}
if test_with_profile:
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
params['net_profile_id'] = net_profile_id
api.neutron.network_create(IsA(http.HttpRequest),
**params).AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
# subnet
'with_subnet': False}
if test_with_profile:
form_data['net_profile_id'] = net_profile_id
form_data.update(form_data_no_subnet())
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_network_create_post_nw_exception_w_profile(self):
self.test_network_create_post_network_exception(
test_with_profile=True)
@test.create_stubs({api.neutron: ('network_create',
'profile_list')})
def test_network_create_post_with_subnet_network_exception(
self,
test_with_profile=False,
test_with_subnetpool=False,
):
network = self.networks.first()
subnet = self.subnets.first()
params = {'name': network.name,
'admin_state_up': network.admin_state_up}
if test_with_profile:
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
params['net_profile_id'] = net_profile_id
api.neutron.network_create(IsA(http.HttpRequest),
**params).AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
'with_subnet': True}
if test_with_profile:
form_data['net_profile_id'] = net_profile_id
form_data.update(form_data_subnet(subnet, allocation_pools=[]))
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_nw_create_post_w_subnet_nw_exception_w_profile(self):
self.test_network_create_post_with_subnet_network_exception(
test_with_profile=True)
@test.create_stubs({api.neutron: ('network_create',
'network_delete',
'subnet_create',
'profile_list',
'is_extension_supported',
'subnetpool_list',)})
def test_network_create_post_with_subnet_subnet_exception(
self,
test_with_profile=False,
):
network = self.networks.first()
subnet = self.subnets.first()
params = {'name': network.name,
'admin_state_up': network.admin_state_up}
if test_with_profile:
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
params['net_profile_id'] = net_profile_id
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
api.neutron.network_create(IsA(http.HttpRequest),
**params).AndReturn(network)
api.neutron.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=subnet.gateway_ip,
enable_dhcp=subnet.enable_dhcp)\
.AndRaise(self.exceptions.neutron)
api.neutron.network_delete(IsA(http.HttpRequest),
network.id)
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
'with_subnet': True}
if test_with_profile:
form_data['net_profile_id'] = net_profile_id
form_data.update(form_data_subnet(subnet, allocation_pools=[]))
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_nw_create_post_w_subnet_subnet_exception_w_profile(self):
self.test_network_create_post_with_subnet_subnet_exception(
test_with_profile=True)
@test.create_stubs({api.neutron: ('profile_list',
'is_extension_supported',
'subnetpool_list',)})
def test_network_create_post_with_subnet_nocidr(self,
test_with_profile=False,
test_with_snpool=False):
network = self.networks.first()
subnet = self.subnets.first()
if test_with_profile:
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
'with_subnet': True}
if test_with_profile:
form_data['net_profile_id'] = net_profile_id
if test_with_snpool:
form_data['subnetpool_id'] = ''
form_data['prefixlen'] = ''
form_data.update(form_data_subnet(subnet, cidr='',
allocation_pools=[]))
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
self.assertContains(res, escape('Specify "Network Address", '
'"Address pool" or '
'clear "Create Subnet" checkbox.'))
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_nw_create_post_w_subnet_no_cidr_w_profile(self):
self.test_network_create_post_with_subnet_nocidr(
test_with_profile=True)
def test_network_create_post_with_subnet_nocidr_nosubnetpool(self):
self.test_network_create_post_with_subnet_nocidr(
test_with_snpool=True)
@test.create_stubs({api.neutron: ('profile_list',
'is_extension_supported',
'subnetpool_list',)})
def test_network_create_post_with_subnet_cidr_without_mask(
self,
test_with_profile=False,
test_with_subnetpool=False,
):
network = self.networks.first()
subnet = self.subnets.first()
if test_with_profile:
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
'with_subnet': True}
if test_with_profile:
form_data['net_profile_id'] = net_profile_id
if test_with_subnetpool:
subnetpool = self.subnetpools.first()
form_data['subnetpool'] = subnetpool.id
form_data['prefixlen'] = subnetpool.default_prefixlen
form_data.update(form_data_subnet(subnet, cidr='10.0.0.0',
allocation_pools=[]))
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
expected_msg = "The subnet in the Network Address is too small (/32)."
self.assertContains(res, expected_msg)
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_nw_create_post_w_subnet_cidr_without_mask_w_profile(self):
self.test_network_create_post_with_subnet_cidr_without_mask(
test_with_profile=True)
def test_network_create_post_with_subnet_cidr_without_mask_w_snpool(self):
self.test_network_create_post_with_subnet_cidr_without_mask(
test_with_subnetpool=True)
@test.create_stubs({api.neutron: ('profile_list',
'is_extension_supported',
'subnetpool_list',)})
def test_network_create_post_with_subnet_cidr_inconsistent(
self,
test_with_profile=False,
test_with_subnetpool=False
):
network = self.networks.first()
subnet = self.subnets.first()
if test_with_profile:
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
self.mox.ReplayAll()
# dummy IPv6 address
cidr = '2001:0DB8:0:CD30:123:4567:89AB:CDEF/60'
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
'with_subnet': True}
if test_with_profile:
form_data['net_profile_id'] = net_profile_id
if test_with_subnetpool:
subnetpool = self.subnetpools.first()
form_data['subnetpool'] = subnetpool.id
form_data['prefixlen'] = subnetpool.default_prefixlen
form_data.update(form_data_subnet(subnet, cidr=cidr,
allocation_pools=[]))
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
expected_msg = 'Network Address and IP version are inconsistent.'
self.assertContains(res, expected_msg)
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_network_create_post_with_subnet_cidr_inconsistent_w_profile(self):
self.test_network_create_post_with_subnet_cidr_inconsistent(
test_with_profile=True)
def test_network_create_post_with_subnet_cidr_inconsistent_w_snpool(self):
self.test_network_create_post_with_subnet_cidr_inconsistent(
test_with_subnetpool=True)
@test.create_stubs({api.neutron: ('profile_list',
'is_extension_supported',
'subnetpool_list',)})
def test_network_create_post_with_subnet_gw_inconsistent(
self,
test_with_profile=False,
test_with_subnetpool=False,
):
network = self.networks.first()
subnet = self.subnets.first()
if test_with_profile:
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
self.mox.ReplayAll()
# dummy IPv6 address
gateway_ip = '2001:0DB8:0:CD30:123:4567:89AB:CDEF'
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
'with_subnet': True}
if test_with_profile:
form_data['net_profile_id'] = net_profile_id
if test_with_subnetpool:
subnetpool = self.subnetpools.first()
form_data['subnetpool'] = subnetpool.id
form_data['prefixlen'] = subnetpool.default_prefixlen
form_data.update(form_data_subnet(subnet, gateway_ip=gateway_ip,
allocation_pools=[]))
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
self.assertContains(res, 'Gateway IP and IP version are inconsistent.')
@test.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_network_create_post_with_subnet_gw_inconsistent_w_profile(self):
self.test_network_create_post_with_subnet_gw_inconsistent(
test_with_profile=True)
def test_network_create_post_with_subnet_gw_inconsistent_w_snpool(self):
self.test_network_create_post_with_subnet_gw_inconsistent(
test_with_subnetpool=True)
@test.create_stubs({api.neutron: ('network_get',)})
def test_network_update_get(self):
network = self.networks.first()
api.neutron.network_get(IsA(http.HttpRequest), network.id)\
.AndReturn(network)
self.mox.ReplayAll()
url = reverse('horizon:project:networks:update', args=[network.id])
res = self.client.get(url)
self.assertTemplateUsed(res, 'project/networks/update.html')
@test.create_stubs({api.neutron: ('network_get',)})
def test_network_update_get_exception(self):
network = self.networks.first()
api.neutron.network_get(IsA(http.HttpRequest), network.id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
url = reverse('horizon:project:networks:update', args=[network.id])
res = self.client.get(url)
redir_url = INDEX_URL
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_update',
'network_get',)})
def test_network_update_post(self):
network = self.networks.first()
api.neutron.network_update(IsA(http.HttpRequest), network.id,
name=network.name,
admin_state_up=network.admin_state_up)\
.AndReturn(network)
api.neutron.network_get(IsA(http.HttpRequest), network.id)\
.AndReturn(network)
self.mox.ReplayAll()
form_data = {'network_id': network.id,
'name': network.name,
'admin_state': network.admin_state_up,
'tenant_id': network.tenant_id}
url = reverse('horizon:project:networks:update', args=[network.id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('network_update',
'network_get',)})
def test_network_update_post_exception(self):
network = self.networks.first()
api.neutron.network_update(IsA(http.HttpRequest), network.id,
name=network.name,
admin_state_up=network.admin_state_up)\
.AndRaise(self.exceptions.neutron)
api.neutron.network_get(IsA(http.HttpRequest), network.id)\
.AndReturn(network)
self.mox.ReplayAll()
form_data = {'network_id': network.id,
'name': network.name,
'admin_state': network.admin_state_up,
'tenant_id': network.tenant_id}
url = reverse('horizon:project:networks:update', args=[network.id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('network_get',
'network_list',
'network_delete')})
def test_delete_network_no_subnet(self):
network = self.networks.first()
network.subnets = []
api.neutron.network_get(IsA(http.HttpRequest),
network.id,
expand_subnet=False)\
.AndReturn(network)
api.neutron.network_list(IsA(http.HttpRequest),
tenant_id=network.tenant_id,
shared=False)\
.AndReturn([network])
api.neutron.network_list(IsA(http.HttpRequest),
shared=True)\
.AndReturn([])
api.neutron.network_delete(IsA(http.HttpRequest), network.id)
self.mox.ReplayAll()
form_data = {'action': 'networks__delete__%s' % network.id}
res = self.client.post(INDEX_URL, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('network_get',
'network_list',
'network_delete',
'subnet_delete')})
def test_delete_network_with_subnet(self):
network = self.networks.first()
network.subnets = [subnet.id for subnet in network.subnets]
subnet_id = network.subnets[0]
api.neutron.network_get(IsA(http.HttpRequest),
network.id,
expand_subnet=False)\
.AndReturn(network)
api.neutron.network_list(IsA(http.HttpRequest),
tenant_id=network.tenant_id,
shared=False)\
.AndReturn([network])
api.neutron.network_list(IsA(http.HttpRequest), shared=True)\
.AndReturn([])
api.neutron.subnet_delete(IsA(http.HttpRequest), subnet_id)
api.neutron.network_delete(IsA(http.HttpRequest), network.id)
self.mox.ReplayAll()
form_data = {'action': 'networks__delete__%s' % network.id}
res = self.client.post(INDEX_URL, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('network_get',
'network_list',
'network_delete',
'subnet_delete')})
def test_delete_network_exception(self):
network = self.networks.first()
network.subnets = [subnet.id for subnet in network.subnets]
subnet_id = network.subnets[0]
api.neutron.network_get(IsA(http.HttpRequest),
network.id,
expand_subnet=False)\
.AndReturn(network)
api.neutron.network_list(IsA(http.HttpRequest),
tenant_id=network.tenant_id,
shared=False)\
.AndReturn([network])
api.neutron.network_list(IsA(http.HttpRequest),
shared=True)\
.AndReturn([])
api.neutron.subnet_delete(IsA(http.HttpRequest), subnet_id)
api.neutron.network_delete(IsA(http.HttpRequest), network.id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {'action': 'networks__delete__%s' % network.id}
res = self.client.post(INDEX_URL, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
class NetworkSubnetTests(test.TestCase):
@test.create_stubs({api.neutron: ('network_get',
'subnet_get',)})
def test_subnet_detail(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest), network.id)\
.AndReturn(network)
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
url = reverse('horizon:project:networks:subnets:detail',
args=[subnet.id])
res = self.client.get(url)
self.assertTemplateUsed(res, 'project/networks/subnets/detail.html')
self.assertEqual(res.context['subnet'].id, subnet.id)
@test.create_stubs({api.neutron: ('subnet_get',)})
def test_subnet_detail_exception(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
url = reverse('horizon:project:networks:subnets:detail',
args=[subnet.id])
res = self.client.get(url)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('network_get',)})
def test_subnet_create_get(self):
network = self.networks.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
self.mox.ReplayAll()
url = reverse('horizon:project:networks:addsubnet',
args=[network.id])
res = self.client.get(url)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
@test.create_stubs({api.neutron: ('network_get',
'subnet_create',)})
def test_subnet_create_post(self, test_with_subnetpool=False):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.neutron.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=subnet.gateway_ip,
enable_dhcp=subnet.enable_dhcp,
allocation_pools=subnet.allocation_pools)\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet)
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_get',
'subnet_create',)})
def test_subnet_create_post_with_additional_attributes(self):
network = self.networks.list()[1]
subnet = self.subnets.list()[1]
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.neutron.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=subnet.gateway_ip,
enable_dhcp=subnet.enable_dhcp,
allocation_pools=subnet.allocation_pools,
dns_nameservers=subnet.dns_nameservers,
host_routes=subnet.host_routes)\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet)
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_get',
'subnet_create',)})
def test_subnet_create_post_with_additional_attributes_no_gateway(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.neutron.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=None,
enable_dhcp=subnet.enable_dhcp,
allocation_pools=subnet.allocation_pools)\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet, gateway_ip=None)
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_get',
'subnet_create',)})
def test_subnet_create_post_network_exception(self,
test_with_subnetpool=False):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {}
if test_with_subnetpool:
subnetpool = self.subnetpools.first()
form_data['subnetpool'] = subnetpool.id
form_data.update(form_data_subnet(subnet, allocation_pools=[]))
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_subnet_create_post_network_exception_with_subnetpool(self):
self.test_subnet_create_post_network_exception(
test_with_subnetpool=True)
@test.create_stubs({api.neutron: ('network_get',
'subnet_create',)})
def test_subnet_create_post_subnet_exception(self,
test_with_subnetpool=False):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.neutron.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=subnet.gateway_ip,
enable_dhcp=subnet.enable_dhcp)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet, allocation_pools=[])
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',
'subnetpool_list',)})
def test_subnet_create_post_cidr_inconsistent(self,
test_with_subnetpool=False):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
self.mox.ReplayAll()
form_data = {}
if test_with_subnetpool:
subnetpool = self.subnetpools.first()
form_data['subnetpool'] = subnetpool.id
# dummy IPv6 address
cidr = '2001:0DB8:0:CD30:123:4567:89AB:CDEF/60'
form_data.update(form_data_subnet(subnet, cidr=cidr,
allocation_pools=[]))
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
expected_msg = 'Network Address and IP version are inconsistent.'
self.assertFormErrors(res, 1, expected_msg)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
def test_subnet_create_post_cidr_inconsistent_with_subnetpool(self):
self.test_subnet_create_post_cidr_inconsistent(
test_with_subnetpool=True)
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',
'subnetpool_list',)})
def test_subnet_create_post_gw_inconsistent(self,
test_with_subnetpool=False):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
self.mox.ReplayAll()
form_data = {}
if test_with_subnetpool:
subnetpool = self.subnetpools.first()
form_data['subnetpool'] = subnetpool.id
# dummy IPv6 address
gateway_ip = '2001:0DB8:0:CD30:123:4567:89AB:CDEF'
form_data.update(form_data_subnet(subnet, gateway_ip=gateway_ip,
allocation_pools=[]))
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res, 'Gateway IP and IP version are inconsistent.')
def test_subnet_create_post_gw_inconsistent_with_subnetpool(self):
self.test_subnet_create_post_gw_inconsistent(test_with_subnetpool=True)
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',
'subnetpool_list',)})
def test_subnet_create_post_invalid_pools_start_only(self,
test_w_snpool=False):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
self.mox.ReplayAll()
form_data = {}
if test_w_snpool:
subnetpool = self.subnetpools.first()
form_data['subnetpool'] = subnetpool.id
# Start only allocation_pools
allocation_pools = '10.0.0.2'
form_data.update(form_data_subnet(subnet,
allocation_pools=allocation_pools))
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'Start and end addresses must be specified '
'(value=%s)' % allocation_pools)
def test_subnet_create_post_invalid_pools_start_only_with_subnetpool(self):
self.test_subnet_create_post_invalid_pools_start_only(
test_w_snpool=True)
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',
'subnetpool_list',)})
def test_subnet_create_post_invalid_pools_three_entries(self,
t_w_snpool=False):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
self.mox.ReplayAll()
form_data = {}
if t_w_snpool:
subnetpool = self.subnetpools.first()
form_data['subnetpool'] = subnetpool.id
# pool with three entries
allocation_pools = '10.0.0.2,10.0.0.3,10.0.0.4'
form_data.update(form_data_subnet(subnet,
allocation_pools=allocation_pools))
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'Start and end addresses must be specified '
'(value=%s)' % allocation_pools)
def test_subnet_create_post_invalid_pools_three_entries_w_subnetpool(self):
self.test_subnet_create_post_invalid_pools_three_entries(
t_w_snpool=True)
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',
'subnetpool_list',)})
def test_subnet_create_post_invalid_pools_invalid_address(self,
t_w_snpl=False):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
self.mox.ReplayAll()
form_data = {}
if t_w_snpl:
subnetpool = self.subnetpools.first()
form_data['subnetpool'] = subnetpool.id
# end address is not a valid IP address
allocation_pools = '10.0.0.2,invalid_address'
form_data.update(form_data_subnet(subnet,
allocation_pools=allocation_pools))
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'allocation_pools: Invalid IP address '
'(value=%s)' % allocation_pools.split(',')[1])
def test_subnet_create_post_invalid_pools_invalid_address_w_snpool(self):
self.test_subnet_create_post_invalid_pools_invalid_address(
t_w_snpl=True)
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',
'subnetpool_list',)})
def test_subnet_create_post_invalid_pools_ip_network(self,
test_w_snpool=False):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
self.mox.ReplayAll()
form_data = {}
if test_w_snpool:
subnetpool = self.subnetpools.first()
form_data['subnetpool'] = subnetpool.id
# start address is CIDR
allocation_pools = '10.0.0.2/24,10.0.0.5'
form_data.update(form_data_subnet(subnet,
allocation_pools=allocation_pools))
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'allocation_pools: Invalid IP address '
'(value=%s)' % allocation_pools.split(',')[0])
def test_subnet_create_post_invalid_pools_ip_network_with_subnetpool(self):
self.test_subnet_create_post_invalid_pools_ip_network(
test_w_snpool=True)
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',
'subnetpool_list',)})
def test_subnet_create_post_invalid_pools_start_larger_than_end(self,
tsn=False):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
self.mox.ReplayAll()
form_data = {}
if tsn:
subnetpool = self.subnetpools.first()
form_data['subnetpool'] = subnetpool.id
# start address is larger than end address
allocation_pools = '10.0.0.254,10.0.0.2'
        form_data.update(form_data_subnet(subnet,
                                          allocation_pools=allocation_pools))
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'Start address is larger than end address '
'(value=%s)' % allocation_pools)
def test_subnet_create_post_invalid_pools_start_larger_than_end_tsn(self):
self.test_subnet_create_post_invalid_pools_start_larger_than_end(
tsn=True)
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',
'subnetpool_list',)})
def test_subnet_create_post_invalid_nameservers(self,
test_w_subnetpool=False):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
self.mox.ReplayAll()
form_data = {}
if test_w_subnetpool:
subnetpool = self.subnetpools.first()
form_data['subnetpool'] = subnetpool.id
# invalid DNS server address
dns_nameservers = ['192.168.0.2', 'invalid_address']
form_data.update(form_data_subnet(subnet,
dns_nameservers=dns_nameservers,
allocation_pools=[]))
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'dns_nameservers: Invalid IP address '
'(value=%s)' % dns_nameservers[1])
def test_subnet_create_post_invalid_nameservers_with_subnetpool(self):
self.test_subnet_create_post_invalid_nameservers(
test_w_subnetpool=True)
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',
'subnetpool_list',)})
def test_subnet_create_post_invalid_routes_destination_only(self,
tsn=False):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
self.mox.ReplayAll()
form_data = {}
if tsn:
subnetpool = self.subnetpools.first()
form_data['subnetpool'] = subnetpool.id
# Start only host_route
host_routes = '192.168.0.0/24'
form_data.update(form_data_subnet(subnet,
allocation_pools=[],
host_routes=host_routes))
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'Host Routes format error: '
'Destination CIDR and nexthop must be specified '
'(value=%s)' % host_routes)
def test_subnet_create_post_invalid_routes_destination_only_w_snpool(self):
self.test_subnet_create_post_invalid_routes_destination_only(
tsn=True)
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',
'subnetpool_list',)})
def test_subnet_create_post_invalid_routes_three_entries(self,
tsn=False):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
self.mox.ReplayAll()
form_data = {}
if tsn:
subnetpool = self.subnetpools.first()
form_data['subnetpool'] = subnetpool.id
# host_route with three entries
host_routes = 'aaaa,bbbb,cccc'
form_data.update(form_data_subnet(subnet,
allocation_pools=[],
host_routes=host_routes))
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'Host Routes format error: '
'Destination CIDR and nexthop must be specified '
'(value=%s)' % host_routes)
def test_subnet_create_post_invalid_routes_three_entries_with_tsn(self):
self.test_subnet_create_post_invalid_routes_three_entries(
tsn=True)
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',
'subnetpool_list',)})
def test_subnet_create_post_invalid_routes_invalid_destination(self,
tsn=False):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
self.mox.ReplayAll()
form_data = {}
if tsn:
subnetpool = self.subnetpools.first()
form_data['subnetpool'] = subnetpool.id
# invalid destination network
host_routes = '172.16.0.0/64,10.0.0.253'
form_data.update(form_data_subnet(subnet,
host_routes=host_routes,
allocation_pools=[]))
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'host_routes: Invalid IP address '
'(value=%s)' % host_routes.split(',')[0])
def test_subnet_create_post_invalid_routes_invalid_destination_tsn(self):
self.test_subnet_create_post_invalid_routes_invalid_destination(
tsn=True)
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',
'subnetpool_list',)})
def test_subnet_create_post_invalid_routes_nexthop_ip_network(self,
tsn=False):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
self.mox.ReplayAll()
form_data = {}
if tsn:
subnetpool = self.subnetpools.first()
form_data['subnetpool'] = subnetpool.id
# nexthop is not an IP address
host_routes = '172.16.0.0/24,10.0.0.253/24'
form_data.update(form_data_subnet(subnet,
host_routes=host_routes,
allocation_pools=[]))
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'host_routes: Invalid IP address '
'(value=%s)' % host_routes.split(',')[1])
def test_subnet_create_post_invalid_routes_nexthop_ip_network_tsn(self):
self.test_subnet_create_post_invalid_routes_nexthop_ip_network(
tsn=True)
@test.create_stubs({api.neutron: ('is_extension_supported',
'network_get',
'subnet_create',
'subnetpool_list',)})
def test_v6subnet_create_post(self):
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation').\
AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest)).\
AndReturn(self.subnetpools.list())
network = self.networks.get(name="v6_net1")
subnet = self.subnets.get(name="v6_subnet1")
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(network)
api.neutron.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=subnet.gateway_ip,
enable_dhcp=subnet.enable_dhcp,
allocation_pools=subnet.allocation_pools)\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet)
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_get',
'subnet_create',)})
def test_v6subnet_create_post_with_slaac_attributes(self):
network = self.networks.get(name="v6_net2")
subnet = self.subnets.get(name="v6_subnet2")
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(network)
api.neutron.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=subnet.gateway_ip,
enable_dhcp=subnet.enable_dhcp,
allocation_pools=subnet.allocation_pools,
ipv6_address_mode='slaac',
ipv6_ra_mode='slaac')\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet)
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
api.neutron.subnet_update(IsA(http.HttpRequest), subnet.id,
name=subnet.name,
enable_dhcp=subnet.enable_dhcp,
dns_nameservers=[],
host_routes=[])\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet,
allocation_pools=[])
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_with_gateway_ip(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
gateway_ip = '10.0.0.100'
api.neutron.subnet_update(IsA(http.HttpRequest), subnet.id,
name=subnet.name,
gateway_ip=gateway_ip,
enable_dhcp=subnet.enable_dhcp,
dns_nameservers=[],
host_routes=[])\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet,
gateway_ip=gateway_ip,
allocation_pools=[])
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_no_gateway(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
api.neutron.subnet_update(IsA(http.HttpRequest), subnet.id,
name=subnet.name,
gateway_ip=None,
enable_dhcp=subnet.enable_dhcp,
dns_nameservers=[],
host_routes=[])\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet,
gateway_ip=None,
allocation_pools=[])
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_with_additional_attributes(self):
subnet = self.subnets.list()[1]
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
start = subnet.allocation_pools[0]['start']
end = subnet.allocation_pools[0]['end']
api.neutron.subnet_update(IsA(http.HttpRequest), subnet.id,
name=subnet.name,
enable_dhcp=False,
dns_nameservers=subnet.dns_nameservers,
host_routes=subnet.host_routes,
allocation_pools=[{'start': start,
'end': end}])\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet,
enable_dhcp=False)
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_gw_inconsistent(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
# dummy IPv6 address
gateway_ip = '2001:0DB8:0:CD30:123:4567:89AB:CDEF'
form_data = form_data_subnet(subnet, gateway_ip=gateway_ip,
allocation_pools=[])
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
self.assertContains(res, 'Gateway IP and IP version are inconsistent.')
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_invalid_nameservers(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
# invalid DNS server address
dns_nameservers = ['192.168.0.2', 'invalid_address']
form_data = form_data_subnet(subnet, dns_nameservers=dns_nameservers,
allocation_pools=[])
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
self.assertContains(res,
'dns_nameservers: Invalid IP address '
'(value=%s)' % dns_nameservers[1])
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_invalid_routes_destination_only(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
# Start only host_route
host_routes = '192.168.0.0/24'
form_data = form_data_subnet(subnet,
allocation_pools=[],
host_routes=host_routes)
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
self.assertContains(res,
'Host Routes format error: '
'Destination CIDR and nexthop must be specified '
'(value=%s)' % host_routes)
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_invalid_routes_three_entries(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
# host_route with three entries
host_routes = 'aaaa,bbbb,cccc'
form_data = form_data_subnet(subnet,
allocation_pools=[],
host_routes=host_routes)
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
self.assertContains(res,
'Host Routes format error: '
'Destination CIDR and nexthop must be specified '
'(value=%s)' % host_routes)
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_invalid_routes_invalid_destination(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
# invalid destination network
host_routes = '172.16.0.0/64,10.0.0.253'
form_data = form_data_subnet(subnet,
host_routes=host_routes,
allocation_pools=[])
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
self.assertContains(res,
'host_routes: Invalid IP address '
'(value=%s)' % host_routes.split(',')[0])
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_invalid_routes_nexthop_ip_network(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
# nexthop is not an IP address
host_routes = '172.16.0.0/24,10.0.0.253/24'
form_data = form_data_subnet(subnet,
host_routes=host_routes,
allocation_pools=[])
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
self.assertContains(res,
'host_routes: Invalid IP address '
'(value=%s)' % host_routes.split(',')[1])
@test.create_stubs({api.neutron: ('subnet_delete',
'subnet_list',
'network_get',
'port_list',
'is_extension_supported',)})
def test_subnet_delete(self):
self._test_subnet_delete()
@test.create_stubs({api.neutron: ('subnet_delete',
'subnet_list',
'network_get',
'port_list',
'is_extension_supported',)})
def test_subnet_delete_with_mac_learning(self):
self._test_subnet_delete(mac_learning=True)
def _test_subnet_delete(self, mac_learning=False):
subnet = self.subnets.first()
network_id = subnet.network_id
api.neutron.subnet_delete(IsA(http.HttpRequest), subnet.id)
api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.subnets.first()])
api.neutron.network_get(IsA(http.HttpRequest), network_id)\
.AndReturn(self.networks.first())
api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.ports.first()])
# Called from SubnetTable
api.neutron.network_get(IsA(http.HttpRequest), network_id)\
.AndReturn(self.networks.first())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
self.mox.ReplayAll()
form_data = {'action': 'subnets__delete__%s' % subnet.id}
url = reverse('horizon:project:networks:detail',
args=[network_id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, url)
@test.create_stubs({api.neutron: ('subnet_delete',
'subnet_list',
'network_get',
'port_list',
'is_extension_supported',)})
def test_subnet_delete_exception(self):
self._test_subnet_delete_exception()
@test.create_stubs({api.neutron: ('subnet_delete',
'subnet_list',
'network_get',
'port_list',
'is_extension_supported',)})
def test_subnet_delete_exception_with_mac_learning(self):
self._test_subnet_delete_exception(mac_learning=True)
def _test_subnet_delete_exception(self, mac_learning=False):
subnet = self.subnets.first()
network_id = subnet.network_id
api.neutron.subnet_delete(IsA(http.HttpRequest), subnet.id)\
.AndRaise(self.exceptions.neutron)
api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.subnets.first()])
api.neutron.network_get(IsA(http.HttpRequest), network_id)\
.AndReturn(self.networks.first())
api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.ports.first()])
# Called from SubnetTable
api.neutron.network_get(IsA(http.HttpRequest), network_id)\
.AndReturn(self.networks.first())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
self.mox.ReplayAll()
form_data = {'action': 'subnets__delete__%s' % subnet.id}
url = reverse('horizon:project:networks:detail',
args=[network_id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, url)
class NetworkPortTests(test.TestCase):
@test.create_stubs({api.neutron: ('port_get',
'is_extension_supported',)})
def test_port_detail(self):
self._test_port_detail()
@test.create_stubs({api.neutron: ('port_get',
'is_extension_supported',)})
def test_port_detail_with_mac_learning(self):
self._test_port_detail(mac_learning=True)
def _test_port_detail(self, mac_learning=False):
port = self.ports.first()
api.neutron.port_get(IsA(http.HttpRequest), port.id)\
.AndReturn(self.ports.first())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:project:networks:ports:detail',
args=[port.id]))
self.assertTemplateUsed(res, 'project/networks/ports/detail.html')
self.assertEqual(res.context['port'].id, port.id)
@test.create_stubs({api.neutron: ('port_get',)})
def test_port_detail_exception(self):
port = self.ports.first()
api.neutron.port_get(IsA(http.HttpRequest), port.id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:project:networks:ports:detail',
args=[port.id]))
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('port_get',
'is_extension_supported',)})
def test_port_update_get(self):
self._test_port_update_get()
@test.create_stubs({api.neutron: ('port_get',
'is_extension_supported',)})
def test_port_update_get_with_mac_learning(self):
self._test_port_update_get(mac_learning=True)
def _test_port_update_get(self, mac_learning=False, binding=False):
port = self.ports.first()
api.neutron.port_get(IsA(http.HttpRequest),
port.id)\
.AndReturn(port)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'binding')\
.AndReturn(binding)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
self.mox.ReplayAll()
url = reverse('horizon:project:networks:editport',
args=[port.network_id, port.id])
res = self.client.get(url)
self.assertTemplateUsed(res, 'project/networks/ports/update.html')
@test.create_stubs({api.neutron: ('port_get',
'is_extension_supported',
'port_update')})
def test_port_update_post(self):
self._test_port_update_post()
@test.create_stubs({api.neutron: ('port_get',
'is_extension_supported',
'port_update')})
def test_port_update_post_with_mac_learning(self):
self._test_port_update_post(mac_learning=True)
def _test_port_update_post(self, mac_learning=False, binding=False):
port = self.ports.first()
api.neutron.port_get(IsA(http.HttpRequest), port.id)\
.AndReturn(port)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'binding')\
.AndReturn(binding)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
extension_kwargs = {}
if binding:
extension_kwargs['binding__vnic_type'] = port.binding__vnic_type
if mac_learning:
extension_kwargs['mac_learning_enabled'] = True
api.neutron.port_update(IsA(http.HttpRequest), port.id,
name=port.name,
admin_state_up=port.admin_state_up,
**extension_kwargs)\
.AndReturn(port)
self.mox.ReplayAll()
form_data = {'network_id': port.network_id,
'port_id': port.id,
'name': port.name,
'admin_state': port.admin_state_up}
if binding:
form_data['binding__vnic_type'] = port.binding__vnic_type
if mac_learning:
form_data['mac_state'] = True
url = reverse('horizon:project:networks:editport',
args=[port.network_id, port.id])
res = self.client.post(url, form_data)
redir_url = reverse('horizon:project:networks:detail',
args=[port.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('port_get',
'is_extension_supported',
'port_update')})
def test_port_update_post_exception(self):
self._test_port_update_post_exception()
@test.create_stubs({api.neutron: ('port_get',
'is_extension_supported',
'port_update')})
def test_port_update_post_exception_with_mac_learning(self):
self._test_port_update_post_exception(mac_learning=True)
def _test_port_update_post_exception(self, mac_learning=False,
binding=False):
port = self.ports.first()
api.neutron.port_get(IsA(http.HttpRequest), port.id)\
.AndReturn(port)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'binding')\
.AndReturn(binding)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
extension_kwargs = {}
if binding:
extension_kwargs['binding__vnic_type'] = port.binding__vnic_type
if mac_learning:
extension_kwargs['mac_learning_enabled'] = True
api.neutron.port_update(IsA(http.HttpRequest), port.id,
name=port.name,
admin_state_up=port.admin_state_up,
**extension_kwargs)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {'network_id': port.network_id,
'port_id': port.id,
'name': port.name,
'admin_state': port.admin_state_up}
if binding:
form_data['binding__vnic_type'] = port.binding__vnic_type
if mac_learning:
form_data['mac_state'] = True
url = reverse('horizon:project:networks:editport',
args=[port.network_id, port.id])
res = self.client.post(url, form_data)
redir_url = reverse('horizon:project:networks:detail',
args=[port.network_id])
self.assertRedirectsNoFollow(res, redir_url)
class NetworkViewTests(test.TestCase):
def _test_create_button_disabled_when_quota_exceeded(
self, expected_string, network_quota=5, subnet_quota=5):
quota_data = self.quota_usages.first()
quota_data['networks']['available'] = network_quota
quota_data['subnets']['available'] = subnet_quota
api.neutron.network_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
shared=False).AndReturn(self.networks.list())
api.neutron.network_list(
IsA(http.HttpRequest),
shared=True).AndReturn([])
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/networks/index.html')
networks = res.context['networks_table'].data
self.assertItemsEqual(networks, self.networks.list())
self.assertContains(res, expected_string, html=True,
msg_prefix="The create button is not disabled")
@test.create_stubs({api.neutron: ('network_list',),
quotas: ('tenant_quota_usages',)})
def test_network_create_button_disabled_when_quota_exceeded_index(self):
create_link = networks_tables.CreateNetwork()
url = create_link.get_link_url()
classes = (list(create_link.get_default_classes())
+ list(create_link.classes))
link_name = "%s (%s)" % (six.text_type(create_link.verbose_name),
"Quota exceeded")
expected_string = "<a href='%s' title='%s' class='%s disabled' "\
"id='networks__action_create'>" \
"<span class='fa fa-plus'></span>%s</a>" \
% (url, link_name, " ".join(classes), link_name)
self._test_create_button_disabled_when_quota_exceeded(expected_string,
network_quota=0)
@test.create_stubs({api.neutron: ('network_list',),
quotas: ('tenant_quota_usages',)})
def test_subnet_create_button_disabled_when_quota_exceeded_index(self):
network_id = self.networks.first().id
create_link = networks_tables.CreateSubnet()
url = reverse(create_link.get_link_url(), args=[network_id])
classes = (list(create_link.get_default_classes())
+ list(create_link.classes))
link_name = "%s (%s)" % (six.text_type(create_link.verbose_name),
"Quota exceeded")
expected_string = "<a href='%s' class='%s disabled' "\
"id='networks__row_%s__action_subnet'>%s</a>" \
% (url, " ".join(classes), network_id, link_name)
self._test_create_button_disabled_when_quota_exceeded(expected_string,
subnet_quota=0)
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',),
quotas: ('tenant_quota_usages',)})
def test_subnet_create_button_disabled_when_quota_exceeded_detail(self):
network_id = self.networks.first().id
quota_data = self.quota_usages.first()
quota_data['subnets']['available'] = 0
api.neutron.network_get(
IsA(http.HttpRequest), network_id)\
.MultipleTimes().AndReturn(self.networks.first())
api.neutron.subnet_list(
IsA(http.HttpRequest), network_id=network_id)\
.AndReturn(self.subnets.list())
api.neutron.port_list(
IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.ports.first()])
api.neutron.is_extension_supported(
IsA(http.HttpRequest), 'mac-learning')\
.AndReturn(False)
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:project:networks:detail',
args=[network_id]))
self.assertTemplateUsed(res, 'project/networks/detail.html')
subnets = res.context['subnets_table'].data
self.assertItemsEqual(subnets, self.subnets.list())
class FakeTable(object):
kwargs = {'network_id': network_id}
create_link = subnets_tables.CreateSubnet()
create_link.table = FakeTable()
url = create_link.get_link_url()
classes = (list(create_link.get_default_classes())
+ list(create_link.classes))
link_name = "%s (%s)" % (six.text_type(create_link.verbose_name),
"Quota exceeded")
expected_string = "<a href='%s' title='%s' class='%s disabled' "\
"id='subnets__action_create'>" \
"<span class='fa fa-plus'></span>%s</a>" \
% (url, link_name, " ".join(classes), link_name)
self.assertContains(res, expected_string, html=True,
msg_prefix="The create button is not disabled")
| apache-2.0 |
cpg1111/kubernetes | cluster/juju/charms/trusty/kubernetes/unit_tests/lib/test_registrator.py | 96 | 2163 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from mock import MagicMock, patch
from path import Path
import pytest
import sys
d = Path('__file__').parent.abspath() / 'hooks'
sys.path.insert(0, d.abspath())
from lib.registrator import Registrator
class TestRegistrator():
def setup_method(self, method):
self.r = Registrator()
def test_data_type(self):
if type(self.r.data) is not dict:
pytest.fail("Invalid type")
@patch('json.loads')
@patch('httplib.HTTPConnection')
def test_register(self, httplibmock, jsonmock):
self.r.register('foo', 80, '/v1/test')
httplibmock.assert_called_with('foo', 80, timeout=12)
requestmock = httplibmock().request
requestmock.assert_called_with(
"POST", "/v1/test",
json.dumps(self.r.data),
{"Content-type": "application/json",
"Accept": "application/json"})
def test_command_succeeded(self):
response = MagicMock()
result = json.loads('{"status": "Failure", "kind": "Status", "code": 409, "apiVersion": "v1", "reason": "AlreadyExists", "details": {"kind": "node", "name": "10.200.147.200"}, "message": "node \\"10.200.147.200\\" already exists", "creationTimestamp": null}') # noqa
response.status = 200
self.r.command_succeeded(response, result)
response.status = 409
self.r.command_succeeded(response, result)
response.status = 500
with pytest.raises(RuntimeError):
self.r.command_succeeded(response, result)
| apache-2.0 |
dubourg/openturns | python/test/t_DistFunc_noncentralchisquare.py | 8 | 2054 | #! /usr/bin/env python
from __future__ import print_function
from openturns import *
TESTPREAMBLE()
RandomGenerator.SetSeed(0)
try:
# NonCentralChiSquare related functions
# dNonCentralChiSquare
nuMin = 0.2
nuMax = 5.0
n1 = 5
lambdaMin = 0.2
lambdaMax = 5.0
n2 = 5
xMin = 0.1
xMax = 0.9
nX = 5
for i1 in range(n1):
nu = nuMin + (nuMax - nuMin) * i1 / (n1 - 1)
for i2 in range(n2):
lambda_ = lambdaMin + (lambdaMax - lambdaMin) * i2 / (n2 - 1)
for iX in range(nX):
x = xMin + (xMax - xMin) * iX / (nX - 1)
print("dNonCentralChiSquare(", nu, ", ", lambda_, ", %.12g" %
x, ")=%.6g" % DistFunc.dNonCentralChiSquare(nu, lambda_, x))
# pNonCentralChiSquare
nuMin = 0.2
nuMax = 5.0
n1 = 5
lambdaMin = 0.2
lambdaMax = 5.0
n2 = 5
xMin = 0.1
xMax = 0.9
nX = 5
for i1 in range(n1):
nu = nuMin + (nuMax - nuMin) * i1 / (n1 - 1)
for i2 in range(n2):
lambda_ = lambdaMin + (lambdaMax - lambdaMin) * i2 / (n2 - 1)
for iX in range(nX):
x = xMin + (xMax - xMin) * iX / (nX - 1)
print("pNonCentralChiSquare(", nu, ", ", lambda_, ", %.12g" % x, ")=%.6g" % DistFunc.pNonCentralChiSquare(
nu, lambda_, x), ", complementary=%.6g" % DistFunc.pNonCentralChiSquare(nu, lambda_, x, True))
# rNonCentralChiSquare
nuMin = 0.2
nuMax = 5.0
n1 = 5
lambdaMin = 0.2
lambdaMax = 5.0
n2 = 5
nR = 5
for i1 in range(n1):
nu = nuMin + (nuMax - nuMin) * i1 / (n1 - 1)
for i2 in range(n2):
lambda_ = lambdaMin + (lambdaMax - lambdaMin) * i2 / (n2 - 1)
for iR in range(nR):
print("rNonCentralChiSquare(", nu, ", ", lambda_, ")=%.6g" %
DistFunc.rNonCentralChiSquare(nu, lambda_))
except:
import sys
print("t_DistFunc_noncentralchisquare.py",
sys.exc_info()[0], sys.exc_info()[1])
| gpl-3.0 |
bopo/cookiecutter-django | {{cookiecutter.project_slug}}/config/settings/production.py | 2 | 11973 | # -*- coding: utf-8 -*-
"""
Production Configurations
- Use Amazon's S3 for storing static files and uploaded media
- Use mailgun to send emails
- Use Redis for cache
{% if cookiecutter.use_sentry_for_error_reporting == 'y' %}
- Use sentry for error logging
{% endif %}
{% if cookiecutter.use_opbeat == 'y' %}
- Use opbeat for error reporting
{% endif %}
"""
from __future__ import absolute_import, unicode_literals
from boto.s3.connection import OrdinaryCallingFormat
from django.utils import six
{% if cookiecutter.use_sentry_for_error_reporting == 'y' %}
import logging
{% endif %}
from .base import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env('DJANGO_SECRET_KEY')
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
{%- if cookiecutter.use_sentry_for_error_reporting == 'y' %}
# raven sentry client
# See https://docs.sentry.io/clients/python/integrations/django/
INSTALLED_APPS += ['raven.contrib.django.raven_compat', ]
{% endif %}
{%- if cookiecutter.use_whitenoise == 'y' %}
# Use Whitenoise to serve static files
# See: https://whitenoise.readthedocs.io/
WHITENOISE_MIDDLEWARE = ['whitenoise.middleware.WhiteNoiseMiddleware', ]
MIDDLEWARE = WHITENOISE_MIDDLEWARE + MIDDLEWARE
{% endif %}
{%- if cookiecutter.use_sentry_for_error_reporting == 'y' -%}
RAVEN_MIDDLEWARE = ['raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware']
MIDDLEWARE = RAVEN_MIDDLEWARE + MIDDLEWARE
{% endif %}
{%- if cookiecutter.use_opbeat == 'y' -%}
# opbeat integration
# See https://opbeat.com/languages/django/
INSTALLED_APPS += ['opbeat.contrib.django', ]
OPBEAT = {
'ORGANIZATION_ID': env('DJANGO_OPBEAT_ORGANIZATION_ID'),
'APP_ID': env('DJANGO_OPBEAT_APP_ID'),
'SECRET_TOKEN': env('DJANGO_OPBEAT_SECRET_TOKEN')
}
MIDDLEWARE = ['opbeat.contrib.django.middleware.OpbeatAPMMiddleware', ] + MIDDLEWARE
{% endif %}
# SECURITY CONFIGURATION
# ------------------------------------------------------------------------------
# See https://docs.djangoproject.com/en/dev/ref/middleware/#module-django.middleware.security
# and https://docs.djangoproject.com/en/dev/howto/deployment/checklist/#run-manage-py-check-deploy
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
'DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
'DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
X_FRAME_OPTIONS = 'DENY'
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['{{cookiecutter.domain_name}}', ])
# END SITE CONFIGURATION
INSTALLED_APPS += ['gunicorn', ]
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.io/en/latest/index.html
INSTALLED_APPS += ['storages', ]
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7
# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
AWS_HEADERS = {
'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (
AWS_EXPIRY, AWS_EXPIRY))
}
# URL that handles the media served from MEDIA_ROOT, used for managing
# stored files.
{% if cookiecutter.use_whitenoise == 'y' -%}
MEDIA_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
{% else %}
# See:http://stackoverflow.com/questions/10390244/
from storages.backends.s3boto import S3BotoStorage
StaticRootS3BotoStorage = lambda: S3BotoStorage(location='static')
MediaRootS3BotoStorage = lambda: S3BotoStorage(location='media')
DEFAULT_FILE_STORAGE = 'config.settings.production.MediaRootS3BotoStorage'
MEDIA_URL = 'https://s3.amazonaws.com/%s/media/' % AWS_STORAGE_BUCKET_NAME
{%- endif %}
# Static Assets
# ------------------------
{% if cookiecutter.use_whitenoise == 'y' -%}
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
{% else %}
STATIC_URL = 'https://s3.amazonaws.com/%s/static/' % AWS_STORAGE_BUCKET_NAME
STATICFILES_STORAGE = 'config.settings.production.StaticRootS3BotoStorage'
# See: https://github.com/antonagestam/collectfast
# For Django 1.7+, 'collectfast' should come before
# 'django.contrib.staticfiles'
AWS_PRELOAD_METADATA = True
INSTALLED_APPS = ['collectfast', ] + INSTALLED_APPS
{%- endif %}
{% if cookiecutter.use_compressor == 'y'-%}
# COMPRESSOR
# ------------------------------------------------------------------------------
COMPRESS_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
COMPRESS_URL = STATIC_URL
COMPRESS_ENABLED = env.bool('COMPRESS_ENABLED', default=True)
{%- endif %}
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
default='{{cookiecutter.project_name}} <noreply@{{cookiecutter.domain_name}}>')
EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[{{cookiecutter.project_name}}]')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# Anymail with Mailgun
INSTALLED_APPS += ['anymail', ]
ANYMAIL = {
'MAILGUN_API_KEY': env('DJANGO_MAILGUN_API_KEY'),
'MAILGUN_SENDER_DOMAIN': env('MAILGUN_SENDER_DOMAIN')
}
EMAIL_BACKEND = 'anymail.backends.mailgun.MailgunBackend'
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
{% if cookiecutter.use_elasticbeanstalk_experimental.lower() == 'y' -%}
# Uses Amazon RDS for database hosting, which doesn't follow the Heroku-style spec
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': env('RDS_DB_NAME'),
'USER': env('RDS_USERNAME'),
'PASSWORD': env('RDS_PASSWORD'),
'HOST': env('RDS_HOSTNAME'),
'PORT': env('RDS_PORT'),
}
}
{% else %}
# Use the Heroku-style specification
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db('DATABASE_URL')
{%- endif %}
# CACHING
# ------------------------------------------------------------------------------
{% if cookiecutter.use_elasticbeanstalk_experimental.lower() == 'y' -%}
REDIS_LOCATION = 'redis://{}:{}/0'.format(
env('REDIS_ENDPOINT_ADDRESS'),
env('REDIS_PORT')
)
{% else %}
REDIS_LOCATION = '{0}/{1}'.format(env('REDIS_URL', default='redis://127.0.0.1:6379'), 0)
{%- endif %}
# Heroku URL does not pass the DB number, so we parse it in
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': REDIS_LOCATION,
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
'IGNORE_EXCEPTIONS': True, # mimics memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
}
}
}
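# Illustrative note (editorial addition, not generated by the template): with
# the CACHES setting above, application code uses the normal Django cache API;
# because IGNORE_EXCEPTIONS is set, a Redis outage degrades to cache misses
# instead of raising, e.g.:
#   from django.core.cache import cache
#   cache.set('greeting', 'hello', timeout=60)
#   cache.get('greeting')  # 'hello', or None if Redis was unreachable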
{% if cookiecutter.use_sentry_for_error_reporting == 'y' %}
# Sentry Configuration
SENTRY_DSN = env('DJANGO_SENTRY_DSN')
SENTRY_CLIENT = env('DJANGO_SENTRY_CLIENT', default='raven.contrib.django.raven_compat.DjangoClient')
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'root': {
'level': 'WARNING',
'handlers': ['sentry', ],
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'django.db.backends': {
'level': 'ERROR',
'handlers': ['console', ],
'propagate': False,
},
'raven': {
'level': 'DEBUG',
'handlers': ['console', ],
'propagate': False,
},
'sentry.errors': {
'level': 'DEBUG',
'handlers': ['console', ],
'propagate': False,
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'sentry', ],
'propagate': False,
},
},
}
SENTRY_CELERY_LOGLEVEL = env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO)
RAVEN_CONFIG = {
'CELERY_LOGLEVEL': env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO),
'DSN': SENTRY_DSN
}
{% elif cookiecutter.use_sentry_for_error_reporting == 'n' %}
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false', ],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins', ],
'level': 'ERROR',
'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'mail_admins', ],
'propagate': True
}
}
}
{% endif %}
# Custom Admin URL, use {% raw %}{% url 'admin:index' %}{% endraw %}
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Your production stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
| bsd-3-clause |
tukutela/Kay-Framework | kay/lib/werkzeug/contrib/atom.py | 25 | 14976 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.atom
~~~~~~~~~~~~~~~~~~~~~
This module provides a class called :class:`AtomFeed` which can be
used to generate feeds in the Atom syndication format (see :rfc:`4287`).
Example::
def atom_feed(request):
feed = AtomFeed("My Blog", feed_url=request.url,
url=request.host_url,
subtitle="My example blog for a feed test.")
for post in Post.query.limit(10).all():
feed.add(post.title, post.body, content_type='html',
author=post.author, url=post.url, id=post.uid,
updated=post.last_update, published=post.pub_date)
return feed.get_response()
:copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from datetime import datetime
from werkzeug.utils import escape
from werkzeug.wrappers import BaseResponse
XHTML_NAMESPACE = 'http://www.w3.org/1999/xhtml'
def _make_text_block(name, content, content_type=None):
"""Helper function for the builder that creates an XML text block."""
if content_type == 'xhtml':
return u'<%s type="xhtml"><div xmlns="%s">%s</div></%s>\n' % \
(name, XHTML_NAMESPACE, content, name)
if not content_type:
return u'<%s>%s</%s>\n' % (name, escape(content), name)
return u'<%s type="%s">%s</%s>\n' % (name, content_type,
escape(content), name)
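# Illustrative note (editorial addition, not part of the original module):
# assuming the default behaviour of werkzeug.utils.escape, the helper above
# would produce roughly the following for some made-up inputs:
#   _make_text_block('title', 'AT&T')                 -> u'<title>AT&amp;T</title>\n'
#   _make_text_block('title', 'AT&T', 'html')         -> u'<title type="html">AT&amp;T</title>\n'
#   _make_text_block('summary', '<p>Hi</p>', 'xhtml')  -> the content wrapped,
#       unescaped, in a <div> carrying the XHTML namespace.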
def format_iso8601(obj):
"""Format a datetime object for iso8601"""
return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
class AtomFeed(object):
"""A helper class that creates Atom feeds.
:param title: the title of the feed. Required.
:param title_type: the type attribute for the title element. One of
``'html'``, ``'text'`` or ``'xhtml'``.
:param url: the url for the feed (not the url *of* the feed)
:param id: a globally unique id for the feed. Must be an URI. If
not present the `feed_url` is used, but one of both is
required.
    :param updated: the time the feed was last modified. Must
be a :class:`datetime.datetime` object. If not
present the latest entry's `updated` is used.
:param feed_url: the URL to the feed. Should be the URL that was
requested.
:param author: the author of the feed. Must be either a string (the
name) or a dict with name (required) and uri or
email (both optional). Can be a list of (may be
mixed, too) strings and dicts, too, if there are
multiple authors. Required if not every entry has an
author element.
:param icon: an icon for the feed.
:param logo: a logo for the feed.
:param rights: copyright information for the feed.
:param rights_type: the type attribute for the rights element. One of
``'html'``, ``'text'`` or ``'xhtml'``. Default is
``'text'``.
:param subtitle: a short description of the feed.
:param subtitle_type: the type attribute for the subtitle element.
                          One of ``'html'``, ``'text'``
or ``'xhtml'``. Default is ``'text'``.
:param links: additional links. Must be a list of dictionaries with
href (required) and rel, type, hreflang, title, length
(all optional)
:param generator: the software that generated this feed. This must be
a tuple in the form ``(name, url, version)``. If
you don't want to specify one of them, set the item
to `None`.
:param entries: a list with the entries for the feed. Entries can also
be added later with :meth:`add`.
For more information on the elements see
http://www.atomenabled.org/developers/syndication/
Everywhere where a list is demanded, any iterable can be used.
"""
default_generator = ('Werkzeug', None, None)
def __init__(self, title=None, entries=None, **kwargs):
self.title = title
self.title_type = kwargs.get('title_type', 'text')
self.url = kwargs.get('url')
self.feed_url = kwargs.get('feed_url', self.url)
self.id = kwargs.get('id', self.feed_url)
self.updated = kwargs.get('updated')
self.author = kwargs.get('author', ())
self.icon = kwargs.get('icon')
self.logo = kwargs.get('logo')
self.rights = kwargs.get('rights')
self.rights_type = kwargs.get('rights_type')
self.subtitle = kwargs.get('subtitle')
self.subtitle_type = kwargs.get('subtitle_type', 'text')
self.generator = kwargs.get('generator')
if self.generator is None:
self.generator = self.default_generator
self.links = kwargs.get('links', [])
self.entries = entries and list(entries) or []
if not hasattr(self.author, '__iter__') \
or isinstance(self.author, (basestring, dict)):
self.author = [self.author]
for i, author in enumerate(self.author):
if not isinstance(author, dict):
self.author[i] = {'name': author}
if not self.title:
raise ValueError('title is required')
if not self.id:
raise ValueError('id is required')
for author in self.author:
if 'name' not in author:
raise TypeError('author must contain at least a name')
def add(self, *args, **kwargs):
"""Add a new entry to the feed. This function can either be called
with a :class:`FeedEntry` or some keyword and positional arguments
that are forwarded to the :class:`FeedEntry` constructor.
"""
if len(args) == 1 and not kwargs and isinstance(args[0], FeedEntry):
self.entries.append(args[0])
else:
kwargs['feed_url'] = self.feed_url
self.entries.append(FeedEntry(*args, **kwargs))
def __repr__(self):
return '<%s %r (%d entries)>' % (
self.__class__.__name__,
self.title,
len(self.entries)
)
def generate(self):
"""Return a generator that yields pieces of XML."""
# atom demands either an author element in every entry or a global one
if not self.author:
if False in map(lambda e: bool(e.author), self.entries):
            self.author = ({'name': u'Unknown author'},)
if not self.updated:
dates = sorted([entry.updated for entry in self.entries])
self.updated = dates and dates[-1] or datetime.utcnow()
yield u'<?xml version="1.0" encoding="utf-8"?>\n'
yield u'<feed xmlns="http://www.w3.org/2005/Atom">\n'
yield ' ' + _make_text_block('title', self.title, self.title_type)
yield u' <id>%s</id>\n' % escape(self.id)
yield u' <updated>%s</updated>\n' % format_iso8601(self.updated)
if self.url:
yield u' <link href="%s" />\n' % escape(self.url, True)
if self.feed_url:
yield u' <link href="%s" rel="self" />\n' % \
escape(self.feed_url, True)
for link in self.links:
yield u' <link %s/>\n' % ''.join('%s="%s" ' % \
(k, escape(link[k], True)) for k in link)
for author in self.author:
yield u' <author>\n'
yield u' <name>%s</name>\n' % escape(author['name'])
if 'uri' in author:
yield u' <uri>%s</uri>\n' % escape(author['uri'])
if 'email' in author:
yield ' <email>%s</email>\n' % escape(author['email'])
yield ' </author>\n'
if self.subtitle:
yield ' ' + _make_text_block('subtitle', self.subtitle,
self.subtitle_type)
if self.icon:
yield u' <icon>%s</icon>\n' % escape(self.icon)
if self.logo:
yield u' <logo>%s</logo>\n' % escape(self.logo)
if self.rights:
yield ' ' + _make_text_block('rights', self.rights,
self.rights_type)
generator_name, generator_url, generator_version = self.generator
if generator_name or generator_url or generator_version:
tmp = [u' <generator']
if generator_url:
tmp.append(u' uri="%s"' % escape(generator_url, True))
if generator_version:
tmp.append(u' version="%s"' % escape(generator_version, True))
tmp.append(u'>%s</generator>\n' % escape(generator_name))
yield u''.join(tmp)
for entry in self.entries:
for line in entry.generate():
yield u' ' + line
yield u'</feed>\n'
def to_string(self):
"""Convert the feed into a string."""
return u''.join(self.generate())
def get_response(self):
"""Return a response object for the feed."""
return BaseResponse(self.to_string(), mimetype='application/atom+xml')
def __call__(self, environ, start_response):
"""Use the class as WSGI response object."""
return self.get_response()(environ, start_response)
def __unicode__(self):
return self.to_string()
def __str__(self):
return self.to_string().encode('utf-8')
class FeedEntry(object):
"""Represents a single entry in a feed.
:param title: the title of the entry. Required.
:param title_type: the type attribute for the title element. One of
``'html'``, ``'text'`` or ``'xhtml'``.
:param content: the content of the entry.
:param content_type: the type attribute for the content element. One
of ``'html'``, ``'text'`` or ``'xhtml'``.
:param summary: a summary of the entry's content.
:param summary_type: the type attribute for the summary element. One
of ``'html'``, ``'text'`` or ``'xhtml'``.
:param url: the url for the entry.
:param id: a globally unique id for the entry. Must be an URI. If
not present the URL is used, but one of both is required.
    :param updated: the time the entry was last modified. Must
be a :class:`datetime.datetime` object. Required.
    :param author: the author of the entry. Must be either a string (the
                   name) or a dict with name (required) and uri or
                   email (both optional). It can also be a list of
                   strings and dicts (freely mixed) if there are
                   multiple authors. Required if the feed does not have
                   an author element.
:param published: the time the entry was initially published. Must
be a :class:`datetime.datetime` object.
:param rights: copyright information for the entry.
:param rights_type: the type attribute for the rights element. One of
``'html'``, ``'text'`` or ``'xhtml'``. Default is
``'text'``.
:param links: additional links. Must be a list of dictionaries with
href (required) and rel, type, hreflang, title, length
(all optional)
:param xml_base: The xml base (url) for this feed item. If not provided
it will default to the item url.
For more information on the elements see
http://www.atomenabled.org/developers/syndication/
Everywhere where a list is demanded, any iterable can be used.
"""
def __init__(self, title=None, content=None, feed_url=None, **kwargs):
self.title = title
self.title_type = kwargs.get('title_type', 'text')
self.content = content
self.content_type = kwargs.get('content_type', 'html')
self.url = kwargs.get('url')
self.id = kwargs.get('id', self.url)
self.updated = kwargs.get('updated')
self.summary = kwargs.get('summary')
self.summary_type = kwargs.get('summary_type', 'html')
self.author = kwargs.get('author')
self.published = kwargs.get('published')
self.rights = kwargs.get('rights')
self.links = kwargs.get('links', [])
self.xml_base = kwargs.get('xml_base', feed_url)
if not hasattr(self.author, '__iter__') \
or isinstance(self.author, (basestring, dict)):
self.author = [self.author]
for i, author in enumerate(self.author):
if not isinstance(author, dict):
self.author[i] = {'name': author}
if not self.title:
raise ValueError('title is required')
if not self.id:
raise ValueError('id is required')
if not self.updated:
raise ValueError('updated is required')
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.title
)
def generate(self):
"""Yields pieces of ATOM XML."""
base = ''
if self.xml_base:
base = ' xml:base="%s"' % escape(self.xml_base, True)
yield u'<entry%s>\n' % base
yield u' ' + _make_text_block('title', self.title, self.title_type)
yield u' <id>%s</id>\n' % escape(self.id)
yield u' <updated>%s</updated>\n' % format_iso8601(self.updated)
if self.published:
yield u' <published>%s</published>\n' % \
format_iso8601(self.published)
if self.url:
yield u' <link href="%s" />\n' % escape(self.url)
for author in self.author:
yield u' <author>\n'
yield u' <name>%s</name>\n' % escape(author['name'])
if 'uri' in author:
yield u' <uri>%s</uri>\n' % escape(author['uri'])
if 'email' in author:
yield u' <email>%s</email>\n' % escape(author['email'])
yield u' </author>\n'
for link in self.links:
yield u' <link %s/>\n' % ''.join('%s="%s" ' % \
(k, escape(link[k], True)) for k in link)
if self.summary:
yield u' ' + _make_text_block('summary', self.summary,
self.summary_type)
if self.content:
yield u' ' + _make_text_block('content', self.content,
self.content_type)
yield u'</entry>\n'
def to_string(self):
"""Convert the feed item into a unicode object."""
return u''.join(self.generate())
def __unicode__(self):
return self.to_string()
def __str__(self):
return self.to_string().encode('utf-8')
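# Illustrative usage sketch (editorial addition, not part of the original
# module): builds a feed from FeedEntry objects created up front instead of
# via AtomFeed.add(); all titles, ids and URLs below are made up.
if __name__ == '__main__':
    _entry = FeedEntry('Hello world', 'First post.', content_type='text',
                       url='http://example.com/posts/1',
                       id='urn:example:post:1',
                       updated=datetime.utcnow(),
                       author='Jane Doe')
    _feed = AtomFeed('Example blog', url='http://example.com/',
                     feed_url='http://example.com/feed.atom',
                     entries=[_entry])
    print(_feed.to_string())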
| bsd-3-clause |
fo2rist/infra-strike | backend/venv/Lib/site-packages/pip/_vendor/distlib/locators.py | 129 | 50493 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2015 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import gzip
from io import BytesIO
import json
import logging
import os
import posixpath
import re
try:
import threading
except ImportError: # pragma: no cover
import dummy_threading as threading
import zlib
from . import DistlibException
from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url,
queue, quote, unescape, string_types, build_opener,
HTTPRedirectHandler as BaseRedirectHandler,
Request, HTTPError, URLError)
from .database import Distribution, DistributionPath, make_dist
from .metadata import Metadata
from .util import (cached_property, parse_credentials, ensure_slash,
split_filename, get_project_data, parse_requirement,
parse_name_and_version, ServerProxy)
from .version import get_scheme, UnsupportedVersionError
from .wheel import Wheel, is_compatible
logger = logging.getLogger(__name__)
HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)')
CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I)
HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml')
DEFAULT_INDEX = 'https://pypi.python.org/pypi'
def get_all_distribution_names(url=None):
"""
Return all distribution names known by an index.
:param url: The URL of the index.
:return: A list of all known distribution names.
"""
if url is None:
url = DEFAULT_INDEX
client = ServerProxy(url, timeout=3.0)
return client.list_packages()
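# Illustrative usage (editorial addition): both calls below return a list of
# project name strings; the explicit URL is only an example and any
# XML-RPC-compatible index URL could be passed instead.
#
#   names = get_all_distribution_names()  # uses DEFAULT_INDEX
#   names = get_all_distribution_names('https://pypi.python.org/pypi')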
class RedirectHandler(BaseRedirectHandler):
"""
A class to work around a bug in some Python 3.2.x releases.
"""
# There's a bug in the base version for some 3.2.x
# (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header
# returns e.g. /abc, it bails because it says the scheme ''
# is bogus, when actually it should use the request's
# URL for the scheme. See Python issue #13696.
def http_error_302(self, req, fp, code, msg, headers):
# Some servers (incorrectly) return multiple Location headers
# (so probably same goes for URI). Use first header.
newurl = None
for key in ('location', 'uri'):
if key in headers:
newurl = headers[key]
break
if newurl is None:
return
urlparts = urlparse(newurl)
if urlparts.scheme == '':
newurl = urljoin(req.get_full_url(), newurl)
if hasattr(headers, 'replace_header'):
headers.replace_header(key, newurl)
else:
headers[key] = newurl
return BaseRedirectHandler.http_error_302(self, req, fp, code, msg,
headers)
http_error_301 = http_error_303 = http_error_307 = http_error_302
class Locator(object):
"""
A base class for locators - things that locate distributions.
"""
source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz')
binary_extensions = ('.egg', '.exe', '.whl')
excluded_extensions = ('.pdf',)
# A list of tags indicating which wheels you want to match. The default
# value of None matches against the tags compatible with the running
# Python. If you want to match other values, set wheel_tags on a locator
# instance to a list of tuples (pyver, abi, arch) which you want to match.
wheel_tags = None
downloadable_extensions = source_extensions + ('.whl',)
def __init__(self, scheme='default'):
"""
Initialise an instance.
:param scheme: Because locators look for most recent versions, they
need to know the version scheme to use. This specifies
the current PEP-recommended scheme - use ``'legacy'``
if you need to support existing distributions on PyPI.
"""
self._cache = {}
self.scheme = scheme
# Because of bugs in some of the handlers on some of the platforms,
# we use our own opener rather than just using urlopen.
self.opener = build_opener(RedirectHandler())
# If get_project() is called from locate(), the matcher instance
# is set from the requirement passed to locate(). See issue #18 for
# why this can be useful to know.
self.matcher = None
def clear_cache(self):
self._cache.clear()
def _get_scheme(self):
return self._scheme
def _set_scheme(self, value):
self._scheme = value
scheme = property(_get_scheme, _set_scheme)
def _get_project(self, name):
"""
For a given project, get a dictionary mapping available versions to Distribution
instances.
This should be implemented in subclasses.
If called from a locate() request, self.matcher will be set to a
matcher for the requirement to satisfy, otherwise it will be None.
"""
raise NotImplementedError('Please implement in the subclass')
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Please implement in the subclass')
def get_project(self, name):
"""
For a given project, get a dictionary mapping available versions to Distribution
instances.
This calls _get_project to do all the work, and just implements a caching layer on top.
"""
if self._cache is None:
result = self._get_project(name)
elif name in self._cache:
result = self._cache[name]
else:
result = self._get_project(name)
self._cache[name] = result
return result
def score_url(self, url):
"""
Give an url a score which can be used to choose preferred URLs
for a given project release.
"""
t = urlparse(url)
basename = posixpath.basename(t.path)
compatible = True
is_wheel = basename.endswith('.whl')
if is_wheel:
compatible = is_compatible(Wheel(basename), self.wheel_tags)
        return (t.scheme == 'https', 'pypi.python.org' in t.netloc,
is_wheel, compatible, basename)
def prefer_url(self, url1, url2):
"""
Choose one of two URLs where both are candidates for distribution
archives for the same version of a distribution (for example,
.tar.gz vs. zip).
The current implementation favours https:// URLs over http://, archives
from PyPI over those from other locations, wheel compatibility (if a
wheel) and then the archive name.
"""
result = url2
if url1:
s1 = self.score_url(url1)
s2 = self.score_url(url2)
if s1 > s2:
result = url1
if result != url2:
logger.debug('Not replacing %r with %r', url1, url2)
else:
logger.debug('Replacing %r with %r', url1, url2)
return result
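    # Illustrative note (editorial addition): prefer_url() relies on the tuple
    # ordering produced by score_url(). For two hypothetical candidates,
    #   score_url('https://pypi.python.org/packages/source/F/Foo/Foo-1.0.tar.gz')
    #   score_url('http://example.com/dists/Foo-1.0.zip')
    # the first tuple compares greater (https wins, then the pypi.python.org
    # host), so prefer_url() would keep the first URL.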
def split_filename(self, filename, project_name):
"""
Attempt to split a filename in project name, version and Python version.
"""
return split_filename(filename, project_name)
def convert_url_to_download_info(self, url, project_name):
"""
See if a URL is a candidate for a download URL for a project (the URL
has typically been scraped from an HTML page).
If it is, a dictionary is returned with keys "name", "version",
"filename" and "url"; otherwise, None is returned.
"""
def same_project(name1, name2):
name1, name2 = name1.lower(), name2.lower()
if name1 == name2:
result = True
else:
# distribute replaces '-' by '_' in project names, so it
# can tell where the version starts in a filename.
result = name1.replace('_', '-') == name2.replace('_', '-')
return result
result = None
scheme, netloc, path, params, query, frag = urlparse(url)
if frag.lower().startswith('egg='):
logger.debug('%s: version hint in fragment: %r',
project_name, frag)
m = HASHER_HASH.match(frag)
if m:
algo, digest = m.groups()
else:
algo, digest = None, None
origpath = path
if path and path[-1] == '/':
path = path[:-1]
if path.endswith('.whl'):
try:
wheel = Wheel(path)
if is_compatible(wheel, self.wheel_tags):
if project_name is None:
include = True
else:
include = same_project(wheel.name, project_name)
if include:
result = {
'name': wheel.name,
'version': wheel.version,
'filename': wheel.filename,
'url': urlunparse((scheme, netloc, origpath,
params, query, '')),
'python-version': ', '.join(
['.'.join(list(v[2:])) for v in wheel.pyver]),
}
except Exception as e:
logger.warning('invalid path for wheel: %s', path)
elif path.endswith(self.downloadable_extensions):
path = filename = posixpath.basename(path)
for ext in self.downloadable_extensions:
if path.endswith(ext):
path = path[:-len(ext)]
t = self.split_filename(path, project_name)
if not t:
logger.debug('No match for project/version: %s', path)
else:
name, version, pyver = t
if not project_name or same_project(project_name, name):
result = {
'name': name,
'version': version,
'filename': filename,
'url': urlunparse((scheme, netloc, origpath,
params, query, '')),
#'packagetype': 'sdist',
}
if pyver:
result['python-version'] = pyver
break
if result and algo:
result['%s_digest' % algo] = digest
return result
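    # Illustrative example (editorial addition, names are made up): for an
    # sdist URL such as
    #   https://pypi.python.org/packages/source/F/Foo/Foo-1.0.tar.gz
    # convert_url_to_download_info(url, 'Foo') returns a dict roughly of the
    # form {'name': 'Foo', 'version': '1.0', 'filename': 'Foo-1.0.tar.gz',
    # 'url': <the URL without its fragment>}, plus an 'md5_digest' or
    # 'sha256_digest' entry when the fragment carries a hash.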
def _get_digest(self, info):
"""
Get a digest from a dictionary by looking at keys of the form
'algo_digest'.
Returns a 2-tuple (algo, digest) if found, else None. Currently
looks only for SHA256, then MD5.
"""
result = None
for algo in ('sha256', 'md5'):
key = '%s_digest' % algo
if key in info:
result = (algo, info[key])
break
return result
def _update_version_data(self, result, info):
"""
Update a result dictionary (the final result from _get_project) with a
dictionary for a specific version, which typically holds information
gleaned from a filename or URL for an archive for the distribution.
"""
name = info.pop('name')
version = info.pop('version')
if version in result:
dist = result[version]
md = dist.metadata
else:
dist = make_dist(name, version, scheme=self.scheme)
md = dist.metadata
dist.digest = digest = self._get_digest(info)
url = info['url']
result['digests'][url] = digest
if md.source_url != info['url']:
md.source_url = self.prefer_url(md.source_url, url)
result['urls'].setdefault(version, set()).add(url)
dist.locator = self
result[version] = dist
def locate(self, requirement, prereleases=False):
"""
Find the most recent distribution which matches the given
requirement.
:param requirement: A requirement of the form 'foo (1.0)' or perhaps
'foo (>= 1.0, < 2.0, != 1.3)'
:param prereleases: If ``True``, allow pre-release versions
to be located. Otherwise, pre-release versions
are not returned.
:return: A :class:`Distribution` instance, or ``None`` if no such
distribution could be located.
"""
result = None
r = parse_requirement(requirement)
if r is None:
raise DistlibException('Not a valid requirement: %r' % requirement)
scheme = get_scheme(self.scheme)
self.matcher = matcher = scheme.matcher(r.requirement)
logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
versions = self.get_project(r.name)
if len(versions) > 2: # urls and digests keys are present
# sometimes, versions are invalid
slist = []
vcls = matcher.version_class
for k in versions:
if k in ('urls', 'digests'):
continue
try:
if not matcher.match(k):
logger.debug('%s did not match %r', matcher, k)
else:
if prereleases or not vcls(k).is_prerelease:
slist.append(k)
else:
logger.debug('skipping pre-release '
'version %s of %s', k, matcher.name)
except Exception: # pragma: no cover
logger.warning('error matching %s with %r', matcher, k)
pass # slist.append(k)
if len(slist) > 1:
slist = sorted(slist, key=scheme.key)
if slist:
logger.debug('sorted list: %s', slist)
version = slist[-1]
result = versions[version]
if result:
if r.extras:
result.extras = r.extras
result.download_urls = versions.get('urls', {}).get(version, set())
d = {}
sd = versions.get('digests', {})
for url in result.download_urls:
if url in sd:
d[url] = sd[url]
result.digests = d
self.matcher = None
return result
class PyPIRPCLocator(Locator):
"""
This locator uses XML-RPC to locate distributions. It therefore
cannot be used with simple mirrors (that only mirror file content).
"""
def __init__(self, url, **kwargs):
"""
Initialise an instance.
:param url: The URL to use for XML-RPC.
:param kwargs: Passed to the superclass constructor.
"""
super(PyPIRPCLocator, self).__init__(**kwargs)
self.base_url = url
self.client = ServerProxy(url, timeout=3.0)
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
return set(self.client.list_packages())
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
versions = self.client.package_releases(name, True)
for v in versions:
urls = self.client.release_urls(name, v)
data = self.client.release_data(name, v)
metadata = Metadata(scheme=self.scheme)
metadata.name = data['name']
metadata.version = data['version']
metadata.license = data.get('license')
metadata.keywords = data.get('keywords', [])
metadata.summary = data.get('summary')
dist = Distribution(metadata)
if urls:
info = urls[0]
metadata.source_url = info['url']
dist.digest = self._get_digest(info)
dist.locator = self
result[v] = dist
for info in urls:
url = info['url']
digest = self._get_digest(info)
result['urls'].setdefault(v, set()).add(url)
result['digests'][url] = digest
return result
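# Illustrative usage sketch only (not part of the original module). PyPI's
# XML-RPC interface has since been deprecated, so treat the endpoint below as
# a historical example:
#
#   rpc_locator = PyPIRPCLocator('https://pypi.python.org/pypi')
#   versions = rpc_locator.get_project('beautifulsoup4')
#   # 'versions' maps version strings to Distribution instances, plus the
#   # special 'urls' and 'digests' keys populated above.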
class PyPIJSONLocator(Locator):
"""
This locator uses PyPI's JSON interface. It's very limited in functionality
and probably not worth using.
"""
def __init__(self, url, **kwargs):
super(PyPIJSONLocator, self).__init__(**kwargs)
self.base_url = ensure_slash(url)
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Not available from this locator')
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
url = urljoin(self.base_url, '%s/json' % quote(name))
try:
resp = self.opener.open(url)
data = resp.read().decode() # for now
d = json.loads(data)
md = Metadata(scheme=self.scheme)
data = d['info']
md.name = data['name']
md.version = data['version']
md.license = data.get('license')
md.keywords = data.get('keywords', [])
md.summary = data.get('summary')
dist = Distribution(md)
dist.locator = self
urls = d['urls']
result[md.version] = dist
for info in d['urls']:
url = info['url']
dist.download_urls.add(url)
dist.digests[url] = self._get_digest(info)
result['urls'].setdefault(md.version, set()).add(url)
result['digests'][url] = self._get_digest(info)
# Now get other releases
for version, infos in d['releases'].items():
if version == md.version:
continue # already done
omd = Metadata(scheme=self.scheme)
omd.name = md.name
omd.version = version
odist = Distribution(omd)
odist.locator = self
result[version] = odist
for info in infos:
url = info['url']
odist.download_urls.add(url)
odist.digests[url] = self._get_digest(info)
result['urls'].setdefault(version, set()).add(url)
result['digests'][url] = self._get_digest(info)
# for info in urls:
# md.source_url = info['url']
# dist.digest = self._get_digest(info)
# dist.locator = self
# for info in urls:
# url = info['url']
# result['urls'].setdefault(md.version, set()).add(url)
# result['digests'][url] = self._get_digest(info)
except Exception as e:
logger.exception('JSON fetch failed: %s', e)
return result
class Page(object):
"""
This class represents a scraped HTML page.
"""
# The following slightly hairy-looking regex just looks for the contents of
# an anchor link, which has an attribute "href" either immediately preceded
# or immediately followed by a "rel" attribute. The attribute values can be
# declared with double quotes, single quotes or no quotes - which leads to
# the length of the expression.
    _href = re.compile(r"""
(rel\s*=\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\s\n]*))\s+)?
href\s*=\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\s\n]*))
(\s+rel\s*=\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\s\n]*)))?
""", re.I | re.S | re.X)
_base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)
def __init__(self, data, url):
"""
Initialise an instance with the Unicode page contents and the URL they
came from.
"""
self.data = data
self.base_url = self.url = url
m = self._base.search(self.data)
if m:
self.base_url = m.group(1)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
@cached_property
def links(self):
"""
Return the URLs of all the links on a page together with information
about their "rel" attribute, for determining which ones to treat as
downloads and which ones to queue for further scraping.
"""
def clean(url):
"Tidy up an URL."
scheme, netloc, path, params, query, frag = urlparse(url)
return urlunparse((scheme, netloc, quote(path),
params, query, frag))
result = set()
for match in self._href.finditer(self.data):
d = match.groupdict('')
rel = (d['rel1'] or d['rel2'] or d['rel3'] or
d['rel4'] or d['rel5'] or d['rel6'])
url = d['url1'] or d['url2'] or d['url3']
url = urljoin(self.base_url, url)
url = unescape(url)
url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
result.add((url, rel))
# We sort the result, hoping to bring the most recent versions
# to the front
result = sorted(result, key=lambda t: t[0], reverse=True)
return result
class SimpleScrapingLocator(Locator):
"""
A locator which scrapes HTML pages to locate downloads for a distribution.
This runs multiple threads to do the I/O; performance is at least as good
as pip's PackageFinder, which works in an analogous fashion.
"""
# These are used to deal with various Content-Encoding schemes.
decoders = {
'deflate': zlib.decompress,
        'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
'none': lambda b: b,
}
def __init__(self, url, timeout=None, num_workers=10, **kwargs):
"""
Initialise an instance.
:param url: The root URL to use for scraping.
:param timeout: The timeout, in seconds, to be applied to requests.
This defaults to ``None`` (no timeout specified).
        :param num_workers: The number of worker threads to use for I/O.
                            This defaults to 10.
:param kwargs: Passed to the superclass.
"""
super(SimpleScrapingLocator, self).__init__(**kwargs)
self.base_url = ensure_slash(url)
self.timeout = timeout
self._page_cache = {}
self._seen = set()
self._to_fetch = queue.Queue()
self._bad_hosts = set()
self.skip_externals = False
self.num_workers = num_workers
self._lock = threading.RLock()
# See issue #45: we need to be resilient when the locator is used
# in a thread, e.g. with concurrent.futures. We can't use self._lock
# as it is for coordinating our internal threads - the ones created
# in _prepare_threads.
self._gplock = threading.RLock()
def _prepare_threads(self):
"""
Threads are created only when get_project is called, and terminate
before it returns. They are there primarily to parallelise I/O (i.e.
fetching web pages).
"""
self._threads = []
for i in range(self.num_workers):
t = threading.Thread(target=self._fetch)
t.setDaemon(True)
t.start()
self._threads.append(t)
def _wait_threads(self):
"""
Tell all the threads to terminate (by sending a sentinel value) and
wait for them to do so.
"""
# Note that you need two loops, since you can't say which
# thread will get each sentinel
for t in self._threads:
self._to_fetch.put(None) # sentinel
for t in self._threads:
t.join()
self._threads = []
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
with self._gplock:
self.result = result
self.project_name = name
url = urljoin(self.base_url, '%s/' % quote(name))
self._seen.clear()
self._page_cache.clear()
self._prepare_threads()
try:
logger.debug('Queueing %s', url)
self._to_fetch.put(url)
self._to_fetch.join()
finally:
self._wait_threads()
del self.result
return result
platform_dependent = re.compile(r'\b(linux-(i\d86|x86_64|arm\w+)|'
r'win(32|-amd64)|macosx-?\d+)\b', re.I)
def _is_platform_dependent(self, url):
"""
Does an URL refer to a platform-specific download?
"""
return self.platform_dependent.search(url)
def _process_download(self, url):
"""
See if an URL is a suitable download for a project.
If it is, register information in the result dictionary (for
_get_project) about the specific version it's for.
Note that the return value isn't actually used other than as a boolean
value.
"""
if self._is_platform_dependent(url):
info = None
else:
info = self.convert_url_to_download_info(url, self.project_name)
logger.debug('process_download: %s -> %s', url, info)
if info:
with self._lock: # needed because self.result is shared
self._update_version_data(self.result, info)
return info
def _should_queue(self, link, referrer, rel):
"""
Determine whether a link URL from a referring page and with a
particular "rel" attribute should be queued for scraping.
"""
scheme, netloc, path, _, _, _ = urlparse(link)
if path.endswith(self.source_extensions + self.binary_extensions +
self.excluded_extensions):
result = False
elif self.skip_externals and not link.startswith(self.base_url):
result = False
elif not referrer.startswith(self.base_url):
result = False
elif rel not in ('homepage', 'download'):
result = False
elif scheme not in ('http', 'https', 'ftp'):
result = False
elif self._is_platform_dependent(link):
result = False
else:
host = netloc.split(':', 1)[0]
if host.lower() == 'localhost':
result = False
else:
result = True
logger.debug('should_queue: %s (%s) from %s -> %s', link, rel,
referrer, result)
return result
def _fetch(self):
"""
Get a URL to fetch from the work queue, get the HTML page, examine its
links for download candidates and candidates for further scraping.
This is a handy method to run in a thread.
"""
while True:
url = self._to_fetch.get()
try:
if url:
page = self.get_page(url)
if page is None: # e.g. after an error
continue
for link, rel in page.links:
if link not in self._seen:
self._seen.add(link)
if (not self._process_download(link) and
self._should_queue(link, url, rel)):
logger.debug('Queueing %s from %s', link, url)
self._to_fetch.put(link)
finally:
# always do this, to avoid hangs :-)
self._to_fetch.task_done()
if not url:
#logger.debug('Sentinel seen, quitting.')
break
def get_page(self, url):
"""
Get the HTML for an URL, possibly from an in-memory cache.
XXX TODO Note: this cache is never actually cleared. It's assumed that
the data won't get stale over the lifetime of a locator instance (not
necessarily true for the default_locator).
"""
# http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
scheme, netloc, path, _, _, _ = urlparse(url)
if scheme == 'file' and os.path.isdir(url2pathname(path)):
url = urljoin(ensure_slash(url), 'index.html')
if url in self._page_cache:
result = self._page_cache[url]
logger.debug('Returning %s from cache: %s', url, result)
else:
host = netloc.split(':', 1)[0]
result = None
if host in self._bad_hosts:
logger.debug('Skipping %s due to bad host %s', url, host)
else:
req = Request(url, headers={'Accept-encoding': 'identity'})
try:
logger.debug('Fetching %s', url)
resp = self.opener.open(req, timeout=self.timeout)
logger.debug('Fetched %s', url)
headers = resp.info()
content_type = headers.get('Content-Type', '')
if HTML_CONTENT_TYPE.match(content_type):
final_url = resp.geturl()
data = resp.read()
encoding = headers.get('Content-Encoding')
if encoding:
decoder = self.decoders[encoding] # fail if not found
data = decoder(data)
encoding = 'utf-8'
m = CHARSET.search(content_type)
if m:
encoding = m.group(1)
try:
data = data.decode(encoding)
except UnicodeError: # pragma: no cover
data = data.decode('latin-1') # fallback
result = Page(data, final_url)
self._page_cache[final_url] = result
except HTTPError as e:
if e.code != 404:
logger.exception('Fetch failed: %s: %s', url, e)
except URLError as e: # pragma: no cover
logger.exception('Fetch failed: %s: %s', url, e)
with self._lock:
self._bad_hosts.add(host)
except Exception as e: # pragma: no cover
logger.exception('Fetch failed: %s: %s', url, e)
finally:
self._page_cache[url] = result # even if None (failure)
return result
_distname_re = re.compile('<a href=[^>]*>([^<]+)<')
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
page = self.get_page(self.base_url)
if not page:
raise DistlibException('Unable to get %s' % self.base_url)
for match in self._distname_re.finditer(page.data):
result.add(match.group(1))
return result
class DirectoryLocator(Locator):
"""
This class locates distributions in a directory tree.
"""
def __init__(self, path, **kwargs):
"""
Initialise an instance.
:param path: The root of the directory tree to search.
:param kwargs: Passed to the superclass constructor,
except for:
* recursive - if True (the default), subdirectories are
recursed into. If False, only the top-level directory
is searched,
"""
self.recursive = kwargs.pop('recursive', True)
super(DirectoryLocator, self).__init__(**kwargs)
path = os.path.abspath(path)
if not os.path.isdir(path): # pragma: no cover
raise DistlibException('Not a directory: %r' % path)
self.base_dir = path
def should_include(self, filename, parent):
"""
Should a filename be considered as a candidate for a distribution
archive? As well as the filename, the directory which contains it
is provided, though not used by the current implementation.
"""
return filename.endswith(self.downloadable_extensions)
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
for root, dirs, files in os.walk(self.base_dir):
for fn in files:
if self.should_include(fn, root):
fn = os.path.join(root, fn)
url = urlunparse(('file', '',
pathname2url(os.path.abspath(fn)),
'', '', ''))
info = self.convert_url_to_download_info(url, name)
if info:
self._update_version_data(result, info)
if not self.recursive:
break
return result
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
for root, dirs, files in os.walk(self.base_dir):
for fn in files:
if self.should_include(fn, root):
fn = os.path.join(root, fn)
url = urlunparse(('file', '',
pathname2url(os.path.abspath(fn)),
'', '', ''))
info = self.convert_url_to_download_info(url, None)
if info:
result.add(info['name'])
if not self.recursive:
break
return result
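# Illustrative usage sketch only (not part of the original module; the path
# below is hypothetical):
#
#   dir_locator = DirectoryLocator('/path/to/local/archives', recursive=False)
#   dist = dir_locator.locate('foo (>= 1.0)')
#   if dist is not None:
#       print(dist.name_and_version, dist.source_url)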
class JSONLocator(Locator):
"""
This locator uses special extended metadata (not available on PyPI) and is
the basis of performant dependency resolution in distlib. Other locators
require archive downloads before dependencies can be determined! As you
might imagine, that can be slow.
"""
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Not available from this locator')
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
data = get_project_data(name)
if data:
for info in data.get('files', []):
if info['ptype'] != 'sdist' or info['pyversion'] != 'source':
continue
# We don't store summary in project metadata as it makes
# the data bigger for no benefit during dependency
# resolution
dist = make_dist(data['name'], info['version'],
summary=data.get('summary',
'Placeholder for summary'),
scheme=self.scheme)
md = dist.metadata
md.source_url = info['url']
# TODO SHA256 digest
if 'digest' in info and info['digest']:
dist.digest = ('md5', info['digest'])
md.dependencies = info.get('requirements', {})
dist.exports = info.get('exports', {})
result[dist.version] = dist
result['urls'].setdefault(dist.version, set()).add(info['url'])
return result
class DistPathLocator(Locator):
"""
This locator finds installed distributions in a path. It can be useful for
adding to an :class:`AggregatingLocator`.
"""
def __init__(self, distpath, **kwargs):
"""
Initialise an instance.
:param distpath: A :class:`DistributionPath` instance to search.
"""
super(DistPathLocator, self).__init__(**kwargs)
assert isinstance(distpath, DistributionPath)
self.distpath = distpath
def _get_project(self, name):
dist = self.distpath.get_distribution(name)
if dist is None:
result = {'urls': {}, 'digests': {}}
else:
result = {
dist.version: dist,
'urls': {dist.version: set([dist.source_url])},
'digests': {dist.version: set([None])}
}
return result
class AggregatingLocator(Locator):
"""
This class allows you to chain and/or merge a list of locators.
"""
def __init__(self, *locators, **kwargs):
"""
Initialise an instance.
:param locators: The list of locators to search.
:param kwargs: Passed to the superclass constructor,
except for:
* merge - if False (the default), the first successful
search from any of the locators is returned. If True,
the results from all locators are merged (this can be
slow).
"""
self.merge = kwargs.pop('merge', False)
self.locators = locators
super(AggregatingLocator, self).__init__(**kwargs)
def clear_cache(self):
super(AggregatingLocator, self).clear_cache()
for locator in self.locators:
locator.clear_cache()
def _set_scheme(self, value):
self._scheme = value
for locator in self.locators:
locator.scheme = value
scheme = property(Locator.scheme.fget, _set_scheme)
def _get_project(self, name):
result = {}
for locator in self.locators:
d = locator.get_project(name)
if d:
if self.merge:
files = result.get('urls', {})
digests = result.get('digests', {})
# next line could overwrite result['urls'], result['digests']
result.update(d)
df = result.get('urls')
if files and df:
for k, v in files.items():
if k in df:
df[k] |= v
else:
df[k] = v
dd = result.get('digests')
if digests and dd:
dd.update(digests)
else:
# See issue #18. If any dists are found and we're looking
# for specific constraints, we only return something if
# a match is found. For example, if a DirectoryLocator
# returns just foo (1.0) while we're looking for
# foo (>= 2.0), we'll pretend there was nothing there so
# that subsequent locators can be queried. Otherwise we
# would just return foo (1.0) which would then lead to a
# failure to find foo (>= 2.0), because other locators
# weren't searched. Note that this only matters when
# merge=False.
if self.matcher is None:
found = True
else:
found = False
for k in d:
if self.matcher.match(k):
found = True
break
if found:
result = d
break
return result
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
for locator in self.locators:
try:
result |= locator.get_distribution_names()
except NotImplementedError:
pass
return result
# We use a legacy scheme simply because most of the dists on PyPI use legacy
# versions which don't conform to PEP 426 / PEP 440.
default_locator = AggregatingLocator(
JSONLocator(),
SimpleScrapingLocator('https://pypi.python.org/simple/',
timeout=3.0),
scheme='legacy')
locate = default_locator.locate
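# Minimal usage sketch for the module-level convenience (illustrative only;
# requires network access and assumes this module is importable as
# distlib.locators):
#
#   from distlib.locators import locate
#   dist = locate('requests (>= 2.0)')   # a Distribution instance, or None
#   if dist is not None:
#       print(dist.name_and_version, sorted(dist.download_urls))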
NAME_VERSION_RE = re.compile(r'(?P<name>[\w-]+)\s*'
r'\(\s*(==\s*)?(?P<ver>[^)]+)\)$')
class DependencyFinder(object):
"""
Locate dependencies for distributions.
"""
def __init__(self, locator=None):
"""
Initialise an instance, using the specified locator
to locate distributions.
"""
self.locator = locator or default_locator
self.scheme = get_scheme(self.locator.scheme)
def add_distribution(self, dist):
"""
Add a distribution to the finder. This will update internal information
about who provides what.
:param dist: The distribution to add.
"""
logger.debug('adding distribution %s', dist)
name = dist.key
self.dists_by_name[name] = dist
self.dists[(name, dist.version)] = dist
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Add to provided: %s, %s, %s', name, version, dist)
self.provided.setdefault(name, set()).add((version, dist))
def remove_distribution(self, dist):
"""
Remove a distribution from the finder. This will update internal
information about who provides what.
:param dist: The distribution to remove.
"""
logger.debug('removing distribution %s', dist)
name = dist.key
del self.dists_by_name[name]
del self.dists[(name, dist.version)]
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Remove from provided: %s, %s, %s', name, version, dist)
s = self.provided[name]
s.remove((version, dist))
if not s:
del self.provided[name]
def get_matcher(self, reqt):
"""
Get a version matcher for a requirement.
:param reqt: The requirement
:type reqt: str
:return: A version matcher (an instance of
:class:`distlib.version.Matcher`).
"""
try:
matcher = self.scheme.matcher(reqt)
except UnsupportedVersionError: # pragma: no cover
# XXX compat-mode if cannot read the version
name = reqt.split()[0]
matcher = self.scheme.matcher(name)
return matcher
def find_providers(self, reqt):
"""
Find the distributions which can fulfill a requirement.
:param reqt: The requirement.
:type reqt: str
        :return: A set of distributions which can fulfill the requirement.
"""
matcher = self.get_matcher(reqt)
name = matcher.key # case-insensitive
result = set()
provided = self.provided
if name in provided:
for version, provider in provided[name]:
try:
match = matcher.match(version)
except UnsupportedVersionError:
match = False
if match:
result.add(provider)
break
return result
def try_to_replace(self, provider, other, problems):
"""
Attempt to replace one provider with another. This is typically used
when resolving dependencies from multiple sources, e.g. A requires
(B >= 1.0) while C requires (B >= 1.1).
For successful replacement, ``provider`` must meet all the requirements
which ``other`` fulfills.
:param provider: The provider we are trying to replace with.
:param other: The provider we're trying to replace.
:param problems: If False is returned, this will contain what
problems prevented replacement. This is currently
a tuple of the literal string 'cantreplace',
``provider``, ``other`` and the set of requirements
that ``provider`` couldn't fulfill.
:return: True if we can replace ``other`` with ``provider``, else
False.
"""
rlist = self.reqts[other]
unmatched = set()
for s in rlist:
matcher = self.get_matcher(s)
if not matcher.match(provider.version):
unmatched.add(s)
if unmatched:
# can't replace other with provider
problems.add(('cantreplace', provider, other,
frozenset(unmatched)))
result = False
else:
# can replace other with provider
self.remove_distribution(other)
del self.reqts[other]
for s in rlist:
self.reqts.setdefault(provider, set()).add(s)
self.add_distribution(provider)
result = True
return result
def find(self, requirement, meta_extras=None, prereleases=False):
"""
Find a distribution and all distributions it depends on.
:param requirement: The requirement specifying the distribution to
find, or a Distribution instance.
:param meta_extras: A list of meta extras such as :test:, :build: and
so on.
:param prereleases: If ``True``, allow pre-release versions to be
returned - otherwise, don't return prereleases
unless they're all that's available.
Return a set of :class:`Distribution` instances and a set of
problems.
The distributions returned should be such that they have the
:attr:`required` attribute set to ``True`` if they were
from the ``requirement`` passed to ``find()``, and they have the
:attr:`build_time_dependency` attribute set to ``True`` unless they
are post-installation dependencies of the ``requirement``.
The problems should be a tuple consisting of the string
``'unsatisfied'`` and the requirement which couldn't be satisfied
by any distribution known to the locator.
"""
self.provided = {}
self.dists = {}
self.dists_by_name = {}
self.reqts = {}
meta_extras = set(meta_extras or [])
if ':*:' in meta_extras:
meta_extras.remove(':*:')
# :meta: and :run: are implicitly included
meta_extras |= set([':test:', ':build:', ':dev:'])
if isinstance(requirement, Distribution):
dist = odist = requirement
logger.debug('passed %s as requirement', odist)
else:
dist = odist = self.locator.locate(requirement,
prereleases=prereleases)
if dist is None:
raise DistlibException('Unable to locate %r' % requirement)
logger.debug('located %s', odist)
dist.requested = True
problems = set()
todo = set([dist])
install_dists = set([odist])
while todo:
dist = todo.pop()
name = dist.key # case-insensitive
if name not in self.dists_by_name:
self.add_distribution(dist)
else:
#import pdb; pdb.set_trace()
other = self.dists_by_name[name]
if other != dist:
self.try_to_replace(dist, other, problems)
ireqts = dist.run_requires | dist.meta_requires
sreqts = dist.build_requires
ereqts = set()
if dist in install_dists:
for key in ('test', 'build', 'dev'):
e = ':%s:' % key
if e in meta_extras:
ereqts |= getattr(dist, '%s_requires' % key)
all_reqts = ireqts | sreqts | ereqts
for r in all_reqts:
providers = self.find_providers(r)
if not providers:
logger.debug('No providers found for %r', r)
provider = self.locator.locate(r, prereleases=prereleases)
# If no provider is found and we didn't consider
# prereleases, consider them now.
if provider is None and not prereleases:
provider = self.locator.locate(r, prereleases=True)
if provider is None:
logger.debug('Cannot satisfy %r', r)
problems.add(('unsatisfied', r))
else:
n, v = provider.key, provider.version
if (n, v) not in self.dists:
todo.add(provider)
providers.add(provider)
if r in ireqts and dist in install_dists:
install_dists.add(provider)
logger.debug('Adding %s to install_dists',
provider.name_and_version)
for p in providers:
name = p.key
if name not in self.dists_by_name:
self.reqts.setdefault(p, set()).add(r)
else:
other = self.dists_by_name[name]
if other != p:
# see if other can be replaced by p
self.try_to_replace(p, other, problems)
dists = set(self.dists.values())
for dist in dists:
dist.build_time_dependency = dist not in install_dists
if dist.build_time_dependency:
logger.debug('%s is a build-time dependency only.',
dist.name_and_version)
logger.debug('find done for %s', odist)
return dists, problems
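# Usage sketch for DependencyFinder (illustrative only; 'flask (>= 1.0)' is an
# arbitrary example requirement, and resolution needs network access through
# the configured locator):
#
#   finder = DependencyFinder(default_locator)
#   dists, problems = finder.find('flask (>= 1.0)')
#   for d in sorted(dists, key=lambda d: d.name_and_version):
#       print(d.name_and_version,
#             '(build-time only)' if d.build_time_dependency else '')
#   for p in problems:
#       print('problem:', p)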
| lgpl-3.0 |
crobby/sahara | sahara/tests/unit/plugins/cdh/test_config_helper.py | 11 | 1492 | # Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.cdh.v5_3_0 import config_helper as c_h
from sahara.tests.unit import base
from sahara.tests.unit.plugins.cdh import utils as ctu
class ConfigHelperTestCase(base.SaharaTestCase):
def test_is_swift_enabled(self):
cluster = ctu.get_fake_cluster(cluster_configs={})
self.assertTrue(c_h.is_swift_enabled(cluster))
cluster = ctu.get_fake_cluster(
cluster_configs={'general': {c_h.ENABLE_SWIFT.name: False}})
self.assertFalse(c_h.is_swift_enabled(cluster))
def test_get_swift_lib_url(self):
cluster = ctu.get_fake_cluster(cluster_configs={})
self.assertEqual(c_h.DEFAULT_SWIFT_LIB_URL,
c_h.get_swift_lib_url(cluster))
cluster = ctu.get_fake_cluster(
cluster_configs={'general': {c_h.SWIFT_LIB_URL.name: 'spam'}})
self.assertEqual('spam', c_h.get_swift_lib_url(cluster))
| apache-2.0 |
tsdgeos/snapcraft | snapcraft/tests/test_formatting_utils.py | 7 | 1952 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015, 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from snapcraft import formatting_utils
from snapcraft import tests
class HumanizeListTestCases(tests.TestCase):
def test_no_items(self):
items = []
output = formatting_utils.humanize_list(items, 'and')
self.assertEqual(output, '')
def test_one_item(self):
items = ['foo']
output = formatting_utils.humanize_list(items, 'and')
self.assertEqual(output, "'foo'")
def test_two_items(self):
items = ['foo', 'bar']
output = formatting_utils.humanize_list(items, 'and')
self.assertEqual(output, "'bar' and 'foo'",
"Expected 'bar' before 'foo' due to sorting")
def test_three_items(self):
items = ['foo', 'bar', 'baz']
output = formatting_utils.humanize_list(items, 'and')
self.assertEqual(output, "'bar', 'baz', and 'foo'")
def test_four_items(self):
items = ['foo', 'bar', 'baz', 'qux']
output = formatting_utils.humanize_list(items, 'and')
self.assertEqual(output, "'bar', 'baz', 'foo', and 'qux'")
def test_another_conjunction(self):
items = ['foo', 'bar', 'baz', 'qux']
output = formatting_utils.humanize_list(items, 'or')
self.assertEqual(output, "'bar', 'baz', 'foo', or 'qux'")
| gpl-3.0 |